diff --git a/cmd/util/cmd/execution-state-extract/cmd.go b/cmd/util/cmd/execution-state-extract/cmd.go index 4e9987401af..c526dc3664a 100644 --- a/cmd/util/cmd/execution-state-extract/cmd.go +++ b/cmd/util/cmd/execution-state-extract/cmd.go @@ -33,7 +33,11 @@ var ( flagNoReport bool flagValidateMigration bool flagAllowPartialStateFromPayloads bool + flagSortPayloads bool + flagPrune bool flagLogVerboseValidationError bool + flagDiffMigration bool + flagLogVerboseDiff bool flagStagedContractsFile string flagInputPayloadFileName string flagOutputPayloadFileName string @@ -81,12 +85,24 @@ func init() { Cmd.Flags().BoolVar(&flagLogVerboseValidationError, "log-verbose-validation-error", false, "log entire Cadence values on validation error (atree migration)") + Cmd.Flags().BoolVar(&flagDiffMigration, "diff", false, + "compare Cadence values and log diff (migration)") + + Cmd.Flags().BoolVar(&flagLogVerboseDiff, "log-verbose-diff", false, + "log entire Cadence values on diff (requires --diff flag)") + Cmd.Flags().StringVar(&flagStagedContractsFile, "staged-contracts", "", "Staged contracts CSV file") Cmd.Flags().BoolVar(&flagAllowPartialStateFromPayloads, "allow-partial-state-from-payload-file", false, "allow input payload file containing partial state (e.g. not all accounts)") + Cmd.Flags().BoolVar(&flagSortPayloads, "sort-payloads", true, + "sort payloads (generate deterministic output; disable only for development purposes)") + + Cmd.Flags().BoolVar(&flagPrune, "prune", false, + "prune the state (for development purposes)") + // If specified, the state will consist of payloads from the given input payload file. // If not specified, then the state will be extracted from the latest checkpoint file. 
// This flag can be used to reduce total duration of migrations when state extraction involves @@ -139,6 +155,10 @@ func run(*cobra.Command, []string) { log.Fatal().Msg("--extract-payloads-by-address requires --output-payload-filename to be specified") } + if flagValidateMigration && flagDiffMigration { + log.Fatal().Msg("Both --validate and --diff are enabled, please specify only one (or none) of these") + } + if len(flagBlockHash) > 0 { blockID, err := flow.HexStringToIdentifier(flagBlockHash) if err != nil { @@ -241,11 +261,19 @@ func run(*cobra.Command, []string) { } if flagValidateMigration { - log.Warn().Msgf("atree migration validation flag is enabled and will increase duration of migration") + log.Warn().Msgf("--validate flag is enabled and will increase duration of migration") } if flagLogVerboseValidationError { - log.Warn().Msgf("atree migration has verbose validation error logging enabled which may increase size of log") + log.Warn().Msgf("--log-verbose-validation-error flag is enabled which may increase size of log") + } + + if flagDiffMigration { + log.Warn().Msgf("--diff flag is enabled and will increase duration of migration") + } + + if flagLogVerboseDiff { + log.Warn().Msgf("--log-verbose-diff flag is enabled which may increase size of log") } var inputMsg string @@ -287,6 +315,14 @@ func run(*cobra.Command, []string) { // TODO: evmContractChange := migrations.EVMContractChangeNone + var burnerContractChange migrations.BurnerContractChange + switch chainID { + case flow.Emulator: + burnerContractChange = migrations.BurnerContractChangeDeploy + case flow.Testnet, flow.Mainnet: + burnerContractChange = migrations.BurnerContractChangeUpdate + } + stagedContracts, err := migrations.StagedContractsFromCSV(flagStagedContractsFile) if err != nil { log.Fatal().Err(err).Msgf("error loading staged contracts: %s", err.Error()) @@ -299,12 +335,17 @@ func run(*cobra.Command, []string) { flagOutputDir, flagNWorker, !flagNoMigration, + flagDiffMigration, + 
flagLogVerboseDiff, chainID, evmContractChange, + burnerContractChange, stagedContracts, flagInputPayloadFileName, flagOutputPayloadFileName, exportedAddresses, + flagSortPayloads, + flagPrune, ) } else { err = extractExecutionState( @@ -314,11 +355,16 @@ func run(*cobra.Command, []string) { flagOutputDir, flagNWorker, !flagNoMigration, + flagDiffMigration, + flagLogVerboseDiff, chainID, evmContractChange, + burnerContractChange, stagedContracts, flagOutputPayloadFileName, exportedAddresses, + flagSortPayloads, + flagPrune, ) } diff --git a/cmd/util/cmd/execution-state-extract/execution_state_extract.go b/cmd/util/cmd/execution-state-extract/execution_state_extract.go index 25bcadbf2ab..f5c14ec7eaa 100644 --- a/cmd/util/cmd/execution-state-extract/execution_state_extract.go +++ b/cmd/util/cmd/execution-state-extract/execution_state_extract.go @@ -15,6 +15,7 @@ import ( "github.com/onflow/flow-go/cmd/util/ledger/reporters" "github.com/onflow/flow-go/cmd/util/ledger/util" "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" "github.com/onflow/flow-go/ledger/common/hash" "github.com/onflow/flow-go/ledger/common/pathfinder" "github.com/onflow/flow-go/ledger/complete" @@ -37,11 +38,16 @@ func extractExecutionState( outputDir string, nWorker int, // number of concurrent worker to migration payloads runMigrations bool, + diffMigrations bool, + logVerboseDiff bool, chainID flow.ChainID, evmContractChange migrators.EVMContractChange, + burnerContractChange migrators.BurnerContractChange, stagedContracts []migrators.StagedContract, outputPayloadFile string, exportPayloadsByAddresses []common.Address, + sortPayloads bool, + prune bool, ) error { log.Info().Msg("init WAL") @@ -78,7 +84,16 @@ func extractExecutionState( log.Info().Msg("init compactor") - compactor, err := complete.NewCompactor(led, diskWal, log, complete.DefaultCacheSize, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), &metrics.NoopCollector{}) + compactor, err := 
complete.NewCompactor( + led, + diskWal, + log, + complete.DefaultCacheSize, + checkpointDistance, + checkpointsToKeep, + atomic.NewBool(false), + &metrics.NoopCollector{}, + ) if err != nil { return fmt.Errorf("cannot create compactor: %w", err) } @@ -97,9 +112,13 @@ func extractExecutionState( dir, nWorker, runMigrations, + diffMigrations, + logVerboseDiff, chainID, evmContractChange, + burnerContractChange, stagedContracts, + prune, ) newState := ledger.State(targetHash) @@ -118,43 +137,29 @@ func extractExecutionState( // create reporter reporter := reporters.NewExportReporter( log, - func() flow.StateCommitment { return targetHash }, + func() flow.StateCommitment { + return targetHash + }, ) newMigratedState := ledger.State(newTrie.RootHash()) err = reporter.Report(nil, newMigratedState) if err != nil { - log.Error().Err(err).Msgf("can not generate report for migrated state: %v", newMigratedState) + log.Err(err).Msgf("can not generate report for migrated state: %v", newMigratedState) } - exportPayloads := len(outputPayloadFile) > 0 - if exportPayloads { + if len(outputPayloadFile) > 0 { payloads := newTrie.AllPayloads() - log.Info().Msgf("sorting %d payloads", len(payloads)) - - // Sort payloads to produce deterministic payload file with - // same sequence of payloads inside. - payloads = util.SortPayloadsByAddress(payloads, nWorker) - - log.Info().Msgf("sorted %d payloads", len(payloads)) - - log.Info().Msgf("creating payloads file %s", outputPayloadFile) - - exportedPayloadCount, err := util.CreatePayloadFile( + return exportPayloads( log, - outputPayloadFile, payloads, + nWorker, + outputPayloadFile, exportPayloadsByAddresses, false, // payloads represents entire state. 
+ sortPayloads, ) - if err != nil { - return fmt.Errorf("cannot generate payloads file: %w", err) - } - - log.Info().Msgf("Exported %d payloads out of %d payloads", exportedPayloadCount, len(payloads)) - - return nil } migratedState, err := createCheckpoint( @@ -215,18 +220,37 @@ func writeStatusFile(fileName string, e error) error { return err } +func ByteCountIEC(b int64) string { + const unit = 1024 + if b < unit { + return fmt.Sprintf("%d B", b) + } + div, exp := int64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %ciB", + float64(b)/float64(div), "KMGTPE"[exp]) +} + func extractExecutionStateFromPayloads( log zerolog.Logger, dir string, outputDir string, nWorker int, // number of concurrent worker to migation payloads runMigrations bool, + diffMigrations bool, + logVerboseDiff bool, chainID flow.ChainID, evmContractChange migrators.EVMContractChange, + burnerContractChange migrators.BurnerContractChange, stagedContracts []migrators.StagedContract, inputPayloadFile string, outputPayloadFile string, exportPayloadsByAddresses []common.Address, + sortPayloads bool, + prune bool, ) error { inputPayloadsFromPartialState, payloads, err := util.ReadPayloadFile(log, inputPayloadFile) @@ -236,14 +260,48 @@ func extractExecutionStateFromPayloads( log.Info().Msgf("read %d payloads", len(payloads)) + if log.Debug().Enabled() { + + type accountInfo struct { + count int + size uint64 + } + payloadCountByAddress := make(map[string]accountInfo) + + for _, payload := range payloads { + registerID, payloadValue, err := convert.PayloadToRegister(payload) + if err != nil { + return fmt.Errorf("cannot convert payload to register: %w", err) + } + owner := registerID.Owner + accountInfo := payloadCountByAddress[owner] + accountInfo.count++ + accountInfo.size += uint64(len(payloadValue)) + payloadCountByAddress[owner] = accountInfo + } + + for address, info := range payloadCountByAddress { + log.Debug().Msgf( + "address %x 
has %d payloads and a total size of %s", + address, + info.count, + ByteCountIEC(int64(info.size)), + ) + } + } + migrations := newMigrations( log, dir, nWorker, runMigrations, + diffMigrations, + logVerboseDiff, chainID, evmContractChange, + burnerContractChange, stagedContracts, + prune, ) payloads, err = migratePayloads(log, payloads, migrations) @@ -251,33 +309,16 @@ func extractExecutionStateFromPayloads( return err } - exportPayloads := len(outputPayloadFile) > 0 - if exportPayloads { - - log.Info().Msgf("sorting %d payloads", len(payloads)) - - // Sort payloads to produce deterministic payload file with - // same sequence of payloads inside. - payloads = util.SortPayloadsByAddress(payloads, nWorker) - - log.Info().Msgf("sorted %d payloads", len(payloads)) - - log.Info().Msgf("creating payloads file %s", outputPayloadFile) - - exportedPayloadCount, err := util.CreatePayloadFile( + if len(outputPayloadFile) > 0 { + return exportPayloads( log, - outputPayloadFile, payloads, + nWorker, + outputPayloadFile, exportPayloadsByAddresses, inputPayloadsFromPartialState, + sortPayloads, ) - if err != nil { - return fmt.Errorf("cannot generate payloads file: %w", err) - } - - log.Info().Msgf("Exported %d payloads out of %d payloads", exportedPayloadCount, len(payloads)) - - return nil } newTrie, err := createTrieFromPayloads(log, payloads) @@ -304,6 +345,43 @@ func extractExecutionStateFromPayloads( return nil } +func exportPayloads( + log zerolog.Logger, + payloads []*ledger.Payload, + nWorker int, + outputPayloadFile string, + exportPayloadsByAddresses []common.Address, + inputPayloadsFromPartialState bool, + sortPayloads bool, +) error { + if sortPayloads { + log.Info().Msgf("sorting %d payloads", len(payloads)) + + // Sort payloads to produce deterministic payload file with + // same sequence of payloads inside. 
+ payloads = util.SortPayloadsByAddress(payloads, nWorker) + + log.Info().Msgf("sorted %d payloads", len(payloads)) + } + + log.Info().Msgf("creating payloads file %s", outputPayloadFile) + + exportedPayloadCount, err := util.CreatePayloadFile( + log, + outputPayloadFile, + payloads, + exportPayloadsByAddresses, + inputPayloadsFromPartialState, + ) + if err != nil { + return fmt.Errorf("cannot generate payloads file: %w", err) + } + + log.Info().Msgf("exported %d payloads out of %d payloads", exportedPayloadCount, len(payloads)) + + return nil +} + func migratePayloads(logger zerolog.Logger, payloads []*ledger.Payload, migrations []ledger.Migration) ([]*ledger.Payload, error) { if len(migrations) == 0 { @@ -373,22 +451,41 @@ func newMigrations( dir string, nWorker int, runMigrations bool, + diffMigrations bool, + logVerboseDiff bool, chainID flow.ChainID, evmContractChange migrators.EVMContractChange, + burnerContractChange migrators.BurnerContractChange, stagedContracts []migrators.StagedContract, + prune bool, ) []ledger.Migration { if !runMigrations { return nil } + log.Info().Msgf("initializing migrations") + rwf := reporters.NewReportFileWriterFactory(dir, log) - return migrators.NewCadence1Migrations( + namedMigrations := migrators.NewCadence1Migrations( log, rwf, nWorker, chainID, + diffMigrations, + logVerboseDiff, evmContractChange, + burnerContractChange, stagedContracts, + prune, ) + + migrations := make([]ledger.Migration, 0, len(namedMigrations)) + for _, migration := range namedMigrations { + migrations = append(migrations, migration.Migrate) + } + + log.Info().Msgf("initialized migrations") + + return migrations } diff --git a/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go b/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go index 93958be5eef..82ba3bac242 100644 --- a/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go +++ b/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go 
@@ -73,12 +73,16 @@ func TestExtractExecutionState(t *testing.T) { outdir, 10, false, + false, + false, flow.Emulator, - // TODO: migrations.EVMContractChangeNone, + migrations.BurnerContractChangeDeploy, nil, "", nil, + false, + false, ) require.Error(t, err) }) diff --git a/cmd/util/ledger/migrations/account_storage_migration.go b/cmd/util/ledger/migrations/account_storage_migration.go new file mode 100644 index 00000000000..060d41cf168 --- /dev/null +++ b/cmd/util/ledger/migrations/account_storage_migration.go @@ -0,0 +1,74 @@ +package migrations + +import ( + "fmt" + + "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/runtime/interpreter" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/model/flow" +) + +func NewAccountStorageMigration( + address common.Address, + log zerolog.Logger, + migrate func(*runtime.Storage, *interpreter.Interpreter) error, +) ledger.Migration { + return func(payloads []*ledger.Payload) ([]*ledger.Payload, error) { + + migrationRuntime, err := NewMigratorRuntime( + address, + payloads, + util.RuntimeInterfaceConfig{}, + ) + if err != nil { + return nil, fmt.Errorf("failed to create migrator runtime: %w", err) + } + + storage := migrationRuntime.Storage + inter := migrationRuntime.Interpreter + + err = migrate(storage, inter) + if err != nil { + return nil, fmt.Errorf("failed to migrate storage: %w", err) + } + + err = storage.Commit(inter, false) + if err != nil { + return nil, fmt.Errorf("failed to commit changes: %w", err) + } + + err = storage.CheckHealth() + if err != nil { + log.Err(err).Msg("storage health check failed") + } + + // finalize the transaction + result, err := migrationRuntime.TransactionState.FinalizeMainTransaction() + if err != nil { + return nil, fmt.Errorf("failed to finalize main transaction: %w", err) + } + + // Merge the changes to the original payloads. 
+ expectedAddresses := map[flow.Address]struct{}{ + flow.Address(address): {}, + } + + newPayloads, err := MergeRegisterChanges( + migrationRuntime.Snapshot.Payloads, + result.WriteSet, + expectedAddresses, + nil, + log, + ) + if err != nil { + return nil, fmt.Errorf("failed to merge register changes: %w", err) + } + + return newPayloads, nil + } +} diff --git a/cmd/util/ledger/migrations/cadence.go b/cmd/util/ledger/migrations/cadence.go index 1b6418e41eb..b0874c6a71b 100644 --- a/cmd/util/ledger/migrations/cadence.go +++ b/cmd/util/ledger/migrations/cadence.go @@ -156,12 +156,19 @@ func fungibleTokenResolverRule( return oldType, newType } +type NamedMigration struct { + Name string + Migrate ledger.Migration +} + func NewCadence1ValueMigrations( log zerolog.Logger, rwf reporters.ReportWriterFactory, nWorker int, chainID flow.ChainID, -) (migrations []ledger.Migration) { + diffMigrations bool, + logVerboseDiff bool, +) (migrations []NamedMigration) { // Populated by CadenceLinkValueMigrator, // used by CadenceCapabilityValueMigrator @@ -169,32 +176,41 @@ func NewCadence1ValueMigrations( errorMessageHandler := &errorMessageHandler{} - for _, accountBasedMigration := range []AccountBasedMigration{ + for _, accountBasedMigration := range []*CadenceBaseMigrator{ NewCadence1ValueMigrator( rwf, + diffMigrations, + logVerboseDiff, errorMessageHandler, NewCadence1CompositeStaticTypeConverter(chainID), NewCadence1InterfaceStaticTypeConverter(chainID), ), NewCadence1LinkValueMigrator( rwf, + diffMigrations, + logVerboseDiff, errorMessageHandler, capabilityMapping, ), NewCadence1CapabilityValueMigrator( rwf, + diffMigrations, + logVerboseDiff, errorMessageHandler, capabilityMapping, ), } { migrations = append( migrations, - NewAccountBasedMigration( - log, - nWorker, []AccountBasedMigration{ - accountBasedMigration, - }, - ), + NamedMigration{ + Name: accountBasedMigration.name, + Migrate: NewAccountBasedMigration( + log, + nWorker, []AccountBasedMigration{ + 
accountBasedMigration, + }, + ), + }, ) } @@ -206,36 +222,59 @@ func NewCadence1ContractsMigrations( nWorker int, chainID flow.ChainID, evmContractChange EVMContractChange, + burnerContractChange BurnerContractChange, stagedContracts []StagedContract, -) []ledger.Migration { +) []NamedMigration { + + systemContractsMigration := NewSystemContractsMigration( + chainID, + log, + SystemContractChangesOptions{ + EVM: evmContractChange, + Burner: burnerContractChange, + }, + ) - stagedContractsMigration := NewStagedContractsMigration(chainID). + stagedContractsMigration := NewStagedContractsMigration(chainID, log). WithContractUpdateValidation() stagedContractsMigration.RegisterContractUpdates(stagedContracts) - return []ledger.Migration{ - NewAccountBasedMigration( + toAccountBasedMigration := func(migration AccountBasedMigration) ledger.Migration { + return NewAccountBasedMigration( log, nWorker, []AccountBasedMigration{ - NewSystemContactsMigration( - chainID, - SystemContractChangesOptions{ - EVM: evmContractChange, - }, - ), + migration, }, - ), - NewBurnerDeploymentMigration(chainID, log), - NewAccountBasedMigration( - log, - nWorker, - []AccountBasedMigration{ - stagedContractsMigration, + ) + } + + var migrations []NamedMigration + + if burnerContractChange == BurnerContractChangeDeploy { + migrations = append( + migrations, + NamedMigration{ + Name: "burner-deployment-migration", + Migrate: NewBurnerDeploymentMigration(chainID, log), }, - ), + ) } + + migrations = append( + migrations, + NamedMigration{ + Name: "system-contracts-update-migration", + Migrate: toAccountBasedMigration(systemContractsMigration), + }, + NamedMigration{ + Name: "staged-contracts-update-migration", + Migrate: toAccountBasedMigration(stagedContractsMigration), + }, + ) + + return migrations } func NewCadence1Migrations( @@ -243,22 +282,52 @@ func NewCadence1Migrations( rwf reporters.ReportWriterFactory, nWorker int, chainID flow.ChainID, + diffMigrations bool, + logVerboseDiff bool, 
evmContractChange EVMContractChange, + burnerContractChange BurnerContractChange, stagedContracts []StagedContract, -) []ledger.Migration { - return common.Concat( + prune bool, +) []NamedMigration { + + var migrations []NamedMigration + + if prune { + migration := NewCadence1PruneMigration(chainID, log) + if migration != nil { + migrations = append( + migrations, + NamedMigration{ + Name: "prune-migration", + Migrate: migration, + }, + ) + } + } + + migrations = append( + migrations, NewCadence1ContractsMigrations( log, nWorker, chainID, evmContractChange, + burnerContractChange, stagedContracts, - ), + )..., + ) + + migrations = append( + migrations, NewCadence1ValueMigrations( log, rwf, nWorker, chainID, - ), + diffMigrations, + logVerboseDiff, + )..., ) + + return migrations } diff --git a/cmd/util/ledger/migrations/cadence_value_diff.go b/cmd/util/ledger/migrations/cadence_value_diff.go new file mode 100644 index 00000000000..94e148920e3 --- /dev/null +++ b/cmd/util/ledger/migrations/cadence_value_diff.go @@ -0,0 +1,767 @@ +package migrations + +import ( + "fmt" + + "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/runtime/interpreter" + + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/ledger" +) + +type diffKind int + +const ( + storageMapExistDiffKind diffKind = iota // Storage map only exists in one state + storageMapKeyDiffKind // Storage map keys are different + storageMapValueDiffKind // Storage map values are different (only with verbose logging) + cadenceValueDiffKind // Cadence values are different + cadenceValueTypeDiffKind // Cadence value types are different + cadenceValueStaticTypeDiffKind // Cadence value static types are different +) + +var diffKindString = map[diffKind]string{ + storageMapExistDiffKind: "storage_map_exist_diff", + storageMapKeyDiffKind: "storage_map_key_diff", + storageMapValueDiffKind: "storage_map_value_diff", + cadenceValueDiffKind: "cadence_value_diff", + 
cadenceValueTypeDiffKind: "cadence_value_type_diff", + cadenceValueStaticTypeDiffKind: "cadence_value_static_type_diff", +} + +type diffErrorKind int + +const ( + abortErrorKind diffErrorKind = iota + storageMapKeyNotImplementingStorageMapKeyDiffErrorKind + cadenceValueNotImplementEquatableValueDiffErrorKind +) + +var diffErrorKindString = map[diffErrorKind]string{ + abortErrorKind: "error_diff_failed", + storageMapKeyNotImplementingStorageMapKeyDiffErrorKind: "error_storage_map_key_not_implemeting_StorageMapKey", + cadenceValueNotImplementEquatableValueDiffErrorKind: "error_cadence_value_not_implementing_EquatableValue", +} + +type diffError struct { + Address string + Kind string + Msg string +} + +type diffProblem struct { + Address string + Domain string + Kind string + Msg string + Trace string `json:",omitempty"` +} + +type difference struct { + Address string + Domain string + Kind string + Msg string + Trace string `json:",omitempty"` + OldValue string `json:",omitempty"` + NewValue string `json:",omitempty"` + OldValueStaticType string `json:",omitempty"` + NewValueStaticType string `json:",omitempty"` +} + +type CadenceValueDiffReporter struct { + address common.Address + reportWriter reporters.ReportWriter + verboseLogging bool +} + +func NewCadenceValueDiffReporter( + address common.Address, + rw reporters.ReportWriter, + verboseLogging bool, +) *CadenceValueDiffReporter { + return &CadenceValueDiffReporter{ + address: address, + reportWriter: rw, + verboseLogging: verboseLogging, + } +} + +func (dr *CadenceValueDiffReporter) DiffStates(oldPayloads, newPayloads []*ledger.Payload, domains []string) { + // Create all the runtime components we need for comparing Cadence values. 
+ oldRuntime, err := newReadonlyStorageRuntime(oldPayloads) + if err != nil { + dr.reportWriter.Write( + diffError{ + Address: dr.address.Hex(), + Kind: diffErrorKindString[abortErrorKind], + Msg: fmt.Sprintf("failed to create runtime with old state payloads: %s", err), + }) + return + } + + newRuntime, err := newReadonlyStorageRuntime(newPayloads) + if err != nil { + dr.reportWriter.Write( + diffError{ + Address: dr.address.Hex(), + Kind: diffErrorKindString[abortErrorKind], + Msg: fmt.Sprintf("failed to create runtime with new state payloads: %s", err), + }) + return + } + + // Iterate through all domains and compare cadence values. + for _, domain := range domains { + dr.diffStorageDomain(oldRuntime, newRuntime, domain) + } +} + +func (dr *CadenceValueDiffReporter) diffStorageDomain(oldRuntime, newRuntime *readonlyStorageRuntime, domain string) { + + oldStorageMap := oldRuntime.Storage.GetStorageMap(dr.address, domain, false) + + newStorageMap := newRuntime.Storage.GetStorageMap(dr.address, domain, false) + + if oldStorageMap == nil && newStorageMap == nil { + // No storage maps for this domain. 
+ return + } + + if oldStorageMap == nil && newStorageMap != nil { + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[storageMapExistDiffKind], + Msg: fmt.Sprintf( + "old storage map doesn't exist, new storage map has %d elements with keys %v", + newStorageMap.Count(), + getStorageMapKeys(newStorageMap), + ), + }) + + return + } + + if oldStorageMap != nil && newStorageMap == nil { + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[storageMapExistDiffKind], + Msg: fmt.Sprintf( + "new storage map doesn't exist, old storage map has %d elements with keys %v", + oldStorageMap.Count(), + getStorageMapKeys(oldStorageMap), + ), + }) + + return + } + + oldKeys := getStorageMapKeys(oldStorageMap) + newKeys := getStorageMapKeys(newStorageMap) + + onlyOldKeys, onlyNewKeys, sharedKeys := diff(oldKeys, newKeys) + + // Log keys only present in old storage map + if len(onlyOldKeys) > 0 { + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[storageMapKeyDiffKind], + Msg: fmt.Sprintf( + "old storage map has %d elements with keys %v, that are not present in new storge map", + len(onlyOldKeys), + onlyOldKeys, + ), + }) + } + + // Log keys only present in new storage map + if len(onlyNewKeys) > 0 { + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[storageMapKeyDiffKind], + Msg: fmt.Sprintf( + "new storage map has %d elements with keys %v, that are not present in old storge map", + len(onlyNewKeys), + onlyNewKeys, + ), + }) + } + + // Compare elements present in both storage maps + for _, key := range sharedKeys { + + trace := fmt.Sprintf("%s[%v]", domain, key) + + var mapKey interpreter.StorageMapKey + + switch key := key.(type) { + case interpreter.StringAtreeValue: + mapKey = interpreter.StringStorageMapKey(key) + + case interpreter.Uint64AtreeValue: + 
mapKey = interpreter.Uint64StorageMapKey(key) + + case interpreter.StringStorageMapKey: + mapKey = key + + case interpreter.Uint64StorageMapKey: + mapKey = key + + default: + dr.reportWriter.Write( + diffProblem{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffErrorKindString[storageMapKeyNotImplementingStorageMapKeyDiffErrorKind], + Trace: trace, + Msg: fmt.Sprintf( + "invalid storage map key %v (%T), expected interpreter.StorageMapKey", + key, + key, + ), + }) + continue + } + + oldValue := oldStorageMap.ReadValue(nopMemoryGauge, mapKey) + + newValue := newStorageMap.ReadValue(nopMemoryGauge, mapKey) + + hasDifference := dr.diffValues( + oldRuntime.Interpreter, + oldValue, + newRuntime.Interpreter, + newValue, + domain, + trace, + ) + if hasDifference { + if dr.verboseLogging { + // Log potentially large values at top level only when verbose logging is enabled. + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[storageMapValueDiffKind], + Msg: "storage map elements are different", + Trace: trace, + OldValue: oldValue.String(), + NewValue: newValue.String(), + OldValueStaticType: oldValue.StaticType(oldRuntime.Interpreter).String(), + NewValueStaticType: newValue.StaticType(newRuntime.Interpreter).String(), + }) + } + } + + } +} + +func (dr *CadenceValueDiffReporter) diffValues( + vInterpreter *interpreter.Interpreter, + v interpreter.Value, + otherInterpreter *interpreter.Interpreter, + other interpreter.Value, + domain string, + trace string, +) (hasDifference bool) { + switch v := v.(type) { + case *interpreter.ArrayValue: + return dr.diffCadenceArrayValue(vInterpreter, v, otherInterpreter, other, domain, trace) + + case *interpreter.CompositeValue: + return dr.diffCadenceCompositeValue(vInterpreter, v, otherInterpreter, other, domain, trace) + + case *interpreter.DictionaryValue: + return dr.diffCadenceDictionaryValue(vInterpreter, v, otherInterpreter, other, domain, trace) + + case 
*interpreter.SomeValue: + return dr.diffCadenceSomeValue(vInterpreter, v, otherInterpreter, other, domain, trace) + + default: + oldValue, ok := v.(interpreter.EquatableValue) + if !ok { + dr.reportWriter.Write( + diffProblem{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffErrorKindString[cadenceValueNotImplementEquatableValueDiffErrorKind], + Trace: trace, + Msg: fmt.Sprintf("old value doesn't implement interpreter.EquatableValue: %s (%T)", oldValue.String(), oldValue), + }) + return true + } + + if !oldValue.Equal(nil, interpreter.EmptyLocationRange, other) { + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueDiffKind], + Msg: fmt.Sprintf("values differ: %T vs %T", oldValue, other), + Trace: trace, + OldValue: v.String(), + NewValue: other.String(), + OldValueStaticType: v.StaticType(vInterpreter).String(), + NewValueStaticType: other.StaticType(otherInterpreter).String(), + }) + return true + } + } + + return false +} + +func (dr *CadenceValueDiffReporter) diffCadenceSomeValue( + vInterpreter *interpreter.Interpreter, + v *interpreter.SomeValue, + otherInterpreter *interpreter.Interpreter, + other interpreter.Value, + domain string, + trace string, +) (hasDifference bool) { + otherSome, ok := other.(*interpreter.SomeValue) + if !ok { + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueTypeDiffKind], + Trace: trace, + Msg: fmt.Sprintf("types differ: %T != %T", v, other), + }) + return true + } + + innerValue := v.InnerValue(vInterpreter, interpreter.EmptyLocationRange) + + otherInnerValue := otherSome.InnerValue(otherInterpreter, interpreter.EmptyLocationRange) + + return dr.diffValues(vInterpreter, innerValue, otherInterpreter, otherInnerValue, domain, trace) +} + +func (dr *CadenceValueDiffReporter) diffCadenceArrayValue( + vInterpreter *interpreter.Interpreter, + v *interpreter.ArrayValue, + otherInterpreter 
*interpreter.Interpreter, + other interpreter.Value, + domain string, + trace string, +) (hasDifference bool) { + otherArray, ok := other.(*interpreter.ArrayValue) + if !ok { + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueTypeDiffKind], + Trace: trace, + Msg: fmt.Sprintf("types differ: %T != %T", v, other), + }) + return true + } + + if v.Type == nil && otherArray.Type != nil { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueStaticTypeDiffKind], + Trace: trace, + Msg: fmt.Sprintf("array static types differ: nil != %s", otherArray.Type), + }) + } + + if v.Type != nil && otherArray.Type == nil { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueStaticTypeDiffKind], + Trace: trace, + Msg: fmt.Sprintf("array static types differ: %s != nil", v.Type), + }) + } + + if v.Type != nil && otherArray.Type != nil && !v.Type.Equal(otherArray.Type) { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueStaticTypeDiffKind], + Trace: trace, + Msg: fmt.Sprintf("array static types differ: %s != %s", v.Type, otherArray.Type), + }) + } + + count := v.Count() + if count != otherArray.Count() { + hasDifference = true + + d := difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueDiffKind], + Trace: trace, + Msg: fmt.Sprintf("array counts differ: %d != %d", count, otherArray.Count()), + } + + if dr.verboseLogging { + d.OldValue = v.String() + d.NewValue = other.String() + } + + dr.reportWriter.Write(d) + } + + // Compare array elements + for i := 0; i < min(count, otherArray.Count()); i++ { + element := v.Get(vInterpreter, interpreter.EmptyLocationRange, i) + otherElement := 
otherArray.Get(otherInterpreter, interpreter.EmptyLocationRange, i) + + elementTrace := fmt.Sprintf("%s[%d]", trace, i) + elementHasDifference := dr.diffValues(vInterpreter, element, otherInterpreter, otherElement, domain, elementTrace) + if elementHasDifference { + hasDifference = true + } + } + + return hasDifference +} + +func (dr *CadenceValueDiffReporter) diffCadenceCompositeValue( + vInterpreter *interpreter.Interpreter, + v *interpreter.CompositeValue, + otherInterpreter *interpreter.Interpreter, + other interpreter.Value, + domain string, + trace string, +) (hasDifference bool) { + otherComposite, ok := other.(*interpreter.CompositeValue) + if !ok { + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueTypeDiffKind], + Trace: trace, + Msg: fmt.Sprintf("types differ: %T != %T", v, other), + }) + return true + } + + if !v.StaticType(vInterpreter).Equal(otherComposite.StaticType(otherInterpreter)) { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueStaticTypeDiffKind], + Trace: trace, + Msg: fmt.Sprintf( + "composite static types differ: %s != %s", + v.StaticType(vInterpreter), + otherComposite.StaticType(otherInterpreter)), + }) + } + + if v.Kind != otherComposite.Kind { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueStaticTypeDiffKind], + Trace: trace, + Msg: fmt.Sprintf( + "composite kinds differ: %d != %d", + v.Kind, + otherComposite.Kind, + ), + }) + } + + oldFieldNames := make([]string, 0, v.FieldCount()) + v.ForEachFieldName(func(fieldName string) bool { + oldFieldNames = append(oldFieldNames, fieldName) + return true + }) + + newFieldNames := make([]string, 0, otherComposite.FieldCount()) + otherComposite.ForEachFieldName(func(fieldName string) bool { + newFieldNames = append(newFieldNames, 
fieldName) + return true + }) + + onlyOldFieldNames, onlyNewFieldNames, sharedFieldNames := diff(oldFieldNames, newFieldNames) + + // Log field names only present in old composite value + if len(onlyOldFieldNames) > 0 { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueDiffKind], + Trace: trace, + Msg: fmt.Sprintf( + "old composite value has %d fields with keys %v, that are not present in new composite value", + len(onlyOldFieldNames), + onlyOldFieldNames, + ), + }) + } + + // Log field names only present in new composite value + if len(onlyNewFieldNames) > 0 { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueDiffKind], + Trace: trace, + Msg: fmt.Sprintf( + "new composite value has %d fields with keys %v, that are not present in old composite value", + len(onlyNewFieldNames), + onlyNewFieldNames, + ), + }) + } + + // Compare fields in both composite values + for _, fieldName := range sharedFieldNames { + fieldValue := v.GetField(vInterpreter, interpreter.EmptyLocationRange, fieldName) + otherFieldValue := otherComposite.GetField(otherInterpreter, interpreter.EmptyLocationRange, fieldName) + + fieldTrace := fmt.Sprintf("%s.%s", trace, fieldName) + fieldHasDifference := dr.diffValues(vInterpreter, fieldValue, otherInterpreter, otherFieldValue, domain, fieldTrace) + if fieldHasDifference { + hasDifference = true + } + } + + return hasDifference +} + +func (dr *CadenceValueDiffReporter) diffCadenceDictionaryValue( + vInterpreter *interpreter.Interpreter, + v *interpreter.DictionaryValue, + otherInterpreter *interpreter.Interpreter, + other interpreter.Value, + domain string, + trace string, +) (hasDifference bool) { + otherDictionary, ok := other.(*interpreter.DictionaryValue) + if !ok { + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: 
diffKindString[cadenceValueTypeDiffKind], + Trace: trace, + Msg: fmt.Sprintf("types differ: %T != %T", v, other), + }) + return true + } + + if !v.Type.Equal(otherDictionary.Type) { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueStaticTypeDiffKind], + Trace: trace, + Msg: fmt.Sprintf( + "dict static types differ: %s != %s", + v.Type, + otherDictionary.Type), + }) + } + + oldKeys := make([]interpreter.Value, 0, v.Count()) + v.IterateKeys(vInterpreter, func(key interpreter.Value) (resume bool) { + oldKeys = append(oldKeys, key) + return true + }) + + newKeys := make([]interpreter.Value, 0, otherDictionary.Count()) + otherDictionary.IterateKeys(otherInterpreter, func(key interpreter.Value) (resume bool) { + newKeys = append(newKeys, key) + return true + }) + + onlyOldKeys, onlyNewKeys, sharedKeys := diffCadenceValues(oldKeys, newKeys) + + // Log keys only present in old dict value + if len(onlyOldKeys) > 0 { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueDiffKind], + Trace: trace, + Msg: fmt.Sprintf( + "old dict value has %d elements with keys %v, that are not present in new dict value", + len(onlyOldKeys), + onlyOldKeys, + ), + }) + } + + // Log keys only present in new dict value + if len(onlyNewKeys) > 0 { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain, + Kind: diffKindString[cadenceValueDiffKind], + Trace: trace, + Msg: fmt.Sprintf( + "new dict value has %d elements with keys %v, that are not present in old dict value", + len(onlyNewKeys), + onlyNewKeys, + ), + }) + } + + // Compare elements in both dict values + for _, key := range sharedKeys { + valueTrace := fmt.Sprintf("%s[%v]", trace, key) + + oldValue, _ := v.Get(vInterpreter, interpreter.EmptyLocationRange, key) + + newValue, _ :=
otherDictionary.Get(otherInterpreter, interpreter.EmptyLocationRange, key) + + elementHasDifference := dr.diffValues(vInterpreter, oldValue, otherInterpreter, newValue, domain, valueTrace) + if elementHasDifference { + hasDifference = true + } + } + + return hasDifference +} + +func getStorageMapKeys(storageMap *interpreter.StorageMap) []any { + keys := make([]any, 0, storageMap.Count()) + + iter := storageMap.Iterator(nil) + for { + key := iter.NextKey() + if key == nil { + break + } + keys = append(keys, key) + } + + return keys +} + +func diff[T comparable](old, new []T) (onlyOld, onlyNew, shared []T) { + onlyOld = make([]T, 0, len(old)) + onlyNew = make([]T, 0, len(new)) + shared = make([]T, 0, min(len(old), len(new))) + + sharedNew := make([]bool, len(new)) + + for _, o := range old { + found := false + + for i, n := range new { + if o == n { + shared = append(shared, o) + found = true + sharedNew[i] = true + break + } + } + + if !found { + onlyOld = append(onlyOld, o) + } + } + + for i, shared := range sharedNew { + if !shared { + onlyNew = append(onlyNew, new[i]) + } + } + + return +} + +func diffCadenceValues(old, new []interpreter.Value) (onlyOld, onlyNew, shared []interpreter.Value) { + onlyOld = make([]interpreter.Value, 0, len(old)) + onlyNew = make([]interpreter.Value, 0, len(new)) + shared = make([]interpreter.Value, 0, min(len(old), len(new))) + + sharedNew := make([]bool, len(new)) + + for _, o := range old { + found := false + + for i, n := range new { + foundShared := false + + if ev, ok := o.(interpreter.EquatableValue); ok { + if ev.Equal(nil, interpreter.EmptyLocationRange, n) { + foundShared = true + } + } else { + if o == n { + foundShared = true + } + } + + if foundShared { + shared = append(shared, o) + found = true + sharedNew[i] = true + break + } + } + + if !found { + onlyOld = append(onlyOld, o) + } + } + + for i, shared := range sharedNew { + if !shared { + onlyNew = append(onlyNew, new[i]) + } + } + + return +} + +func min(a, b int) 
int { + if a <= b { + return a + } + return b +} diff --git a/cmd/util/ledger/migrations/cadence_value_diff_test.go b/cmd/util/ledger/migrations/cadence_value_diff_test.go new file mode 100644 index 00000000000..20e5899810b --- /dev/null +++ b/cmd/util/ledger/migrations/cadence_value_diff_test.go @@ -0,0 +1,655 @@ +package migrations + +import ( + "fmt" + "testing" + + "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/runtime/interpreter" + + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + + "github.com/stretchr/testify/require" +) + +func TestDiffCadenceValues(t *testing.T) { + address, err := common.HexToAddress("0x1") + require.NoError(t, err) + + domain := common.PathDomainStorage.Identifier() + + t.Run("no diff", func(t *testing.T) { + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, writer, true) + + diffReporter.DiffStates( + createTestPayloads(t, address, domain), + createTestPayloads(t, address, domain), + []string{domain}, + ) + require.NoError(t, err) + require.Equal(t, 0, len(writer.entries)) + }) + + t.Run("one storage map doesn't exist", func(t *testing.T) { + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, writer, true) + + diffReporter.DiffStates( + createTestPayloads(t, address, domain), + nil, + []string{domain}, + ) + require.NoError(t, err) + require.Equal(t, 1, len(writer.entries)) + + diff := writer.entries[0].(difference) + require.Equal(t, diffKindString[storageMapExistDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain, diff.Domain) + }) + + t.Run("storage maps have different sets of keys", func(t *testing.T) { + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, writer, true) + + 
diffReporter.DiffStates( + createTestPayloads(t, address, domain), + createStorageMapPayloads(t, address, domain, []string{"unique_key"}, []interpreter.Value{interpreter.UInt64Value(0)}), + []string{domain}, + ) + require.NoError(t, err) + + // 2 differences: + // - unique keys in old storage map + // - unique keys in new storage map + require.Equal(t, 2, len(writer.entries)) + + for _, entry := range writer.entries { + diff := entry.(difference) + require.Equal(t, diffKindString[storageMapKeyDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain, diff.Domain) + } + }) + + t.Run("storage maps have overlapping keys", func(t *testing.T) { + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, writer, true) + + diffReporter.DiffStates( + createStorageMapPayloads(t, address, domain, []string{"0", "1"}, []interpreter.Value{interpreter.UInt64Value(0), interpreter.UInt64Value(0)}), + createStorageMapPayloads(t, address, domain, []string{"2", "0"}, []interpreter.Value{interpreter.UInt64Value(0), interpreter.UInt64Value(0)}), + []string{domain}, + ) + require.NoError(t, err) + + // 2 entries: + // - unique keys in old storage map + // - unique keys in new storage map + require.Equal(t, 2, len(writer.entries)) + + for _, entry := range writer.entries { + diff := entry.(difference) + require.Equal(t, diffKindString[storageMapKeyDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain, diff.Domain) + } + }) + + t.Run("storage maps have one different value", func(t *testing.T) { + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, writer, false) + + diffReporter.DiffStates( + createStorageMapPayloads(t, address, domain, []string{"0", "1"}, []interpreter.Value{interpreter.UInt64Value(100), interpreter.UInt64Value(101)}), + createStorageMapPayloads(t, address, domain, []string{"0", "1"}, 
[]interpreter.Value{interpreter.UInt64Value(111), interpreter.UInt64Value(101)}), + []string{domain}, + ) + require.NoError(t, err) + + // 1 entries: + // - different value + require.Equal(t, 1, len(writer.entries)) + + diff := writer.entries[0].(difference) + require.Equal(t, diffKindString[cadenceValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain, diff.Domain) + require.Equal(t, "storage[0]", diff.Trace) + require.Equal(t, "100", diff.OldValue) + require.Equal(t, "111", diff.NewValue) + }) + + t.Run("storage maps have multiple different values", func(t *testing.T) { + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, writer, false) + + diffReporter.DiffStates( + createStorageMapPayloads(t, address, domain, []string{"0", "1"}, []interpreter.Value{interpreter.UInt64Value(100), interpreter.UInt64Value(101)}), + createStorageMapPayloads(t, address, domain, []string{"0", "1"}, []interpreter.Value{interpreter.UInt64Value(111), interpreter.UInt64Value(102)}), + []string{domain}, + ) + require.NoError(t, err) + + // 2 entries with 2 different values: + require.Equal(t, 2, len(writer.entries)) + + for _, entry := range writer.entries { + diff := entry.(difference) + require.Equal(t, diffKindString[cadenceValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain, diff.Domain) + require.True(t, diff.Trace == "storage[0]" || diff.Trace == "storage[1]") + } + }) + + t.Run("nested array value has different elements", func(t *testing.T) { + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, writer, false) + + createPayloads := func(arrayValues []interpreter.Value) []*ledger.Payload { + + // Create account status payload + accountStatus := environment.NewAccountStatus() + accountStatusPayload := ledger.NewPayload( + convert.RegisterIDToLedgerKey( + flow.AccountStatusRegisterID(flow.ConvertAddress(address)), + ), + 
accountStatus.ToBytes(), + ) + + mr, err := NewMigratorRuntime( + address, + []*ledger.Payload{accountStatusPayload}, + util.RuntimeInterfaceConfig{}, + ) + require.NoError(t, err) + + // Create new storage map + storageMap := mr.Storage.GetStorageMap(mr.Address, domain, true) + + nestedArray := interpreter.NewArrayValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeUInt64, + }, + address, + arrayValues..., + ) + + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(fmt.Sprintf("key_%d", storageMap.Count())), + interpreter.NewArrayValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + nestedArray, + ), + ) + + err = mr.Storage.Commit(mr.Interpreter, false) + require.NoError(t, err) + + // finalize the transaction + result, err := mr.TransactionState.FinalizeMainTransaction() + require.NoError(t, err) + + payloads := make([]*ledger.Payload, 0, len(result.WriteSet)) + for id, value := range result.WriteSet { + key := convert.RegisterIDToLedgerKey(id) + payloads = append(payloads, ledger.NewPayload(key, value)) + } + + return payloads + } + + diffReporter.DiffStates( + createPayloads([]interpreter.Value{interpreter.UInt64Value(0), interpreter.UInt64Value(2), interpreter.UInt64Value(4)}), + createPayloads([]interpreter.Value{interpreter.UInt64Value(1), interpreter.UInt64Value(3), interpreter.UInt64Value(5)}), + []string{domain}, + ) + require.NoError(t, err) + + // 3 entries: + // - different value + require.Equal(t, 3, len(writer.entries)) + + for _, entry := range writer.entries { + diff := entry.(difference) + require.Equal(t, diffKindString[cadenceValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain, diff.Domain) + require.True(t, diff.Trace == "storage[key_0][0][0]" || diff.Trace == 
"storage[key_0][0][1]" || diff.Trace == "storage[key_0][0][2]") + + switch diff.Trace { + case "storage[key_0][0][0]": + require.Equal(t, "0", diff.OldValue) + require.Equal(t, "1", diff.NewValue) + + case "storage[key_0][0][1]": + require.Equal(t, "2", diff.OldValue) + require.Equal(t, "3", diff.NewValue) + + case "storage[key_0][0][2]": + require.Equal(t, "4", diff.OldValue) + require.Equal(t, "5", diff.NewValue) + } + } + }) + + t.Run("nested dict value has different elements", func(t *testing.T) { + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, writer, false) + + createPayloads := func(dictValues []interpreter.Value) []*ledger.Payload { + + // Create account status payload + accountStatus := environment.NewAccountStatus() + accountStatusPayload := ledger.NewPayload( + convert.RegisterIDToLedgerKey( + flow.AccountStatusRegisterID(flow.ConvertAddress(address)), + ), + accountStatus.ToBytes(), + ) + + mr, err := NewMigratorRuntime( + address, + []*ledger.Payload{accountStatusPayload}, + util.RuntimeInterfaceConfig{}, + ) + require.NoError(t, err) + + // Create new storage map + storageMap := mr.Storage.GetStorageMap(mr.Address, domain, true) + + nestedDict := interpreter.NewDictionaryValueWithAddress( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.DictionaryStaticType{ + KeyType: interpreter.PrimitiveStaticTypeAnyStruct, + ValueType: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + dictValues..., + ) + + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(fmt.Sprintf("key_%d", storageMap.Count())), + interpreter.NewArrayValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + nestedDict, + ), + ) + + err = mr.Storage.Commit(mr.Interpreter, false) + require.NoError(t, err) + + // finalize the transaction + result, err := 
mr.TransactionState.FinalizeMainTransaction() + require.NoError(t, err) + + payloads := make([]*ledger.Payload, 0, len(result.WriteSet)) + for id, value := range result.WriteSet { + key := convert.RegisterIDToLedgerKey(id) + payloads = append(payloads, ledger.NewPayload(key, value)) + } + + return payloads + } + + diffReporter.DiffStates( + createPayloads( + []interpreter.Value{interpreter.NewUnmeteredStringValue("dict_key_0"), + interpreter.UInt64Value(0), + interpreter.NewUnmeteredStringValue("dict_key_1"), + interpreter.UInt64Value(2), + }), + createPayloads( + []interpreter.Value{interpreter.NewUnmeteredStringValue("dict_key_0"), + interpreter.UInt64Value(1), + interpreter.NewUnmeteredStringValue("dict_key_1"), + interpreter.UInt64Value(3), + }), + []string{domain}, + ) + require.NoError(t, err) + + // 2 entries: + // - different value + require.Equal(t, 2, len(writer.entries)) + + for _, entry := range writer.entries { + diff := entry.(difference) + require.Equal(t, diffKindString[cadenceValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain, diff.Domain) + require.True(t, diff.Trace == "storage[key_0][0][\"dict_key_0\"]" || diff.Trace == "storage[key_0][0][\"dict_key_1\"]") + + switch diff.Trace { + case "storage[key_0][0][\"dict_key_0\"]": + require.Equal(t, "0", diff.OldValue) + require.Equal(t, "1", diff.NewValue) + + case "storage[key_0][0][\"dict_key_1\"]": + require.Equal(t, "2", diff.OldValue) + require.Equal(t, "3", diff.NewValue) + } + } + }) + + t.Run("nested composite value has different elements", func(t *testing.T) { + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, writer, false) + + createPayloads := func(compositeFields []string, compositeValues []interpreter.Value) []*ledger.Payload { + + // Create account status payload + accountStatus := environment.NewAccountStatus() + accountStatusPayload := ledger.NewPayload( + convert.RegisterIDToLedgerKey( + 
flow.AccountStatusRegisterID(flow.ConvertAddress(address)), + ), + accountStatus.ToBytes(), + ) + + mr, err := NewMigratorRuntime( + address, + []*ledger.Payload{accountStatusPayload}, + util.RuntimeInterfaceConfig{}, + ) + require.NoError(t, err) + + // Create new storage map + storageMap := mr.Storage.GetStorageMap(mr.Address, domain, true) + + var fields []interpreter.CompositeField + + for i, fieldName := range compositeFields { + fields = append(fields, interpreter.CompositeField{Name: fieldName, Value: compositeValues[i]}) + } + + nestedComposite := interpreter.NewCompositeValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + common.StringLocation("test"), + "Test", + common.CompositeKindStructure, + fields, + address, + ) + + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(fmt.Sprintf("key_%d", storageMap.Count())), + interpreter.NewArrayValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + nestedComposite, + ), + ) + + err = mr.Storage.Commit(mr.Interpreter, false) + require.NoError(t, err) + + // finalize the transaction + result, err := mr.TransactionState.FinalizeMainTransaction() + require.NoError(t, err) + + payloads := make([]*ledger.Payload, 0, len(result.WriteSet)) + for id, value := range result.WriteSet { + key := convert.RegisterIDToLedgerKey(id) + payloads = append(payloads, ledger.NewPayload(key, value)) + } + + return payloads + } + + diffReporter.DiffStates( + createPayloads( + []string{ + "Field_0", + "Field_1", + }, + []interpreter.Value{ + interpreter.UInt64Value(0), + interpreter.UInt64Value(2), + }), + createPayloads( + []string{ + "Field_0", + "Field_1", + }, + []interpreter.Value{ + interpreter.UInt64Value(1), + interpreter.UInt64Value(3), + }), + []string{domain}, + ) + require.NoError(t, err) + + // 2 entries: + // - different value + require.Equal(t, 2, len(writer.entries)) + + for _, 
entry := range writer.entries { + diff := entry.(difference) + require.Equal(t, diffKindString[cadenceValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain, diff.Domain) + require.True(t, diff.Trace == "storage[key_0][0].Field_0" || diff.Trace == "storage[key_0][0].Field_1") + + switch diff.Trace { + case "storage[key_0][0].Field_0": + require.Equal(t, "0", diff.OldValue) + require.Equal(t, "1", diff.NewValue) + + case "storage[key_0][0].Field_1": + require.Equal(t, "2", diff.OldValue) + require.Equal(t, "3", diff.NewValue) + } + } + }) + + t.Run("nested composite value has different elements with verbose logging", func(t *testing.T) { + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, writer, true) + + createPayloads := func(compositeFields []string, compositeValues []interpreter.Value) []*ledger.Payload { + + // Create account status payload + accountStatus := environment.NewAccountStatus() + accountStatusPayload := ledger.NewPayload( + convert.RegisterIDToLedgerKey( + flow.AccountStatusRegisterID(flow.ConvertAddress(address)), + ), + accountStatus.ToBytes(), + ) + + mr, err := NewMigratorRuntime( + address, + []*ledger.Payload{accountStatusPayload}, + util.RuntimeInterfaceConfig{}, + ) + require.NoError(t, err) + + // Create new storage map + storageMap := mr.Storage.GetStorageMap(mr.Address, domain, true) + + var fields []interpreter.CompositeField + + for i, fieldName := range compositeFields { + fields = append(fields, interpreter.CompositeField{Name: fieldName, Value: compositeValues[i]}) + } + + nestedComposite := interpreter.NewCompositeValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + common.StringLocation("test"), + "Test", + common.CompositeKindStructure, + fields, + address, + ) + + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(fmt.Sprintf("key_%d", storageMap.Count())), + interpreter.NewArrayValue( + mr.Interpreter, + 
interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + nestedComposite, + ), + ) + + err = mr.Storage.Commit(mr.Interpreter, false) + require.NoError(t, err) + + // finalize the transaction + result, err := mr.TransactionState.FinalizeMainTransaction() + require.NoError(t, err) + + payloads := make([]*ledger.Payload, 0, len(result.WriteSet)) + for id, value := range result.WriteSet { + key := convert.RegisterIDToLedgerKey(id) + payloads = append(payloads, ledger.NewPayload(key, value)) + } + + return payloads + } + + diffReporter.DiffStates( + createPayloads( + []string{ + "Field_0", + "Field_1", + }, + []interpreter.Value{ + interpreter.UInt64Value(0), + interpreter.UInt64Value(2), + }), + createPayloads( + []string{ + "Field_0", + "Field_1", + }, + []interpreter.Value{ + interpreter.UInt64Value(1), + interpreter.UInt64Value(3), + }), + []string{domain}, + ) + require.NoError(t, err) + + // 3 entries: + // - 2 different values + // - verbose logging of storage map element + require.Equal(t, 3, len(writer.entries)) + + // Test 2 cadence value diff logs + for _, entry := range writer.entries[:2] { + diff := entry.(difference) + require.Equal(t, diffKindString[cadenceValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain, diff.Domain) + require.True(t, diff.Trace == "storage[key_0][0].Field_0" || diff.Trace == "storage[key_0][0].Field_1") + + switch diff.Trace { + case "storage[key_0][0].Field_0": + require.Equal(t, "0", diff.OldValue) + require.Equal(t, "1", diff.NewValue) + + case "storage[key_0][0].Field_1": + require.Equal(t, "2", diff.OldValue) + require.Equal(t, "3", diff.NewValue) + } + } + + // Test storage map value diff log (only with verbose logging) + diff := writer.entries[2].(difference) + require.Equal(t, diffKindString[storageMapValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t,
domain, diff.Domain) + require.Equal(t, "storage[key_0]", diff.Trace) + require.Equal(t, "[S.test.Test(Field_1: 2, Field_0: 0)]", diff.OldValue) + require.Equal(t, "[S.test.Test(Field_1: 3, Field_0: 1)]", diff.NewValue) + }) +} + +func createStorageMapPayloads(t *testing.T, address common.Address, domain string, keys []string, values []interpreter.Value) []*ledger.Payload { + + // Create account status payload + accountStatus := environment.NewAccountStatus() + accountStatusPayload := ledger.NewPayload( + convert.RegisterIDToLedgerKey( + flow.AccountStatusRegisterID(flow.ConvertAddress(address)), + ), + accountStatus.ToBytes(), + ) + + mr, err := NewMigratorRuntime( + address, + []*ledger.Payload{accountStatusPayload}, + util.RuntimeInterfaceConfig{}, + ) + require.NoError(t, err) + + // Create new storage map + storageMap := mr.Storage.GetStorageMap(mr.Address, domain, true) + + for i, k := range keys { + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(k), + values[i], + ) + } + + err = mr.Storage.Commit(mr.Interpreter, false) + require.NoError(t, err) + + // finalize the transaction + result, err := mr.TransactionState.FinalizeMainTransaction() + require.NoError(t, err) + + payloads := make([]*ledger.Payload, 0, len(result.WriteSet)) + for id, value := range result.WriteSet { + key := convert.RegisterIDToLedgerKey(id) + payloads = append(payloads, ledger.NewPayload(key, value)) + } + + return payloads +} diff --git a/cmd/util/ledger/migrations/cadence_values_migration.go b/cmd/util/ledger/migrations/cadence_values_migration.go index d699cd4491b..e5c9a76f212 100644 --- a/cmd/util/ledger/migrations/cadence_values_migration.go +++ b/cmd/util/ledger/migrations/cadence_values_migration.go @@ -8,16 +8,16 @@ import ( "errors" - "github.com/onflow/cadence/migrations/statictypes" - "github.com/onflow/cadence/runtime" - "github.com/rs/zerolog" - "github.com/onflow/cadence/migrations" "github.com/onflow/cadence/migrations/capcons" 
"github.com/onflow/cadence/migrations/entitlements" + "github.com/onflow/cadence/migrations/statictypes" "github.com/onflow/cadence/migrations/string_normalization" + "github.com/onflow/cadence/runtime" "github.com/onflow/cadence/runtime/common" + cadenceErrors "github.com/onflow/cadence/runtime/errors" "github.com/onflow/cadence/runtime/interpreter" + "github.com/rs/zerolog" "github.com/onflow/flow-go/cmd/util/ledger/reporters" "github.com/onflow/flow-go/cmd/util/ledger/util" @@ -32,6 +32,8 @@ type CadenceBaseMigrator struct { name string log zerolog.Logger reporter reporters.ReportWriter + diffReporter reporters.ReportWriter + logVerboseDiff bool valueMigrations func( inter *interpreter.Interpreter, accounts environment.Accounts, @@ -47,6 +49,11 @@ var _ io.Closer = (*CadenceBaseMigrator)(nil) func (m *CadenceBaseMigrator) Close() error { // Close the report writer so it flushes to file. m.reporter.Close() + + if m.diffReporter != nil { + m.diffReporter.Close() + } + return nil } @@ -118,6 +125,8 @@ func (m *CadenceBaseMigrator) MigrateAccount( oldPayloads []*ledger.Payload, ) ([]*ledger.Payload, error) { + checkPayloadsOwnership(oldPayloads, address, m.log) + // Create all the runtime components we need for the migration migrationRuntime, err := NewMigratorRuntime( @@ -129,13 +138,21 @@ func (m *CadenceBaseMigrator) MigrateAccount( return nil, fmt.Errorf("failed to create migrator runtime: %w", err) } + storage := migrationRuntime.Storage + migration := migrations.NewStorageMigration( migrationRuntime.Interpreter, - migrationRuntime.Storage, + storage, ) reporter := newValueMigrationReporter(m.reporter, m.log, m.errorMessageHandler) + valueMigrations := m.valueMigrations( + migrationRuntime.Interpreter, + migrationRuntime.Accounts, + reporter, + ) + migration.Migrate( &migrations.AddressSliceIterator{ Addresses: []common.Address{ @@ -144,7 +161,7 @@ func (m *CadenceBaseMigrator) MigrateAccount( }, migration.NewValueMigrationsPathMigrator( reporter, - 
m.valueMigrations(migrationRuntime.Interpreter, migrationRuntime.Accounts, reporter)..., + valueMigrations..., ), ) @@ -153,6 +170,11 @@ func (m *CadenceBaseMigrator) MigrateAccount( return nil, fmt.Errorf("failed to commit changes: %w", err) } + err = storage.CheckHealth() + if err != nil { + m.log.Err(err).Msg("storage health check failed") + } + // finalize the transaction result, err := migrationRuntime.TransactionState.FinalizeMainTransaction() if err != nil { @@ -160,24 +182,84 @@ func (m *CadenceBaseMigrator) MigrateAccount( } // Merge the changes to the original payloads. - return MergeRegisterChanges( + expectedAddresses := map[flow.Address]struct{}{ + flow.Address(address): {}, + } + + newPayloads, err := MergeRegisterChanges( migrationRuntime.Snapshot.Payloads, result.WriteSet, + expectedAddresses, + expectedAddresses, m.log, ) + if err != nil { + return nil, fmt.Errorf("failed to merge register changes: %w", err) + } + + if m.diffReporter != nil { + + accountDiffReporter := NewCadenceValueDiffReporter(address, m.diffReporter, m.logVerboseDiff) + + accountDiffReporter.DiffStates(oldPayloads, newPayloads, domains) + } + + return newPayloads, nil +} + +func checkPayloadsOwnership(payloads []*ledger.Payload, address common.Address, log zerolog.Logger) { + for _, payload := range payloads { + checkPayloadOwnership(payload, address, log) + } +} + +func checkPayloadOwnership(payload *ledger.Payload, address common.Address, log zerolog.Logger) { + registerID, _, err := convert.PayloadToRegister(payload) + if err != nil { + log.Error().Err(err).Msg("failed to convert payload to register") + return + } + + owner := registerID.Owner + + if len(owner) > 0 { + payloadAddress, err := common.BytesToAddress([]byte(owner)) + if err != nil { + log.Error().Err(err).Msgf("failed to convert register owner to address: %x", owner) + return + } + + if payloadAddress != address { + log.Error().Msgf( + "payload address %s does not match expected address %s", + payloadAddress, + 
address, + ) + } + } } // NewCadence1ValueMigrator creates a new CadenceBaseMigrator // which runs some of the Cadence value migrations (static types, entitlements, strings) func NewCadence1ValueMigrator( rwf reporters.ReportWriterFactory, + diffMigrations bool, + logVerboseDiff bool, errorMessageHandler *errorMessageHandler, compositeTypeConverter statictypes.CompositeTypeConverterFunc, interfaceTypeConverter statictypes.InterfaceTypeConverterFunc, ) *CadenceBaseMigrator { + + var diffReporter reporters.ReportWriter + if diffMigrations { + diffReporter = rwf.ReportWriter("cadence-value-migration-diff") + } + return &CadenceBaseMigrator{ - name: "cadence-value-migration", - reporter: rwf.ReportWriter("cadence-value-migrator"), + name: "cadence-value-migration", + reporter: rwf.ReportWriter("cadence-value-migrator"), + diffReporter: diffReporter, + logVerboseDiff: logVerboseDiff, valueMigrations: func( inter *interpreter.Interpreter, _ environment.Accounts, @@ -200,12 +282,21 @@ func NewCadence1ValueMigrator( // It populates the given map with the IDs of the capability controller it issues. func NewCadence1LinkValueMigrator( rwf reporters.ReportWriterFactory, + diffMigrations bool, + logVerboseDiff bool, errorMessageHandler *errorMessageHandler, capabilityMapping *capcons.CapabilityMapping, ) *CadenceBaseMigrator { + var diffReporter reporters.ReportWriter + if diffMigrations { + diffReporter = rwf.ReportWriter("cadence-link-value-migration-diff") + } + return &CadenceBaseMigrator{ - name: "cadence-link-value-migration", - reporter: rwf.ReportWriter("cadence-link-value-migrator"), + name: "cadence-link-value-migration", + reporter: rwf.ReportWriter("cadence-link-value-migrator"), + diffReporter: diffReporter, + logVerboseDiff: logVerboseDiff, valueMigrations: func( _ *interpreter.Interpreter, accounts environment.Accounts, @@ -234,12 +325,21 @@ func NewCadence1LinkValueMigrator( // generated by the link value migration. 
func NewCadence1CapabilityValueMigrator( rwf reporters.ReportWriterFactory, + diffMigrations bool, + logVerboseDiff bool, errorMessageHandler *errorMessageHandler, capabilityMapping *capcons.CapabilityMapping, ) *CadenceBaseMigrator { + var diffReporter reporters.ReportWriter + if diffMigrations { + diffReporter = rwf.ReportWriter("cadence-capability-value-migration-diff") + } + return &CadenceBaseMigrator{ - name: "cadence-capability-value-migration", - reporter: rwf.ReportWriter("cadence-capability-value-migrator"), + name: "cadence-capability-value-migration", + reporter: rwf.ReportWriter("cadence-capability-value-migrator"), + diffReporter: diffReporter, + logVerboseDiff: logVerboseDiff, valueMigrations: func( _ *interpreter.Interpreter, _ environment.Accounts, @@ -263,7 +363,7 @@ type errorMessageHandler struct { reportedProgramLoadingErrors sync.Map } -func (t *errorMessageHandler) FormatError(err error) string { +func (t *errorMessageHandler) FormatError(err error) (message string, showStack bool) { // Only report program loading errors once, // omit full error message for subsequent occurrences @@ -273,11 +373,13 @@ func (t *errorMessageHandler) FormatError(err error) string { location := programLoadingError.Location _, ok := t.reportedProgramLoadingErrors.LoadOrStore(location, struct{}{}) if ok { - return "error getting program" + return "error getting program", false } + + return err.Error(), false } - return err.Error() + return err.Error(), true } // cadenceValueMigrationReporter is the reporter for cadence value migrations @@ -317,26 +419,30 @@ func (t *cadenceValueMigrationReporter) Migrated( func (t *cadenceValueMigrationReporter) Error(err error) { - message := t.errorMessageHandler.FormatError(err) - var migrationErr migrations.StorageMigrationError - if errors.As(err, &migrationErr) { - storageKey := migrationErr.StorageKey - storageMapKey := migrationErr.StorageMapKey - migration := migrationErr.Migration - - t.log.Error().Msgf( - "failed to run 
%s in account %s, domain %s, key %s: %s", - migration, - storageKey.Address, - storageKey.Key, - storageMapKey, - message, - ) - } else { - t.log.Error().Msgf("failed to run migration: %s", message) + if !errors.As(err, &migrationErr) { + panic(cadenceErrors.NewUnreachableError()) } + + message, showStack := t.errorMessageHandler.FormatError(migrationErr.Err) + + storageKey := migrationErr.StorageKey + storageMapKey := migrationErr.StorageMapKey + migration := migrationErr.Migration + + if showStack && len(migrationErr.Stack) > 0 { + message = fmt.Sprintf("%s\n%s", message, migrationErr.Stack) + } + + t.log.Error().Msgf( + "failed to run %s in account %s, domain %s, key %s: %s", + migration, + storageKey.Address, + storageKey.Key, + storageMapKey, + message, + ) } func (t *cadenceValueMigrationReporter) MigratedPathCapability( diff --git a/cmd/util/ledger/migrations/cadence_values_migration_test.go b/cmd/util/ledger/migrations/cadence_values_migration_test.go index 4196572010f..832efb65577 100644 --- a/cmd/util/ledger/migrations/cadence_values_migration_test.go +++ b/cmd/util/ledger/migrations/cadence_values_migration_test.go @@ -2,15 +2,14 @@ package migrations import ( _ "embed" + "encoding/json" "fmt" "io" "sync" "testing" - "github.com/rs/zerolog" - _ "github.com/glebarez/go-sqlite" - + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -59,14 +58,14 @@ func TestCadenceValuesMigration(t *testing.T) { logWriter := &writer{} logger := zerolog.New(logWriter).Level(zerolog.ErrorLevel) - // TODO: >1 breaks atree storage map iteration - // and requires LinkValueMigration.LinkValueMigration to be thread-safe - const nWorker = 1 + const nWorker = 2 const chainID = flow.Emulator // TODO: EVM contract is not deployed in snapshot yet, so can't update it const evmContractChange = EVMContractChangeNone + const burnerContractChange = BurnerContractChangeDeploy + stagedContracts := []StagedContract{ { Contract: Contract{ @@ 
-82,13 +81,23 @@ func TestCadenceValuesMigration(t *testing.T) { rwf, nWorker, chainID, + false, + false, evmContractChange, + burnerContractChange, stagedContracts, + false, ) for _, migration := range migrations { - payloads, err = migration(payloads) - require.NoError(t, err, "migration failed, logs: %v", logWriter.logs) + payloads, err = migration.Migrate(payloads) + require.NoError( + t, + err, + "migration `%s` failed, logs: %v", + migration.Name, + logWriter.logs, + ) } // Assert the migrated payloads @@ -687,3 +696,189 @@ func (r *testReportWriter) Write(entry any) { } func (r *testReportWriter) Close() {} + +func TestBootstrappedStateMigration(t *testing.T) { + t.Parallel() + + rwf := &testReportWriterFactory{} + + logWriter := &writer{} + logger := zerolog.New(logWriter).Level(zerolog.ErrorLevel) + + const nWorker = 2 + + const chainID = flow.Emulator + // TODO: EVM contract is not deployed in snapshot yet, so can't update it + const evmContractChange = EVMContractChangeNone + + const burnerContractChange = BurnerContractChangeUpdate + + payloads, err := newBootstrapPayloads(chainID) + require.NoError(t, err) + + migrations := NewCadence1Migrations( + logger, + rwf, + nWorker, + chainID, + false, + false, + evmContractChange, + burnerContractChange, + nil, + false, + ) + + for _, migration := range migrations { + payloads, err = migration.Migrate(payloads) + require.NoError( + t, + err, + "migration `%s` failed, logs: %v", + migration.Name, + logWriter.logs, + ) + } + + // Check error logs. 
+ require.Empty(t, logWriter.logs) +} + +func TestProgramParsingError(t *testing.T) { + t.Parallel() + + rwf := &testReportWriterFactory{} + + logWriter := &writer{} + logger := zerolog.New(logWriter).Level(zerolog.ErrorLevel) + + const nWorker = 2 + + const chainID = flow.Emulator + chain := chainID.Chain() + + testAddress := common.Address(chain.ServiceAddress()) + + // TODO: EVM contract is not deployed in snapshot yet, so can't update it + const evmContractChange = EVMContractChangeNone + + const burnerContractChange = BurnerContractChangeUpdate + + payloads, err := newBootstrapPayloads(chainID) + require.NoError(t, err) + + runtime, err := NewMigratorRuntime( + testAddress, + payloads, + util.RuntimeInterfaceConfig{}, + ) + require.NoError(t, err) + + storage := runtime.Storage + + storageMap := storage.GetStorageMap( + testAddress, + common.PathDomainStorage.Identifier(), + true, + ) + + const contractName = "C" + contractLocation := common.NewAddressLocation(nil, testAddress, contractName) + + const nonExistingStructQualifiedIdentifier = contractName + ".NonExistingStruct" + + capabilityValue := interpreter.NewUnmeteredCapabilityValue( + 0, + interpreter.AddressValue(testAddress), + interpreter.NewReferenceStaticType( + nil, + interpreter.UnauthorizedAccess, + interpreter.NewCompositeStaticType( + nil, + contractLocation, + nonExistingStructQualifiedIdentifier, + contractLocation.TypeID(nil, nonExistingStructQualifiedIdentifier), + ), + ), + ) + + storageMap.WriteValue( + runtime.Interpreter, + interpreter.StringStorageMapKey("test"), + capabilityValue, + ) + + err = storage.Commit(runtime.Interpreter, false) + require.NoError(t, err) + + // finalize the transaction + result, err := runtime.TransactionState.FinalizeMainTransaction() + require.NoError(t, err) + + // Merge the changes to the original payloads. 
+ + expectedAddresses := map[flow.Address]struct{}{ + flow.Address(testAddress): {}, + } + + payloads, err = MergeRegisterChanges( + runtime.Snapshot.Payloads, + result.WriteSet, + expectedAddresses, + nil, + logger, + ) + require.NoError(t, err) + + // Set the code for the old program + + payloads = append( + payloads, + newContractPayload( + testAddress, + contractName, + []byte(`pub contract C {}`), + ), + ) + + // Migrate + + migrations := NewCadence1Migrations( + logger, + rwf, + nWorker, + chainID, + false, + false, + evmContractChange, + burnerContractChange, + nil, + false, + ) + + for _, migration := range migrations { + payloads, err = migration.Migrate(payloads) + require.NoError( + t, + err, + "migration `%s` failed, logs: %v", + migration.Name, + logWriter.logs, + ) + } + + // Check error logs + require.Len(t, logWriter.logs, 1) + + log := logWriter.logs[0] + + var entry struct { + Message string `json:"message"` + } + + err = json.Unmarshal([]byte(log), &entry) + require.NoError(t, err) + + assert.Contains(t, entry.Message, "`pub` is no longer a valid access keyword") + assert.NotContains(t, entry.Message, "runtime/debug.Stack()") +} diff --git a/cmd/util/ledger/migrations/change_contract_code_migration.go b/cmd/util/ledger/migrations/change_contract_code_migration.go index 3408fbc1248..c9808f38542 100644 --- a/cmd/util/ledger/migrations/change_contract_code_migration.go +++ b/cmd/util/ledger/migrations/change_contract_code_migration.go @@ -5,6 +5,7 @@ import ( "github.com/onflow/cadence/runtime/common" coreContracts "github.com/onflow/flow-core-contracts/lib/go/contracts" + "github.com/rs/zerolog" evm "github.com/onflow/flow-go/fvm/evm/stdlib" "github.com/onflow/flow-go/fvm/systemcontracts" @@ -17,9 +18,9 @@ type ChangeContractCodeMigration struct { var _ AccountBasedMigration = (*ChangeContractCodeMigration)(nil) -func NewChangeContractCodeMigration(chainID flow.ChainID) *ChangeContractCodeMigration { +func NewChangeContractCodeMigration(chainID 
flow.ChainID, log zerolog.Logger) *ChangeContractCodeMigration { return &ChangeContractCodeMigration{ - StagedContractsMigration: NewStagedContractsMigration(chainID). + StagedContractsMigration: NewStagedContractsMigration(chainID, log). // TODO: //WithContractUpdateValidation(). WithName("ChangeContractCodeMigration"), @@ -47,8 +48,17 @@ const ( EVMContractChangeFull ) +type BurnerContractChange uint8 + +const ( + BurnerContractChangeNone BurnerContractChange = iota + BurnerContractChangeDeploy + BurnerContractChangeUpdate +) + type SystemContractChangesOptions struct { - EVM EVMContractChange + EVM EVMContractChange + Burner BurnerContractChange } func BurnerAddressForChain(chainID flow.ChainID) flow.Address { @@ -243,14 +253,29 @@ func SystemContractChanges(chainID flow.ChainID, options SystemContractChangesOp panic(fmt.Errorf("unsupported EVM contract change option: %d", options.EVM)) } + // Burner contract + if options.Burner == BurnerContractChangeUpdate { + contractChanges = append( + contractChanges, + StagedContract{ + Address: common.Address(flow.HexToAddress(env.BurnerAddress)), + Contract: Contract{ + Name: "Burner", + Code: coreContracts.Burner(), + }, + }, + ) + } + return contractChanges } -func NewSystemContactsMigration( +func NewSystemContractsMigration( chainID flow.ChainID, + log zerolog.Logger, options SystemContractChangesOptions, ) *ChangeContractCodeMigration { - migration := NewChangeContractCodeMigration(chainID) + migration := NewChangeContractCodeMigration(chainID, log) for _, change := range SystemContractChanges(chainID, options) { migration.RegisterContractChange(change) } diff --git a/cmd/util/ledger/migrations/change_contract_code_migration_test.go b/cmd/util/ledger/migrations/change_contract_code_migration_test.go index d23aefbc802..53147a7bb70 100644 --- a/cmd/util/ledger/migrations/change_contract_code_migration_test.go +++ b/cmd/util/ledger/migrations/change_contract_code_migration_test.go @@ -57,8 +57,10 @@ func 
TestChangeContractCodeMigration(t *testing.T) { t.Run("no contracts", func(t *testing.T) { t.Parallel() - migration := migrations.NewChangeContractCodeMigration(flow.Emulator) log := zerolog.New(zerolog.NewTestWriter(t)) + + migration := migrations.NewChangeContractCodeMigration(flow.Emulator, log) + err := migration.InitMigration(log, nil, 0) require.NoError(t, err) @@ -75,8 +77,10 @@ func TestChangeContractCodeMigration(t *testing.T) { t.Run("1 contract - dont migrate", func(t *testing.T) { t.Parallel() - migration := migrations.NewChangeContractCodeMigration(flow.Emulator) log := zerolog.New(zerolog.NewTestWriter(t)) + + migration := migrations.NewChangeContractCodeMigration(flow.Emulator, log) + err := migration.InitMigration(log, nil, 0) require.NoError(t, err) @@ -97,8 +101,10 @@ func TestChangeContractCodeMigration(t *testing.T) { t.Run("1 contract - migrate", func(t *testing.T) { t.Parallel() - migration := migrations.NewChangeContractCodeMigration(flow.Emulator) log := zerolog.New(zerolog.NewTestWriter(t)) + + migration := migrations.NewChangeContractCodeMigration(flow.Emulator, log) + err := migration.InitMigration(log, nil, 0) require.NoError(t, err) @@ -129,8 +135,10 @@ func TestChangeContractCodeMigration(t *testing.T) { t.Run("2 contracts - migrate 1", func(t *testing.T) { t.Parallel() - migration := migrations.NewChangeContractCodeMigration(flow.Emulator) log := zerolog.New(zerolog.NewTestWriter(t)) + + migration := migrations.NewChangeContractCodeMigration(flow.Emulator, log) + err := migration.InitMigration(log, nil, 0) require.NoError(t, err) @@ -163,8 +171,10 @@ func TestChangeContractCodeMigration(t *testing.T) { t.Run("2 contracts - migrate 2", func(t *testing.T) { t.Parallel() - migration := migrations.NewChangeContractCodeMigration(flow.Emulator) log := zerolog.New(zerolog.NewTestWriter(t)) + + migration := migrations.NewChangeContractCodeMigration(flow.Emulator, log) + err := migration.InitMigration(log, nil, 0) require.NoError(t, err) @@ 
-206,8 +216,10 @@ func TestChangeContractCodeMigration(t *testing.T) { t.Run("2 contracts on different accounts - migrate 1", func(t *testing.T) { t.Parallel() - migration := migrations.NewChangeContractCodeMigration(flow.Emulator) log := zerolog.New(zerolog.NewTestWriter(t)) + + migration := migrations.NewChangeContractCodeMigration(flow.Emulator, log) + err := migration.InitMigration(log, nil, 0) require.NoError(t, err) @@ -240,8 +252,10 @@ func TestChangeContractCodeMigration(t *testing.T) { t.Run("not all contracts on one account migrated", func(t *testing.T) { t.Parallel() - migration := migrations.NewChangeContractCodeMigration(flow.Emulator) log := zerolog.New(zerolog.NewTestWriter(t)) + + migration := migrations.NewChangeContractCodeMigration(flow.Emulator, log) + err := migration.InitMigration(log, nil, 0) require.NoError(t, err) @@ -276,8 +290,10 @@ func TestChangeContractCodeMigration(t *testing.T) { t.Run("not all accounts migrated", func(t *testing.T) { t.Parallel() - migration := migrations.NewChangeContractCodeMigration(flow.Emulator) log := zerolog.New(zerolog.NewTestWriter(t)) + + migration := migrations.NewChangeContractCodeMigration(flow.Emulator, log) + err := migration.InitMigration(log, nil, 0) require.NoError(t, err) diff --git a/cmd/util/ledger/migrations/deploy_migration.go b/cmd/util/ledger/migrations/deploy_migration.go index e68de9ce746..0bc82e6f08a 100644 --- a/cmd/util/ledger/migrations/deploy_migration.go +++ b/cmd/util/ledger/migrations/deploy_migration.go @@ -6,62 +6,15 @@ import ( coreContracts "github.com/onflow/flow-core-contracts/lib/go/contracts" "github.com/rs/zerolog" - "github.com/onflow/flow-go/cmd/util/ledger/util" - "github.com/onflow/flow-go/engine/execution/computation" - "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/model/flow" ) -func NewTransactionBasedMigration( - tx *flow.TransactionBody, - chainID flow.ChainID, - logger zerolog.Logger, -) ledger.Migration { - return 
func(payloads []*ledger.Payload) ([]*ledger.Payload, error) { - - options := computation.DefaultFVMOptions(chainID, false, false) - options = append(options, - fvm.WithContractDeploymentRestricted(false), - fvm.WithContractRemovalRestricted(false), - fvm.WithAuthorizationChecksEnabled(false), - fvm.WithSequenceNumberCheckAndIncrementEnabled(false), - fvm.WithTransactionFeesEnabled(false)) - ctx := fvm.NewContext(options...) - - snapshot, err := util.NewPayloadSnapshot(payloads) - if err != nil { - return nil, err - } - - vm := fvm.NewVirtualMachine() - - executionSnapshot, res, err := vm.Run( - ctx, - fvm.Transaction(tx, 0), - snapshot, - ) - - if err != nil { - return nil, err - } - - if res.Err != nil { - return nil, res.Err - } - - return MergeRegisterChanges( - snapshot.Payloads, - executionSnapshot.WriteSet, - logger, - ) - } -} - func NewDeploymentMigration( chainID flow.ChainID, contract Contract, authorizer flow.Address, + expectedWriteAddresses map[flow.Address]struct{}, logger zerolog.Logger, ) ledger.Migration { @@ -79,21 +32,29 @@ func NewDeploymentMigration( AddArgument(jsoncdc.MustEncode(cadence.String(contract.Code))). 
AddAuthorizer(authorizer) - return NewTransactionBasedMigration(tx, chainID, logger) + return NewTransactionBasedMigration( + tx, + chainID, + logger, + expectedWriteAddresses, + ) } func NewBurnerDeploymentMigration( chainID flow.ChainID, logger zerolog.Logger, ) ledger.Migration { - + address := BurnerAddressForChain(chainID) return NewDeploymentMigration( chainID, Contract{ Name: "Burner", Code: coreContracts.Burner(), }, - BurnerAddressForChain(chainID), + address, + map[flow.Address]struct{}{ + address: {}, + }, logger, ) } diff --git a/cmd/util/ledger/migrations/deploy_migration_test.go b/cmd/util/ledger/migrations/deploy_migration_test.go new file mode 100644 index 00000000000..0ee9e88db49 --- /dev/null +++ b/cmd/util/ledger/migrations/deploy_migration_test.go @@ -0,0 +1,167 @@ +package migrations + +import ( + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func newBootstrapPayloads( + chainID flow.ChainID, + bootstrapProcedureOptions ...fvm.BootstrapProcedureOption, +) ([]*ledger.Payload, error) { + + ctx := fvm.NewContext( + fvm.WithChain(chainID.Chain()), + ) + + vm := fvm.NewVirtualMachine() + + storageSnapshot := snapshot.MapStorageSnapshot{} + + bootstrapProcedure := fvm.Bootstrap( + unittest.ServiceAccountPublicKey, + bootstrapProcedureOptions..., + ) + + executionSnapshot, _, err := vm.Run( + ctx, + bootstrapProcedure, + storageSnapshot, + ) + if err != nil { + return nil, err + } + + payloads := make([]*ledger.Payload, 0, len(executionSnapshot.WriteSet)) + + for registerID, registerValue := range executionSnapshot.WriteSet { + payloadKey := convert.RegisterIDToLedgerKey(registerID) + payload := 
ledger.NewPayload(payloadKey, registerValue) + payloads = append(payloads, payload) + } + + return payloads, nil +} + +func TestDeploy(t *testing.T) { + t.Parallel() + + const chainID = flow.Emulator + + chain := chainID.Chain() + + systemContracts := systemcontracts.SystemContractsForChain(chainID) + serviceAccountAddress := systemContracts.FlowServiceAccount.Address + fungibleTokenAddress := systemContracts.FungibleToken.Address + + targetAddress := serviceAccountAddress + + migration := NewDeploymentMigration( + chainID, + Contract{ + Name: "NewContract", + Code: []byte(fmt.Sprintf( + ` + import FungibleToken from %s + + access(all) + contract NewContract { + + access(all) + fun answer(): Int { + return 42 + } + } + `, + fungibleTokenAddress.HexWithPrefix(), + )), + }, + targetAddress, + map[flow.Address]struct{}{ + targetAddress: {}, + }, + zerolog.New(zerolog.NewTestWriter(t)), + ) + + bootstrapPayloads, err := newBootstrapPayloads(chainID) + require.NoError(t, err) + + filteredPayloads := make([]*ledger.Payload, 0, len(bootstrapPayloads)) + + // TODO: move to NewTransactionBasedMigration + + // Filter the bootstrapped payloads to only include the target account (service account) + // and the account where the fungible token is deployed + + for _, payload := range bootstrapPayloads { + registerID, _, err := convert.PayloadToRegister(payload) + require.NoError(t, err) + + if len(registerID.Owner) > 0 { + registerAddress := flow.Address([]byte(registerID.Owner)) + switch registerAddress { + case targetAddress, fungibleTokenAddress: + filteredPayloads = append(filteredPayloads, payload) + } + } else { + filteredPayloads = append(filteredPayloads, payload) + } + } + + newPayloads, err := migration(filteredPayloads) + require.NoError(t, err) + + txBody := flow.NewTransactionBody(). 
+ SetScript([]byte(fmt.Sprintf( + ` + import NewContract from %s + + transaction { + execute { + log(NewContract.answer()) + } + } + `, + targetAddress.HexWithPrefix(), + ))) + + vm := fvm.NewVirtualMachine() + + storageSnapshot := snapshot.MapStorageSnapshot{} + + for _, newPayload := range newPayloads { + registerID, registerValue, err := convert.PayloadToRegister(newPayload) + require.NoError(t, err) + + storageSnapshot[registerID] = registerValue + } + + ctx := fvm.NewContext( + fvm.WithChain(chain), + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + fvm.WithCadenceLogging(true), + ) + + _, output, err := vm.Run( + ctx, + fvm.Transaction(txBody, 0), + storageSnapshot, + ) + + require.NoError(t, err) + require.NoError(t, output.Err) + require.Len(t, output.Logs, 1) + require.Equal(t, "42", output.Logs[0]) +} diff --git a/cmd/util/ledger/migrations/merge.go b/cmd/util/ledger/migrations/merge.go index 458ba984ebe..81efe0a9fa5 100644 --- a/cmd/util/ledger/migrations/merge.go +++ b/cmd/util/ledger/migrations/merge.go @@ -11,6 +11,8 @@ import ( func MergeRegisterChanges( originalPayloads map[flow.RegisterID]*ledger.Payload, changes map[flow.RegisterID]flow.RegisterValue, + expectedChangeAddresses map[flow.Address]struct{}, + expectedOriginalAddresses map[flow.Address]struct{}, logger zerolog.Logger, ) ([]*ledger.Payload, error) { @@ -18,9 +20,25 @@ func MergeRegisterChanges( // Add all new payloads. for id, value := range changes { + delete(originalPayloads, id) if len(value) == 0 { continue } + + if expectedChangeAddresses != nil { + ownerAddress := flow.BytesToAddress([]byte(id.Owner)) + + if _, ok := expectedChangeAddresses[ownerAddress]; !ok { + // something was changed that does not belong to this account. Log it. + logger.Error(). + Str("key", id.String()). + Str("actual_address", ownerAddress.Hex()). + Interface("expected_addresses", expectedChangeAddresses). + Hex("value", value). 
+ Msg("key is part of the change set, but is for a different account") + } + } + key := convert.RegisterIDToLedgerKey(id) newPayloads = append(newPayloads, ledger.NewPayload(key, value)) } @@ -33,10 +51,18 @@ func MergeRegisterChanges( continue } - // If the payload had changed, then it has been added earlier. - // So skip old payload. - if _, contains := changes[id]; contains { - continue + if expectedOriginalAddresses != nil { + ownerAddress := flow.BytesToAddress([]byte(id.Owner)) + + if _, ok := expectedOriginalAddresses[ownerAddress]; !ok { + // something was changed that does not belong to this account. Log it. + logger.Error(). + Str("key", id.String()). + Str("actual_address", ownerAddress.Hex()). + Interface("expected_addresses", expectedOriginalAddresses). + Hex("value", value.Value()). + Msg("key is part of the original set, but is for a different account") + } } newPayloads = append(newPayloads, value) diff --git a/cmd/util/ledger/migrations/migrator_runtime.go b/cmd/util/ledger/migrations/migrator_runtime.go index 8218ee52b8f..074d0428d77 100644 --- a/cmd/util/ledger/migrations/migrator_runtime.go +++ b/cmd/util/ledger/migrations/migrator_runtime.go @@ -80,7 +80,7 @@ func NewMigratorRuntime( ri, runtime.NewCodesAndPrograms(), runtimeStorage, - runtime.NewCoverageReport(), + nil, ) inter, err := interpreter.NewInterpreter( diff --git a/cmd/util/ledger/migrations/prune_migration.go b/cmd/util/ledger/migrations/prune_migration.go index 3b694965568..780f526ef07 100644 --- a/cmd/util/ledger/migrations/prune_migration.go +++ b/cmd/util/ledger/migrations/prune_migration.go @@ -1,12 +1,20 @@ package migrations import ( + "fmt" + + "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/runtime/interpreter" + "github.com/rs/zerolog" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/model/flow" ) -// PruneMigration removes all the payloads with empty value +// PruneEmptyMigration removes all 
the payloads with empty value // this prunes the trie for values that has been deleted -func PruneMigration(payload []ledger.Payload) ([]ledger.Payload, error) { +func PruneEmptyMigration(payload []ledger.Payload) ([]ledger.Payload, error) { newPayload := make([]ledger.Payload, 0, len(payload)) for _, p := range payload { if len(p.Value()) > 0 { @@ -15,3 +23,86 @@ func PruneMigration(payload []ledger.Payload) ([]ledger.Payload, error) { } return newPayload, nil } + +// NewCadence1PruneMigration prunes some values from the service account in the Testnet state +func NewCadence1PruneMigration(chainID flow.ChainID, log zerolog.Logger) ledger.Migration { + if chainID != flow.Testnet { + return nil + } + + serviceAccountAddress := common.Address(chainID.Chain().ServiceAddress()) + + migrate := func(storage *runtime.Storage, inter *interpreter.Interpreter) error { + + err := pruneRandomBeaconHistory(storage, inter, log, serviceAccountAddress) + if err != nil { + return err + } + + return nil + } + + return NewAccountStorageMigration( + serviceAccountAddress, + log, + migrate, + ) +} + +func pruneRandomBeaconHistory( + storage *runtime.Storage, + inter *interpreter.Interpreter, + log zerolog.Logger, + serviceAccountAddress common.Address, +) error { + + log.Info().Msgf("pruning RandomBeaconHistory in service account %s", serviceAccountAddress) + + contracts := storage.GetStorageMap(serviceAccountAddress, runtime.StorageDomainContract, false) + if contracts == nil { + return fmt.Errorf("failed to get contracts storage map") + } + + randomBeaconHistory, ok := contracts.ReadValue( + nil, + interpreter.StringStorageMapKey("RandomBeaconHistory"), + ).(*interpreter.CompositeValue) + if !ok { + return fmt.Errorf("failed to read RandomBeaconHistory contract") + } + + randomSourceHistory, ok := randomBeaconHistory.GetField( + inter, + interpreter.EmptyLocationRange, + "randomSourceHistory", + ).(*interpreter.ArrayValue) + if !ok { + return fmt.Errorf("failed to read 
randomSourceHistory field") + } + + // Remove all but the last value from the randomSourceHistory + oldCount := randomSourceHistory.Count() + removalCount := oldCount - 1 + + for i := 0; i < removalCount; i++ { + randomSourceHistory.RemoveWithoutTransfer( + inter, + interpreter.EmptyLocationRange, + // NOTE: always remove the first element + 0, + ) + } + + // Check + if randomSourceHistory.Count() != 1 { + return fmt.Errorf("failed to prune randomSourceHistory") + } + + log.Info().Msgf( + "pruned %d entries in RandomBeaconHistory in service account %s", + removalCount, + serviceAccountAddress, + ) + + return nil +} diff --git a/cmd/util/ledger/migrations/staged_contracts_migration.go b/cmd/util/ledger/migrations/staged_contracts_migration.go index 829d7fdb9f8..fe3e5f9ad98 100644 --- a/cmd/util/ledger/migrations/staged_contracts_migration.go +++ b/cmd/util/ledger/migrations/staged_contracts_migration.go @@ -49,9 +49,10 @@ type Contract struct { var _ AccountBasedMigration = &StagedContractsMigration{} -func NewStagedContractsMigration(chainID flow.ChainID) *StagedContractsMigration { +func NewStagedContractsMigration(chainID flow.ChainID, log zerolog.Logger) *StagedContractsMigration { return &StagedContractsMigration{ name: "StagedContractsMigration", + log: log, chainID: chainID, stagedContracts: map[common.Address]map[flow.RegisterID]Contract{}, contractsByLocation: map[common.Location][]byte{}, @@ -77,7 +78,7 @@ func (m *StagedContractsMigration) Close() error { var sb strings.Builder sb.WriteString("failed to find all contract registers that need to be changed:\n") for address, contracts := range m.stagedContracts { - _, _ = fmt.Fprintf(&sb, "- address: %s\n", address) + _, _ = fmt.Fprintf(&sb, "- address: %s\n", address.HexWithPrefix()) for registerID := range contracts { _, _ = fmt.Fprintf(&sb, " - %s\n", flow.RegisterIDContractName(registerID)) } @@ -102,7 +103,7 @@ func (m *StagedContractsMigration) InitMigration( // Manually register burner contract 
burnerLocation := common.AddressLocation{ Name: "Burner", - Address: common.Address(m.chainID.Chain().ServiceAddress()), + Address: common.Address(BurnerAddressForChain(m.chainID)), } m.contractsByLocation[burnerLocation] = coreContracts.Burner() @@ -121,6 +122,18 @@ func (m *StagedContractsMigration) RegisterContractChange(change StagedContract) defer m.mutex.Unlock() address := change.Address + + chain := m.chainID.Chain() + + if _, err := chain.IndexFromAddress(flow.Address(address)); err != nil { + m.log.Error().Msgf( + "invalid contract update: invalid address for chain %s: %s (%s)", + m.chainID, + address.HexWithPrefix(), + change.Name, + ) + } + if _, ok := m.stagedContracts[address]; !ok { m.stagedContracts[address] = map[flow.RegisterID]Contract{} } @@ -167,13 +180,15 @@ func (m *StagedContractsMigration) contractUpdatesForAccount( func (m *StagedContractsMigration) MigrateAccount( _ context.Context, address common.Address, - payloads []*ledger.Payload, + oldPayloads []*ledger.Payload, ) ([]*ledger.Payload, error) { + checkPayloadsOwnership(oldPayloads, address, m.log) + contractUpdates, ok := m.contractUpdatesForAccount(address) if !ok { // no contracts to change on this address - return payloads, nil + return oldPayloads, nil } elaborations := map[common.Location]*sema.Elaboration{} @@ -190,12 +205,12 @@ func (m *StagedContractsMigration) MigrateAccount( }, } - mr, err := NewMigratorRuntime(address, payloads, config) + mr, err := NewMigratorRuntime(address, oldPayloads, config) if err != nil { return nil, err } - for payloadIndex, payload := range payloads { + for payloadIndex, payload := range oldPayloads { key, err := payload.Key() if err != nil { return nil, err @@ -231,13 +246,13 @@ func (m *StagedContractsMigration) MigrateAccount( if err != nil { m.log.Error().Err(err). 
Msgf( - "fail to update contract %s in account %s", + "failed to update contract %s in account %s", name, address.HexWithPrefix(), ) } else { // change contract code - payloads[payloadIndex] = ledger.NewPayload( + oldPayloads[payloadIndex] = ledger.NewPayload( key, newCode, ) @@ -250,14 +265,18 @@ func (m *StagedContractsMigration) MigrateAccount( if len(contractUpdates) > 0 { var sb strings.Builder - _, _ = fmt.Fprintf(&sb, "failed to find all contract registers that need to be changed for address %s:\n", address) + _, _ = fmt.Fprintf( + &sb, + "failed to find all contract registers that need to be changed for address %s:\n", + address.HexWithPrefix(), + ) for registerID := range contractUpdates { _, _ = fmt.Fprintf(&sb, " - %s\n", flow.RegisterIDContractName(registerID)) } return nil, fmt.Errorf(sb.String()) } - return payloads, nil + return oldPayloads, nil } func (m *StagedContractsMigration) checkContractUpdateValidity( diff --git a/cmd/util/ledger/migrations/staged_contracts_migration_test.go b/cmd/util/ledger/migrations/staged_contracts_migration_test.go index 8962a0b2790..b43cc23fe49 100644 --- a/cmd/util/ledger/migrations/staged_contracts_migration_test.go +++ b/cmd/util/ledger/migrations/staged_contracts_migration_test.go @@ -42,13 +42,14 @@ func (l *logWriter) Write(bytes []byte) (int, error) { func TestStagedContractsMigration(t *testing.T) { t.Parallel() - address1, err := common.HexToAddress("0x1") - require.NoError(t, err) + chainID := flow.Emulator + addressGenerator := chainID.Chain().NewAddressGenerator() - address2, err := common.HexToAddress("0x2") + address1, err := addressGenerator.NextAddress() require.NoError(t, err) - ctx := context.Background() + address2, err := addressGenerator.NextAddress() + require.NoError(t, err) t.Run("one contract", func(t *testing.T) { t.Parallel() @@ -62,21 +63,24 @@ func TestStagedContractsMigration(t *testing.T) { Name: "A", Code: []byte(newCode), }, - Address: address1, + Address: common.Address(address1), }, 
} - migration := NewStagedContractsMigration(flow.Emulator) - migration.RegisterContractUpdates(stagedContracts) - logWriter := &logWriter{} log := zerolog.New(logWriter) + + migration := NewStagedContractsMigration(chainID, log) + migration.RegisterContractUpdates(stagedContracts) + err := migration.InitMigration(log, nil, 0) require.NoError(t, err) - payloads, err := migration.MigrateAccount(ctx, address1, + payloads, err := migration.MigrateAccount( + context.Background(), + common.Address(address1), []*ledger.Payload{ - newContractPayload(address1, "A", []byte(oldCode)), + newContractPayload(common.Address(address1), "A", []byte(oldCode)), }, ) require.NoError(t, err) @@ -102,22 +106,25 @@ func TestStagedContractsMigration(t *testing.T) { Name: "A", Code: []byte(newCode), }, - Address: address1, + Address: common.Address(address1), }, } - migration := NewStagedContractsMigration(flow.Emulator). + logWriter := &logWriter{} + log := zerolog.New(logWriter) + + migration := NewStagedContractsMigration(chainID, log). WithContractUpdateValidation() migration.RegisterContractUpdates(stagedContracts) - logWriter := &logWriter{} - log := zerolog.New(logWriter) err := migration.InitMigration(log, nil, 0) require.NoError(t, err) - payloads, err := migration.MigrateAccount(ctx, address1, + payloads, err := migration.MigrateAccount( + context.Background(), + common.Address(address1), []*ledger.Payload{ - newContractPayload(address1, "A", []byte(oldCode)), + newContractPayload(common.Address(address1), "A", []byte(oldCode)), }, ) require.NoError(t, err) @@ -145,22 +152,25 @@ func TestStagedContractsMigration(t *testing.T) { Name: "A", Code: []byte(newCode), }, - Address: address1, + Address: common.Address(address1), }, } - migration := NewStagedContractsMigration(flow.Emulator). + logWriter := &logWriter{} + log := zerolog.New(logWriter) + + migration := NewStagedContractsMigration(chainID, log). 
WithContractUpdateValidation() migration.RegisterContractUpdates(stagedContracts) - logWriter := &logWriter{} - log := zerolog.New(logWriter) err := migration.InitMigration(log, nil, 0) require.NoError(t, err) - payloads, err := migration.MigrateAccount(ctx, address1, + payloads, err := migration.MigrateAccount( + context.Background(), + common.Address(address1), []*ledger.Payload{ - newContractPayload(address1, "A", []byte(oldCode)), + newContractPayload(common.Address(address1), "A", []byte(oldCode)), }, ) require.NoError(t, err) @@ -191,30 +201,33 @@ func TestStagedContractsMigration(t *testing.T) { Name: "A", Code: []byte(newCode1), }, - Address: address1, + Address: common.Address(address1), }, { Contract: Contract{ Name: "B", Code: []byte(newCode2), }, - Address: address1, + Address: common.Address(address1), }, } - migration := NewStagedContractsMigration(flow.Emulator). + logWriter := &logWriter{} + log := zerolog.New(logWriter) + + migration := NewStagedContractsMigration(chainID, log). 
WithContractUpdateValidation() migration.RegisterContractUpdates(stagedContracts) - logWriter := &logWriter{} - log := zerolog.New(logWriter) err := migration.InitMigration(log, nil, 0) require.NoError(t, err) - payloads, err := migration.MigrateAccount(ctx, address1, + payloads, err := migration.MigrateAccount( + context.Background(), + common.Address(address1), []*ledger.Payload{ - newContractPayload(address1, "A", []byte(oldCode1)), - newContractPayload(address1, "B", []byte(oldCode2)), + newContractPayload(common.Address(address1), "A", []byte(oldCode1)), + newContractPayload(common.Address(address1), "B", []byte(oldCode2)), }, ) require.NoError(t, err) @@ -244,40 +257,49 @@ func TestStagedContractsMigration(t *testing.T) { Name: "A", Code: []byte(newCode), }, - Address: address2, + Address: common.Address(address2), }, } - migration := NewStagedContractsMigration(flow.Emulator) - migration.RegisterContractUpdates(stagedContracts) - logWriter := &logWriter{} log := zerolog.New(logWriter) + + migration := NewStagedContractsMigration(chainID, log) + migration.RegisterContractUpdates(stagedContracts) + err := migration.InitMigration(log, nil, 0) require.NoError(t, err) - payloads := []*ledger.Payload{ - newContractPayload(address1, "A", []byte(oldCode)), - newContractPayload(address2, "A", []byte(oldCode)), + payloads1 := []*ledger.Payload{ + newContractPayload(common.Address(address1), "A", []byte(oldCode)), + } + payloads2 := []*ledger.Payload{ + newContractPayload(common.Address(address2), "A", []byte(oldCode)), } // Run migration for account 1, // There are no staged updates for contracts in account 1. // So codes should not have been updated. 
- payloads, err = migration.MigrateAccount(ctx, address1, payloads) + payloads1, err = migration.MigrateAccount( + context.Background(), + common.Address(address1), + payloads1, + ) require.NoError(t, err) - require.Len(t, payloads, 2) - require.Equal(t, oldCode, string(payloads[0].Value())) - require.Equal(t, oldCode, string(payloads[1].Value())) + require.Len(t, payloads1, 1) + require.Equal(t, oldCode, string(payloads1[0].Value())) // Run migration for account 2 // There is one staged update for contracts in account 2. // So one payload/contract-code should be updated, and the other should remain the same. - payloads, err = migration.MigrateAccount(ctx, address2, payloads) + payloads2, err = migration.MigrateAccount( + context.Background(), + common.Address(address2), + payloads2, + ) require.NoError(t, err) - require.Len(t, payloads, 2) - require.Equal(t, oldCode, string(payloads[0].Value())) - require.Equal(t, newCode, string(payloads[1].Value())) + require.Len(t, payloads2, 1) + require.Equal(t, newCode, string(payloads2[0].Value())) err = migration.Close() require.NoError(t, err) @@ -299,29 +321,32 @@ func TestStagedContractsMigration(t *testing.T) { Name: "A", Code: []byte(update1), }, - Address: address1, + Address: common.Address(address1), }, { Contract: Contract{ Name: "A", Code: []byte(update2), }, - Address: address1, + Address: common.Address(address1), }, } - migration := NewStagedContractsMigration(flow.Emulator) - logWriter := &logWriter{} log := zerolog.New(logWriter) + + migration := NewStagedContractsMigration(chainID, log) + err := migration.InitMigration(log, nil, 0) require.NoError(t, err) migration.RegisterContractUpdates(stagedContracts) - payloads, err := migration.MigrateAccount(ctx, address1, + payloads, err := migration.MigrateAccount( + context.Background(), + common.Address(address1), []*ledger.Payload{ - newContractPayload(address1, "A", []byte(oldCode)), + newContractPayload(common.Address(address1), "A", []byte(oldCode)), }, ) 
require.NoError(t, err) @@ -333,7 +358,7 @@ func TestStagedContractsMigration(t *testing.T) { require.Contains( t, logWriter.logs[0], - `existing staged update found for contract 0x0000000000000001.A. Previous update will be overwritten.`, + `existing staged update found`, ) require.Len(t, payloads, 1) @@ -351,21 +376,26 @@ func TestStagedContractsMigration(t *testing.T) { Name: "A", Code: []byte(newCode), }, - Address: address1, + Address: common.Address(address1), }, } - migration := NewStagedContractsMigration(flow.Emulator) - logWriter := &logWriter{} log := zerolog.New(logWriter) + + migration := NewStagedContractsMigration(chainID, log) + err := migration.InitMigration(log, nil, 0) require.NoError(t, err) migration.RegisterContractUpdates(stagedContracts) // NOTE: no payloads - _, err = migration.MigrateAccount(ctx, address1, nil) + _, err = migration.MigrateAccount( + context.Background(), + common.Address(address1), + nil, + ) require.ErrorContains(t, err, "failed to find all contract registers that need to be changed") }) } @@ -373,13 +403,15 @@ func TestStagedContractsMigration(t *testing.T) { func TestStagedContractsWithImports(t *testing.T) { t.Parallel() - address1, err := common.HexToAddress("0x1") - require.NoError(t, err) + chainID := flow.Emulator + + addressGenerator := chainID.Chain().NewAddressGenerator() - address2, err := common.HexToAddress("0x2") + address1, err := addressGenerator.NextAddress() require.NoError(t, err) - ctx := context.Background() + address2, err := addressGenerator.NextAddress() + require.NoError(t, err) t.Run("valid import", func(t *testing.T) { t.Parallel() @@ -414,34 +446,45 @@ func TestStagedContractsWithImports(t *testing.T) { Name: "A", Code: []byte(newCodeA), }, - Address: address1, + Address: common.Address(address1), }, { Contract: Contract{ Name: "B", Code: []byte(newCodeB), }, - Address: address2, + Address: common.Address(address2), }, } - migration := NewStagedContractsMigration(flow.Emulator) - 
migration.RegisterContractUpdates(stagedContracts) - logWriter := &logWriter{} log := zerolog.New(logWriter) + + migration := NewStagedContractsMigration(chainID, log) + migration.RegisterContractUpdates(stagedContracts) + err := migration.InitMigration(log, nil, 0) require.NoError(t, err) - payloads := []*ledger.Payload{ - newContractPayload(address1, "A", []byte(oldCodeA)), - newContractPayload(address2, "B", []byte(oldCodeB)), + payloads1 := []*ledger.Payload{ + newContractPayload(common.Address(address1), "A", []byte(oldCodeA)), + } + payloads2 := []*ledger.Payload{ + newContractPayload(common.Address(address2), "B", []byte(oldCodeB)), } - payloads, err = migration.MigrateAccount(ctx, address1, payloads) + payloads1, err = migration.MigrateAccount( + context.Background(), + common.Address(address1), + payloads1, + ) require.NoError(t, err) - payloads, err = migration.MigrateAccount(ctx, address2, payloads) + payloads2, err = migration.MigrateAccount( + context.Background(), + common.Address(address2), + payloads2, + ) require.NoError(t, err) err = migration.Close() @@ -449,29 +492,33 @@ func TestStagedContractsWithImports(t *testing.T) { require.Empty(t, logWriter.logs) - require.Len(t, payloads, 2) - require.Equal(t, newCodeA, string(payloads[0].Value())) - require.Equal(t, newCodeB, string(payloads[1].Value())) + require.Len(t, payloads1, 1) + assert.Equal(t, newCodeA, string(payloads1[0].Value())) + + require.Len(t, payloads2, 1) + assert.Equal(t, newCodeB, string(payloads2[0].Value())) }) - t.Run("broken import", func(t *testing.T) { + t.Run("broken import, no update staged", func(t *testing.T) { t.Parallel() - oldCodeA := fmt.Sprintf(` - import B from %s - access(all) contract A {} - `, + oldCodeA := fmt.Sprintf( + ` + import B from %s + access(all) contract A {} + `, address2.HexWithPrefix(), ) oldCodeB := `pub contract B {} // not compatible` - newCodeA := fmt.Sprintf(` - import B from %s - access(all) contract A { - access(all) fun foo(a: B.C) {} - } - 
`, + newCodeA := fmt.Sprintf( + ` + import B from %s + access(all) contract A { + access(all) fun foo(a: B.C) {} + } + `, address2.HexWithPrefix(), ) @@ -481,28 +528,39 @@ func TestStagedContractsWithImports(t *testing.T) { Name: "A", Code: []byte(newCodeA), }, - Address: address1, + Address: common.Address(address1), }, } - migration := NewStagedContractsMigration(flow.Emulator). + logWriter := &logWriter{} + log := zerolog.New(logWriter) + + migration := NewStagedContractsMigration(chainID, log). WithContractUpdateValidation() migration.RegisterContractUpdates(stagedContracts) - logWriter := &logWriter{} - log := zerolog.New(logWriter) err := migration.InitMigration(log, nil, 0) require.NoError(t, err) - payloads := []*ledger.Payload{ - newContractPayload(address1, "A", []byte(oldCodeA)), - newContractPayload(address2, "B", []byte(oldCodeB)), + payloads1 := []*ledger.Payload{ + newContractPayload(common.Address(address1), "A", []byte(oldCodeA)), + } + payloads2 := []*ledger.Payload{ + newContractPayload(common.Address(address2), "B", []byte(oldCodeB)), } - payloads, err = migration.MigrateAccount(ctx, address1, payloads) + payloads1, err = migration.MigrateAccount( + context.Background(), + common.Address(address1), + payloads1, + ) require.NoError(t, err) - payloads, err = migration.MigrateAccount(ctx, address2, payloads) + payloads2, err = migration.MigrateAccount( + context.Background(), + common.Address(address2), + payloads2, + ) require.NoError(t, err) err = migration.Close() @@ -512,13 +570,111 @@ func TestStagedContractsWithImports(t *testing.T) { require.Contains( t, logWriter.logs[0], + "cannot find declaration `B` in `ee82856bf20e2aa6.B`", + ) + + // Payloads should be the old ones + require.Len(t, payloads1, 1) + assert.Equal(t, oldCodeA, string(payloads1[0].Value())) + + require.Len(t, payloads2, 1) + assert.Equal(t, oldCodeB, string(payloads2[0].Value())) + }) + + t.Run("broken import ", func(t *testing.T) { + t.Parallel() + + oldCodeA := 
fmt.Sprintf( + ` + import B from %s + access(all) contract A {} + `, + address2.HexWithPrefix(), + ) + + oldCodeB := `pub contract B {} // not compatible` + + newCodeA := fmt.Sprintf( + ` + import B from %s + access(all) contract A { + access(all) fun foo(a: B.C) {} + } + `, + address2.HexWithPrefix(), + ) + + newCodeB := `pub contract B {} // not compatible` + + stagedContracts := []StagedContract{ + { + Contract: Contract{ + Name: "A", + Code: []byte(newCodeA), + }, + Address: common.Address(address1), + }, + { + Contract: Contract{ + Name: "B", + Code: []byte(newCodeB), + }, + Address: common.Address(address2), + }, + } + + logWriter := &logWriter{} + log := zerolog.New(logWriter) + + migration := NewStagedContractsMigration(chainID, log). + WithContractUpdateValidation() + migration.RegisterContractUpdates(stagedContracts) + + err := migration.InitMigration(log, nil, 0) + require.NoError(t, err) + + payloads1 := []*ledger.Payload{ + newContractPayload(common.Address(address1), "A", []byte(oldCodeA)), + } + payloads2 := []*ledger.Payload{ + newContractPayload(common.Address(address2), "B", []byte(oldCodeB)), + } + + payloads1, err = migration.MigrateAccount( + context.Background(), + common.Address(address1), + payloads1, + ) + require.NoError(t, err) + + payloads2, err = migration.MigrateAccount( + context.Background(), + common.Address(address2), + payloads2, + ) + require.NoError(t, err) + + err = migration.Close() + require.NoError(t, err) + + require.Len(t, logWriter.logs, 2) + assert.Contains( + t, + logWriter.logs[0], + "cannot find type in this scope: `B`", + ) + assert.Contains( + t, + logWriter.logs[1], "`pub` is no longer a valid access keyword", ) // Payloads should be the old ones - require.Len(t, payloads, 2) - require.Equal(t, oldCodeA, string(payloads[0].Value())) - require.Equal(t, oldCodeB, string(payloads[1].Value())) + require.Len(t, payloads1, 1) + assert.Equal(t, oldCodeA, string(payloads1[0].Value())) + + require.Len(t, payloads2, 1) + 
assert.Equal(t, oldCodeB, string(payloads2[0].Value())) }) t.Run("broken import in one, valid third contract", func(t *testing.T) { @@ -552,36 +708,48 @@ func TestStagedContractsWithImports(t *testing.T) { Name: "A", Code: []byte(newCodeA), }, - Address: address1, + Address: common.Address(address1), }, { Contract: Contract{ Name: "C", Code: []byte(newCodeC), }, - Address: address1, + Address: common.Address(address1), }, } - migration := NewStagedContractsMigration(flow.Emulator). + logWriter := &logWriter{} + log := zerolog.New(logWriter) + + migration := NewStagedContractsMigration(chainID, log). WithContractUpdateValidation() migration.RegisterContractUpdates(stagedContracts) - logWriter := &logWriter{} - log := zerolog.New(logWriter) err := migration.InitMigration(log, nil, 0) require.NoError(t, err) - payloads := []*ledger.Payload{ - newContractPayload(address1, "A", []byte(oldCodeA)), - newContractPayload(address2, "B", []byte(oldCodeB)), - newContractPayload(address1, "C", []byte(oldCodeC)), + payloads1 := []*ledger.Payload{ + newContractPayload(common.Address(address1), "A", []byte(oldCodeA)), + newContractPayload(common.Address(address1), "C", []byte(oldCodeC)), + } + + payloads2 := []*ledger.Payload{ + newContractPayload(common.Address(address2), "B", []byte(oldCodeB)), } - payloads, err = migration.MigrateAccount(ctx, address1, payloads) + payloads1, err = migration.MigrateAccount( + context.Background(), + common.Address(address1), + payloads1, + ) require.NoError(t, err) - payloads, err = migration.MigrateAccount(ctx, address2, payloads) + payloads2, err = migration.MigrateAccount( + context.Background(), + common.Address(address2), + payloads2, + ) require.NoError(t, err) err = migration.Close() @@ -591,17 +759,19 @@ func TestStagedContractsWithImports(t *testing.T) { require.Contains( t, logWriter.logs[0], - "`pub` is no longer a valid access keyword", + "cannot find declaration `B` in `ee82856bf20e2aa6.B`", ) // A and B should be the old ones. 
// C should be updated. // Type checking failures in unrelated contracts should not // stop other contracts from being migrated. - require.Len(t, payloads, 3) - require.Equal(t, oldCodeA, string(payloads[0].Value())) - require.Equal(t, oldCodeB, string(payloads[1].Value())) - require.Equal(t, newCodeC, string(payloads[2].Value())) + require.Len(t, payloads1, 2) + require.Equal(t, oldCodeA, string(payloads1[0].Value())) + require.Equal(t, newCodeC, string(payloads1[1].Value())) + + require.Len(t, payloads2, 1) + require.Equal(t, oldCodeB, string(payloads2[0].Value())) }) } @@ -706,10 +876,10 @@ func TestStagedContractsWithUpdateValidator(t *testing.T) { chainID := flow.Emulator systemContracts := systemcontracts.SystemContractsForChain(chainID) - address, err := common.HexToAddress("0x1") - require.NoError(t, err) + addressGenerator := chainID.Chain().NewAddressGenerator() - ctx := context.Background() + address, err := addressGenerator.NextAddress() + require.NoError(t, err) t.Run("FungibleToken.Vault", func(t *testing.T) { t.Parallel() @@ -753,25 +923,46 @@ func TestStagedContractsWithUpdateValidator(t *testing.T) { Name: "A", Code: []byte(newCodeA), }, - Address: address, + Address: common.Address(address), + }, + { + Contract: Contract{ + Name: "FungibleToken", + Code: []byte(ftContract), + }, + Address: ftAddress, }, } - migration := NewStagedContractsMigration(chainID) + logWriter := &logWriter{} + log := zerolog.New(logWriter) + + migration := NewStagedContractsMigration(chainID, log) migration.RegisterContractUpdates(stagedContracts) migration.WithContractUpdateValidation() - logWriter := &logWriter{} - log := zerolog.New(logWriter) err = migration.InitMigration(log, nil, 0) require.NoError(t, err) - payloads := []*ledger.Payload{ - newContractPayload(address, "A", []byte(oldCodeA)), + payloads1 := []*ledger.Payload{ + newContractPayload(common.Address(address), "A", []byte(oldCodeA)), + } + payloads2 := []*ledger.Payload{ newContractPayload(ftAddress, 
"FungibleToken", []byte(ftContract)), } - payloads, err = migration.MigrateAccount(ctx, address, payloads) + payloads1, err = migration.MigrateAccount( + context.Background(), + common.Address(address), + payloads1, + ) + require.NoError(t, err) + + payloads2, err = migration.MigrateAccount( + context.Background(), + ftAddress, + payloads2, + ) require.NoError(t, err) err = migration.Close() @@ -779,8 +970,11 @@ func TestStagedContractsWithUpdateValidator(t *testing.T) { require.Empty(t, logWriter.logs) - require.Len(t, payloads, 2) - require.Equal(t, newCodeA, string(payloads[0].Value())) + require.Len(t, payloads1, 1) + assert.Equal(t, newCodeA, string(payloads1[0].Value())) + + require.Len(t, payloads2, 1) + assert.Equal(t, ftContract, string(payloads2[0].Value())) }) t.Run("other type", func(t *testing.T) { @@ -826,34 +1020,56 @@ func TestStagedContractsWithUpdateValidator(t *testing.T) { Name: "A", Code: []byte(newCodeA), }, - Address: address, + Address: common.Address(address), }, } - migration := NewStagedContractsMigration(chainID) + logWriter := &logWriter{} + log := zerolog.New(logWriter) + + migration := NewStagedContractsMigration(chainID, log) migration.RegisterContractUpdates(stagedContracts) migration.WithContractUpdateValidation() - logWriter := &logWriter{} - log := zerolog.New(logWriter) err = migration.InitMigration(log, nil, 0) require.NoError(t, err) - payloads := []*ledger.Payload{ - newContractPayload(address, "A", []byte(oldCodeA)), + payloads1 := []*ledger.Payload{ + newContractPayload(common.Address(address), "A", []byte(oldCodeA)), + } + + payloads2 := []*ledger.Payload{ newContractPayload(otherAddress, "FungibleToken", []byte(ftContract)), } - payloads, err = migration.MigrateAccount(ctx, address, payloads) + payloads1, err = migration.MigrateAccount( + context.Background(), + common.Address(address), + payloads1, + ) + require.NoError(t, err) + + payloads2, err = migration.MigrateAccount( + context.Background(), + otherAddress, + 
payloads2, + ) require.NoError(t, err) err = migration.Close() require.NoError(t, err) require.Len(t, logWriter.logs, 1) - assert.Contains(t, logWriter.logs[0], "cannot update contract `A`") + assert.Contains(t, + logWriter.logs[0], + "cannot find declaration `FungibleToken` in `0000000000000002.FungibleToken`", + ) + + require.Len(t, payloads1, 1) + assert.Equal(t, oldCodeA, string(payloads1[0].Value())) + + require.Len(t, payloads2, 1) + assert.Equal(t, ftContract, string(payloads2[0].Value())) - require.Len(t, payloads, 2) - require.Equal(t, oldCodeA, string(payloads[0].Value())) }) } diff --git a/cmd/util/ledger/migrations/transaction_migration.go b/cmd/util/ledger/migrations/transaction_migration.go new file mode 100644 index 00000000000..16ad691c4ed --- /dev/null +++ b/cmd/util/ledger/migrations/transaction_migration.go @@ -0,0 +1,59 @@ +package migrations + +import ( + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/engine/execution/computation" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/model/flow" +) + +func NewTransactionBasedMigration( + tx *flow.TransactionBody, + chainID flow.ChainID, + logger zerolog.Logger, + expectedWriteAddresses map[flow.Address]struct{}, +) ledger.Migration { + return func(payloads []*ledger.Payload) ([]*ledger.Payload, error) { + + options := computation.DefaultFVMOptions(chainID, false, false) + options = append(options, + fvm.WithContractDeploymentRestricted(false), + fvm.WithContractRemovalRestricted(false), + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + fvm.WithTransactionFeesEnabled(false)) + ctx := fvm.NewContext(options...) 
+ + snapshot, err := util.NewPayloadSnapshot(payloads) + if err != nil { + return nil, err + } + + vm := fvm.NewVirtualMachine() + + executionSnapshot, res, err := vm.Run( + ctx, + fvm.Transaction(tx, 0), + snapshot, + ) + + if err != nil { + return nil, err + } + + if res.Err != nil { + return nil, res.Err + } + + return MergeRegisterChanges( + snapshot.Payloads, + executionSnapshot.WriteSet, + expectedWriteAddresses, + nil, + logger, + ) + } +} diff --git a/cmd/util/ledger/util/payload_file.go b/cmd/util/ledger/util/payload_file.go index 76d80a79cf5..1aad4a1bc10 100644 --- a/cmd/util/ledger/util/payload_file.go +++ b/cmd/util/ledger/util/payload_file.go @@ -314,7 +314,7 @@ func ReadPayloadFile(logger zerolog.Logger, payloadFile string) (bool, []*ledger return false, nil, fmt.Errorf("can't decode payload in CBOR: %w", err) } - payload, err := ledger.DecodePayloadWithoutPrefix(rawPayload, false, payloadEncodingVersion) + payload, err := ledger.DecodePayloadWithoutPrefix(rawPayload, true, payloadEncodingVersion) if err != nil { return false, nil, fmt.Errorf("can't decode payload 0x%x: %w", rawPayload, err) } diff --git a/fvm/systemcontracts/system_contracts.go b/fvm/systemcontracts/system_contracts.go index 688c79da614..3ac5fa46646 100644 --- a/fvm/systemcontracts/system_contracts.go +++ b/fvm/systemcontracts/system_contracts.go @@ -175,9 +175,10 @@ func (c SystemContracts) AsTemplateEnv() templates.Environment { FungibleTokenAddress: c.FungibleToken.Address.Hex(), FungibleTokenMetadataViewsAddress: c.FungibleToken.Address.Hex(), - NonFungibleTokenAddress: c.NonFungibleToken.Address.Hex(), - MetadataViewsAddress: c.MetadataViews.Address.Hex(), - ViewResolverAddress: c.ViewResolver.Address.Hex(), + NonFungibleTokenAddress: c.NonFungibleToken.Address.Hex(), + MetadataViewsAddress: c.MetadataViews.Address.Hex(), + ViewResolverAddress: c.ViewResolver.Address.Hex(), + FungibleTokenSwitchboardAddress: c.FungibleToken.Address.Hex(), } }