Skip to content

Commit

Permalink
Add test for and fix arbitrage swap execution output amount (#4287)
Browse files Browse the repository at this point in the history
## Describe your changes

This extends the existing arbitrage test to ensure that the created
`SwapExecution` has the expected trace and input/output values, as well
as fixes the bug described in #3790 where `SwapExecution`s were created
with incorrect `Output` values.

## Issue ticket number and link

#3790

## Checklist before requesting a review

- [x] If this code contains consensus-breaking changes, I have added the
"consensus-breaking" label.

## Additional Follow-up

This will require a migration to update the existing `SwapExecution`
structures. The migration should be as simple as adding the `input` to
the `output` for each of them.

---------

Signed-off-by: Chris Czub <[email protected]>
Co-authored-by: Erwan Or <[email protected]>
  • Loading branch information
zbuc and erwanor authored May 1, 2024
1 parent 4f99f3a commit 73c214d
Show file tree
Hide file tree
Showing 4 changed files with 216 additions and 114 deletions.
116 changes: 3 additions & 113 deletions crates/bin/pd/src/migrate.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,15 +5,15 @@
//! This module declares how local `pd` state should be altered, if at all,
//! in order to be compatible with the network post-chain-upgrade.
mod testnet72;
mod testnet74;

use anyhow::Context;
use futures::StreamExt as _;
use std::path::PathBuf;

use cnidarium::{EscapedByteSlice, StateDelta, StateRead, StateWrite, Storage};
use cnidarium::{StateDelta, StateRead, StateWrite, Storage};
use jmt::RootHash;
use penumbra_app::{app::StateReadExt, SUBSTORE_PREFIXES};
use penumbra_num::Amount;
use penumbra_sct::component::clock::{EpochManager, EpochRead};

use crate::testnet::generate::TestnetConfig;
Expand Down Expand Up @@ -202,117 +202,7 @@ impl Migration {
Ok(())
}
Migration::Testnet72 => testnet72::migrate(path_to_export, genesis_start).await,
Migration::Testnet74 => {
// Lookups for liquidity positions based on starting asset were ordered backwards
// and returning the positions with the least liquidity first. This migration
// needs to modify the keys stored under the JMT `dex/ra/` prefix key to reverse
// the ordering of the existing data.

// Setup:
let start_time = std::time::SystemTime::now();
let rocksdb_dir = path_to_export.join("rocksdb");
let storage =
Storage::load(rocksdb_dir.clone(), SUBSTORE_PREFIXES.to_vec()).await?;
let export_state = storage.latest_snapshot();
let root_hash = export_state.root_hash().await.expect("can get root hash");
let pre_upgrade_root_hash: RootHash = root_hash.into();
let pre_upgrade_height = export_state
.get_block_height()
.await
.expect("can get block height");
let post_upgrade_height = pre_upgrade_height.wrapping_add(1);

// We initialize a `StateDelta` and start by reaching into the JMT for all entries matching the
// swap execution prefix. Then, we write each entry to the nv-storage.
let mut delta = StateDelta::new(export_state);

let prefix_key = "dex/ra/".as_bytes();
tracing::trace!(prefix_key = ?EscapedByteSlice(&prefix_key), "updating liquidity position indices");
let mut liquidity_stream = delta.nonverifiable_prefix_raw(&prefix_key).boxed();

while let Some(r) = liquidity_stream.next().await {
let (old_key, asset_id) = r?;
tracing::info!(?old_key, asset_id = ?EscapedByteSlice(&asset_id), "migrating asset liquidity");

// Construct the new key:
let mut new_key = [0u8; 55];
new_key[0..7].copy_from_slice(b"dex/ra/");
// The "from" asset ID remains the same in both keys.
new_key[7..32 + 7].copy_from_slice(&old_key[7..32 + 7]);
// Use the complement of the amount to ensure that the keys are ordered in descending order.
let a_from_b = Amount::from_be_bytes(old_key[32 + 7..32 + 7 + 16].try_into()?);
new_key[32 + 7..32 + 7 + 16].copy_from_slice(&(!a_from_b).to_be_bytes());

// Delete the old incorrectly ordered key:
delta.nonverifiable_delete(old_key.clone());

// Store the correctly formatted new key:
delta.nonverifiable_put_raw(new_key.to_vec(), asset_id);
tracing::info!(
new_key = ?EscapedByteSlice(&new_key),
?old_key,
"updated liquidity index"
);
}

delta.put_block_height(0u64);

let post_upgrade_root_hash = storage.commit_in_place(delta).await?;
tracing::info!(?post_upgrade_root_hash, "post-upgrade root hash");

let migration_duration = start_time.elapsed().unwrap();

// Reload storage so we can make reads against its migrated state:
storage.release().await;
let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
let migrated_state = storage.latest_snapshot();

// The migration is complete, now we need to generate a genesis file. To do this, we need
// to lookup a validator view from the chain, and specify the post-upgrade app hash and
// initial height.
let chain_id = migrated_state.get_chain_id().await?;
let app_state = penumbra_app::genesis::Content {
chain_id,
..Default::default()
};
let mut genesis =
TestnetConfig::make_genesis(app_state.clone()).expect("can make genesis");
genesis.app_hash = post_upgrade_root_hash
.0
.to_vec()
.try_into()
.expect("infaillible conversion");
genesis.initial_height = post_upgrade_height as i64;
genesis.genesis_time = genesis_start.unwrap_or_else(|| {
let now = tendermint::time::Time::now();
tracing::info!(%now, "no genesis time provided, detecting a testing setup");
now
});
let checkpoint = post_upgrade_root_hash.0.to_vec();
let genesis = TestnetConfig::make_checkpoint(genesis, Some(checkpoint));

let genesis_json = serde_json::to_string(&genesis).expect("can serialize genesis");
tracing::info!("genesis: {}", genesis_json);
let genesis_path = path_to_export.join("genesis.json");
std::fs::write(genesis_path, genesis_json).expect("can write genesis");

let validator_state_path = path_to_export.join("priv_validator_state.json");
let fresh_validator_state =
crate::testnet::generate::TestnetValidator::initial_state();
std::fs::write(validator_state_path, fresh_validator_state)
.expect("can write validator state");

tracing::info!(
pre_upgrade_height,
post_upgrade_height,
?pre_upgrade_root_hash,
?post_upgrade_root_hash,
duration = migration_duration.as_secs(),
"successful migration!"
);

Ok(())
}
Migration::Testnet74 => testnet74::migrate(path_to_export, genesis_start).await,
}
}
}
Expand Down
165 changes: 165 additions & 0 deletions crates/bin/pd/src/migrate/testnet74.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,165 @@
//! Contains functions related to the migration script of Testnet74
use anyhow;
use cnidarium::{EscapedByteSlice, Snapshot, StateDelta, StateRead, StateWrite, Storage};
use futures::StreamExt as _;
use jmt::RootHash;
use penumbra_app::{app::StateReadExt as _, SUBSTORE_PREFIXES};
use penumbra_dex::SwapExecution;
use penumbra_num::Amount;
use penumbra_proto::{penumbra::core::component as pb, StateReadProto, StateWriteProto};
use penumbra_sct::component::clock::{EpochManager, EpochRead};
use std::path::PathBuf;

use crate::testnet::generate::TestnetConfig;

/// Updates arb execution output amounts to include the input amount instead
/// of reporting only profit (see #3790).
async fn fix_arb_execution_outputs(delta: &mut StateDelta<Snapshot>) -> anyhow::Result<()> {
    let mut arb_executions = delta.prefix_proto("dex/arb_execution/");
    while let Some(entry) = arb_executions.next().await {
        let (key, proto): (String, pb::dex::v1::SwapExecution) = entry?;
        let mut execution: SwapExecution = proto.try_into()?;
        // The recorded output previously held only the arb profit; the
        // corrected output is the full returned amount, i.e. input + profit.
        let corrected_amount = execution.output.amount + execution.input.amount;
        execution.output = execution.input.asset_id.value(corrected_amount);
        delta.put(key, execution);
    }
    Ok(())
}

/// Update the ordering of liquidity position indices to return in descending order (see #4189)
///
/// Lookups for liquidity positions based on starting asset were ordered backwards,
/// returning the positions with the least liquidity first. This migration rewrites
/// every key stored under the JMT `dex/ra/` prefix so that existing entries sort
/// in descending order of liquidity.
async fn update_lp_index_order(delta: &mut StateDelta<Snapshot>) -> anyhow::Result<()> {
    let prefix_key = "dex/ra/".as_bytes();
    tracing::trace!(prefix_key = ?EscapedByteSlice(&prefix_key), "updating liquidity position indices");
    let mut liquidity_stream = delta.nonverifiable_prefix_raw(&prefix_key).boxed();

    while let Some(entry) = liquidity_stream.next().await {
        let (old_key, asset_id): (Vec<u8>, Vec<u8>) = entry?;
        tracing::info!(?old_key, asset_id = ?EscapedByteSlice(&asset_id), "migrating asset liquidity");

        // Key layout: b"dex/ra/" (7 bytes) || "from" asset ID (32 bytes) || amount (16 bytes).
        let mut new_key = [0u8; 55];
        new_key[..7].copy_from_slice(b"dex/ra/");
        // The "from" asset ID carries over unchanged.
        new_key[7..39].copy_from_slice(&old_key[7..39]);
        // Bitwise-complement the amount so larger amounts sort first under a
        // lexicographic (ascending) byte ordering, i.e. descending liquidity.
        let a_from_b = Amount::from_be_bytes(old_key[39..55].try_into()?);
        new_key[39..55].copy_from_slice(&(!a_from_b).to_be_bytes());

        // Drop the old, ascending-ordered key...
        delta.nonverifiable_delete(old_key.clone());
        // ...and write the descending-ordered replacement.
        delta.nonverifiable_put_raw(new_key.to_vec(), asset_id);
        tracing::info!(
            new_key = ?EscapedByteSlice(&new_key),
            ?old_key,
            "updated liquidity index"
        );
    }

    Ok(())
}

/// Run the full migration, given an export path and a start time for genesis.
///
/// This migration script is responsible for:
///
/// - Updating the ordering of liquidity position indices to return in descending order (see #4189)
/// - Updating arb execution output amounts to include the input amount instead of reporting only profit (see #3790)
///
/// Affected JMT key prefixes:
///
/// - `dex/ra/`
/// - `dex/arb_execution/`
pub async fn migrate(
    path_to_export: PathBuf,
    genesis_start: Option<tendermint::time::Time>,
) -> anyhow::Result<()> {
    // Setup: load the latest snapshot of the exported state and record the
    // pre-upgrade root hash and height.
    let rocksdb_dir = path_to_export.join("rocksdb");
    let storage = Storage::load(rocksdb_dir.clone(), SUBSTORE_PREFIXES.to_vec()).await?;
    let export_state = storage.latest_snapshot();
    let root_hash = export_state.root_hash().await.expect("can get root hash");
    let pre_upgrade_root_hash: RootHash = root_hash.into();
    let pre_upgrade_height = export_state
        .get_block_height()
        .await
        .expect("can get block height");
    let post_upgrade_height = pre_upgrade_height.wrapping_add(1);

    // We initialize a `StateDelta` over the exported state, apply each
    // migration step to it, and then commit the changes in place.
    let mut delta = StateDelta::new(export_state);
    let (migration_duration, post_upgrade_root_hash) = {
        let start_time = std::time::SystemTime::now();

        // Update LP index order.
        update_lp_index_order(&mut delta).await?;

        // Fix the arb execution output amounts.
        fix_arb_execution_outputs(&mut delta).await?;

        // NOTE(review): the block height is reset to 0 here, matching the
        // prior inline Testnet74 migration — confirm this is intended.
        delta.put_block_height(0u64);
        let post_upgrade_root_hash = storage.commit_in_place(delta).await?;

        (start_time.elapsed().unwrap(), post_upgrade_root_hash)
    };

    tracing::info!(?post_upgrade_root_hash, "post-upgrade root hash");

    // Reload storage so we can make reads against its migrated state:
    storage.release().await;
    let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
    let migrated_state = storage.latest_snapshot();

    // The migration is complete, now we need to generate a genesis file. To do this, we need
    // to lookup a validator view from the chain, and specify the post-upgrade app hash and
    // initial height.
    let chain_id = migrated_state.get_chain_id().await?;
    let app_state = penumbra_app::genesis::Content {
        chain_id,
        ..Default::default()
    };
    let mut genesis = TestnetConfig::make_genesis(app_state.clone()).expect("can make genesis");
    genesis.app_hash = post_upgrade_root_hash
        .0
        .to_vec()
        .try_into()
        .expect("infallible conversion");
    genesis.initial_height = post_upgrade_height as i64;
    genesis.genesis_time = genesis_start.unwrap_or_else(|| {
        let now = tendermint::time::Time::now();
        tracing::info!(%now, "no genesis time provided, detecting a testing setup");
        now
    });
    let checkpoint = post_upgrade_root_hash.0.to_vec();
    let genesis = TestnetConfig::make_checkpoint(genesis, Some(checkpoint));

    let genesis_json = serde_json::to_string(&genesis).expect("can serialize genesis");
    tracing::info!("genesis: {}", genesis_json);
    let genesis_path = path_to_export.join("genesis.json");
    std::fs::write(genesis_path, genesis_json).expect("can write genesis");

    // Write a fresh validator state so the node starts cleanly post-upgrade.
    let validator_state_path = path_to_export.join("priv_validator_state.json");
    let fresh_validator_state = crate::testnet::generate::TestnetValidator::initial_state();
    std::fs::write(validator_state_path, fresh_validator_state).expect("can write validator state");

    tracing::info!(
        pre_upgrade_height,
        post_upgrade_height,
        ?pre_upgrade_root_hash,
        ?post_upgrade_root_hash,
        duration = migration_duration.as_secs(),
        "successful migration!"
    );

    Ok(())
}
2 changes: 1 addition & 1 deletion crates/core/component/dex/src/component/arb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ pub trait Arbitrage: StateWrite + Sized {
amount: filled_input,
},
output: Value {
amount: arb_profit,
amount: filled_input + arb_profit,
asset_id: arb_token,
},
};
Expand Down
47 changes: 47 additions & 0 deletions crates/core/component/dex/src/component/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -866,6 +866,12 @@ async fn basic_cycle_arb() -> anyhow::Result<()> {
/// The issue was that we did not treat the spill price as a strict
/// upper bound, which is necessary to ensure that the arbitrage logic
/// terminates.
///
/// *Arbitrage execution record bug:*
/// This test also ensures that the created `SwapExecution` has the
/// correct data. (See #3790).
async fn reproduce_arbitrage_loop_testnet_53() -> anyhow::Result<()> {
let _ = tracing_subscriber::fmt::try_init();
let storage = TempStorage::new().await?.apply_minimal_genesis().await?;
Expand Down Expand Up @@ -953,6 +959,47 @@ async fn reproduce_arbitrage_loop_testnet_53() -> anyhow::Result<()> {
tracing::info!("fetching the `ArbExecution`");
let arb_execution = state.arb_execution(0).await?.expect("arb was performed");
tracing::info!(?arb_execution, "fetched arb execution!");

// Validate that the arb execution has the correct data:
// Validate the traces.
assert_eq!(
arb_execution.traces,
vec![
vec![
penumbra.value(1u32.into()),
test_usd.value(110u32.into()),
Value {
amount: 1099999u64.into(),
asset_id: penumbra.id()
}
],
vec![
penumbra.value(1u32.into()),
test_usd.value(100u32.into()),
Value {
amount: 999999u64.into(),
asset_id: penumbra.id()
}
]
]
);

// Validate the input/output of the arb execution:
assert_eq!(
arb_execution.input,
Value {
amount: 2000000u64.into(),
asset_id: penumbra.id(),
}
);
assert_eq!(
arb_execution.output,
Value {
amount: 2099998u64.into(),
asset_id: penumbra.id(),
}
);

Ok(())
}

Expand Down

0 comments on commit 73c214d

Please sign in to comment.