From fdbfe54e67074eb84d5a4914178c3e5a248d7857 Mon Sep 17 00:00:00 2001 From: Dulat Rakhymkul <129622295+DudessaPr@users.noreply.github.com> Date: Tue, 9 Jul 2024 16:20:25 +0500 Subject: [PATCH] Removing conflicting crates (#1) * remove zstd * remove lz4 * remove gz and fix others * remove filetime/tar * remove perf from runtime * fix accountsdb const * comment out solana perf * remove symlink * update cargo lock * remove flate2 again * return bz instead of hz * try brackets * revert return removed and comment --- Cargo.lock | 20 - account-decoder/Cargo.toml | 2 +- account-decoder/src/lib.rs | 54 +- accounts-db/Cargo.toml | 6 +- accounts-db/src/accounts_cache.rs | 1 + accounts-db/src/accounts_db.rs | 28 +- accounts-db/src/accounts_index.rs | 1 + accounts-db/src/active_stats.rs | 2 +- accounts-db/src/bucket_map_holder_stats.rs | 7 +- accounts-db/src/cache_hash_data_stats.rs | 1 + accounts-db/src/hardened_unpack.rs | 746 ++++++++++--------- accounts-db/src/read_only_accounts_cache.rs | 2 +- accounts-db/src/secondary_index.rs | 1 + accounts-db/src/tiered_storage/byte_block.rs | 66 +- accounts-db/src/tiered_storage/footer.rs | 2 +- accounts-db/src/tiered_storage/hot.rs | 3 +- download-utils/src/lib.rs | 6 +- runtime/Cargo.toml | 14 +- runtime/src/bank.rs | 15 +- runtime/src/bank_forks.rs | 1 + runtime/src/snapshot_config.rs | 2 +- runtime/src/snapshot_utils.rs | 391 +++++----- runtime/src/snapshot_utils/archive_format.rs | 92 +-- runtime/src/stakes.rs | 1 + 24 files changed, 737 insertions(+), 727 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 47a20e89a6a27b..e1c869720b7747 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3270,16 +3270,6 @@ dependencies = [ "hashbrown 0.12.3", ] -[[package]] -name = "lz4" -version = "1.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6eab492fe7f8651add23237ea56dbf11b3c4ff762ab83d40a47f11433421f91" -dependencies = [ - "libc", - "lz4-sys", -] - [[package]] name = "lz4-sys" version = "1.9.5" @@ -5258,7 +5248,6 @@ dependencies = [ "spl-token-group-interface", "spl-token-metadata-interface", "thiserror", - "zstd", ] [[package]] @@ -5315,7 +5304,6 @@ dependencies = [ "blake3", "bv", "bytemuck", - "bzip2", "criterion", "crossbeam-channel", "dashmap", @@ -5326,7 +5314,6 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "lz4", "memmap2", "memoffset", "modular-bitfield", @@ -5358,7 +5345,6 @@ dependencies = [ "static_assertions", "strum", "strum_macros", - "tar", "tempfile", "test-case", "thiserror", @@ -6960,7 +6946,6 @@ dependencies = [ "bv", "bytemuck", "byteorder", - "bzip2", "crossbeam-channel", "dashmap", "dir-diff", @@ -6974,7 +6959,6 @@ dependencies = [ "libc", "libsecp256k1", "log", - "lz4", "memmap2", "memoffset", "mockall", @@ -7007,7 +6991,6 @@ dependencies = [ "solana-loader-v4-program", "solana-logger", "solana-measure", - "solana-perf", "solana-program-runtime", "solana-rayon-threadlimit", "solana-runtime", @@ -7024,12 +7007,9 @@ dependencies = [ "static_assertions", "strum", "strum_macros", - "symlink", - "tar", "tempfile", "test-case", "thiserror", - "zstd", ] [[package]] diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index 7aee8478b4f126..88b26a5351c279 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -26,7 +26,7 @@ spl-token-2022 = { workspace = true, features = ["no-entrypoint"] } spl-token-group-interface = { workspace = true } spl-token-metadata-interface = { workspace = true } thiserror = { workspace = true } -zstd = { workspace = true } +# zstd = { 
workspace = true } [dev-dependencies] assert_matches = { workspace = true } diff --git a/account-decoder/src/lib.rs b/account-decoder/src/lib.rs index 2f9e00ce5ea625..5e5291e981aee3 100644 --- a/account-decoder/src/lib.rs +++ b/account-decoder/src/lib.rs @@ -27,7 +27,7 @@ use { pubkey::Pubkey, }, std::{ - io::{Read, Write}, + // io::{Read, Write}, str::FromStr, }, }; @@ -65,15 +65,15 @@ impl UiAccountData { UiAccountData::Binary(blob, encoding) => match encoding { UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(), UiAccountEncoding::Base64 => BASE64_STANDARD.decode(blob).ok(), - UiAccountEncoding::Base64Zstd => { - BASE64_STANDARD.decode(blob).ok().and_then(|zstd_data| { - let mut data = vec![]; - zstd::stream::read::Decoder::new(zstd_data.as_slice()) - .and_then(|mut reader| reader.read_to_end(&mut data)) - .map(|_| data) - .ok() - }) - } + // UiAccountEncoding::Base64Zstd => { + // BASE64_STANDARD.decode(blob).ok().and_then(|zstd_data| { + // let mut data = vec![]; + // zstd::stream::read::Decoder::new(zstd_data.as_slice()) + // .and_then(|mut reader| reader.read_to_end(&mut data)) + // .map(|_| data) + // .ok() + // }) + // } UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None, }, } @@ -87,8 +87,8 @@ pub enum UiAccountEncoding { Base58, Base64, JsonParsed, - #[serde(rename = "base64+zstd")] - Base64Zstd, + // #[serde(rename = "base64+zstd")] + // Base64Zstd, // Unsupproted by svm-rollup } impl UiAccount { @@ -125,21 +125,21 @@ impl UiAccount { BASE64_STANDARD.encode(slice_data(account.data(), data_slice_config)), encoding, ), - UiAccountEncoding::Base64Zstd => { - let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap(); - match encoder - .write_all(slice_data(account.data(), data_slice_config)) - .and_then(|()| encoder.finish()) - { - Ok(zstd_data) => { - UiAccountData::Binary(BASE64_STANDARD.encode(zstd_data), encoding) - } - Err(_) => UiAccountData::Binary( - BASE64_STANDARD.encode(slice_data(account.data(), data_slice_config)), - UiAccountEncoding::Base64, - ), - } - } + // UiAccountEncoding::Base64Zstd => { + // let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap(); + // match encoder + // .write_all(slice_data(account.data(), data_slice_config)) + // .and_then(|()| encoder.finish()) + // { + // Ok(zstd_data) => { + // UiAccountData::Binary(BASE64_STANDARD.encode(zstd_data), encoding) + // } + // Err(_) => UiAccountData::Binary( + // BASE64_STANDARD.encode(slice_data(account.data(), data_slice_config)), + // UiAccountEncoding::Base64, + // ), + // } + // } UiAccountEncoding::JsonParsed => { if let Ok(parsed_data) = parse_account_data_v2(pubkey, account.owner(), account.data(), additional_data) diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index b3d259ef581f74..62632c24de696f 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -14,7 +14,7 @@ bincode = { workspace = true } blake3 = { workspace = true } bv = { workspace = true, features = ["serde"] } bytemuck = { workspace = true } -bzip2 = { workspace = true } +# bzip2 = { workspace = true } crossbeam-channel = { workspace = true } dashmap = { workspace = true, features = ["rayon", "raw-api"] } index_list = { workspace = true } @@ -22,7 +22,7 @@ indexmap = { workspace = true } itertools = { workspace = true } lazy_static = { workspace = true } log = { workspace = true } -lz4 = { workspace = true } +# lz4 = { workspace = true } memmap2 = { workspace = true } modular-bitfield = { workspace = true } num_cpus = { workspace = true } @@ 
-47,7 +47,7 @@ solana-stake-program = { workspace = true, optional = true } solana-svm = { workspace = true } solana-vote-program = { workspace = true, optional = true } static_assertions = { workspace = true } -tar = { workspace = true } +# tar = { workspace = true } tempfile = { workspace = true } thiserror = { workspace = true } diff --git a/accounts-db/src/accounts_cache.rs b/accounts-db/src/accounts_cache.rs index 3efbf0113b65f3..af470f81083a42 100644 --- a/accounts-db/src/accounts_cache.rs +++ b/accounts-db/src/accounts_cache.rs @@ -176,6 +176,7 @@ impl AccountsCache { is_frozen: AtomicBool::default(), }) } + #[allow(dead_code)] fn unique_account_writes_size(&self) -> u64 { self.cache .iter() diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 9fccc6065d19a8..c21f000975b235 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -22,6 +22,7 @@ mod geyser_plugin_utils; #[cfg(feature = "dev-context-only-utils")] use qualifier_attr::qualifiers; +#[allow(unused_imports)] use { crate::{ account_info::{AccountInfo, StorageLocation}, @@ -666,6 +667,7 @@ struct SlotIndexGenerationInfo { } #[derive(Default, Debug)] +#[allow(dead_code)] struct GenerateIndexTimings { pub total_time_us: u64, pub index_time: u64, @@ -696,7 +698,7 @@ struct StorageSizeAndCount { type StorageSizeAndCountMap = DashMap; impl GenerateIndexTimings { - pub fn report(&self, startup_stats: &StartupStats) { + pub fn report(&self, _startup_stats: &StartupStats) { // datapoint_info!( // "generate_index", // ("overall_us", self.total_time_us, i64), @@ -1503,6 +1505,7 @@ pub struct AccountsDb { } #[derive(Debug, Default)] +#[allow(dead_code)] pub struct AccountsStats { delta_hash_scan_time_total_us: AtomicU64, delta_hash_accumulate_time_total_us: AtomicU64, @@ -1547,7 +1550,7 @@ pub struct PurgeStats { } impl PurgeStats { - fn report(&self, metric_name: &'static str, report_interval_ms: Option) { + fn report(&self, _metric_name: &'static str, report_interval_ms: Option) { let should_report = report_interval_ms .map(|report_interval_ms| self.last_report.should_update(report_interval_ms)) .unwrap_or(true); @@ -3258,7 +3261,7 @@ impl AccountsDb { } sort.stop(); - let total_keys_count = pubkeys.len(); + let _total_keys_count = pubkeys.len(); let mut accounts_scan = Measure::start("accounts_scan"); let uncleaned_roots = self.accounts_index.clone_uncleaned_roots(); let found_not_zero_accum = AtomicU64::new(0); @@ -4293,6 +4296,7 @@ impl AccountsDb { /// first tuple element: the filtered-down candidates and /// second duple element: the candidates which /// are skipped in this round and might be eligible for the future shrink. + #[allow(unused_variables)] fn select_candidates_by_total_usage( &self, shrink_slots: &ShrinkCandidates, @@ -4825,7 +4829,7 @@ impl AccountsDb { let mut measure_shrink_all_candidates = Measure::start("shrink_all_candidate_slots-ms"); let num_candidates = shrink_slots.len(); - let shrink_candidates_count = shrink_slots.len(); + let _shrink_candidates_count = shrink_slots.len(); self.thread_pool_clean.install(|| { shrink_slots .into_par_iter() @@ -5399,7 +5403,7 @@ impl AccountsDb { // The latest version of the account existed in the index, but could not be // fetched from storage. 
This means a race occurred between this function and clean // accounts/purge_slots - let message = format!( + let _message = format!( "do_load() failed to get key: {pubkey} from storage, latest attempt was for \ slot: {slot}, storage_location: {storage_location:?}, load_hint: {load_hint:?}", ); @@ -6307,7 +6311,7 @@ impl AccountsDb { // Note even if force_flush is false, we will still flush all roots <= the // given `requested_flush_root`, even if some of the later roots cannot be used for // cleaning due to an ongoing scan - let (total_new_cleaned_roots, num_cleaned_roots_flushed, mut flush_stats) = self + let (_total_new_cleaned_roots, _num_cleaned_roots_flushed, mut flush_stats) = self .flush_rooted_accounts_cache( requested_flush_root, Some((&mut account_bytes_saved, &mut num_accounts_saved)), @@ -6319,7 +6323,7 @@ impl AccountsDb { // banks // If 'should_aggressively_flush_cache', then flush the excess ones to storage - let (total_new_excess_roots, num_excess_roots_flushed, flush_stats_aggressively) = + let (_total_new_excess_roots, _num_excess_roots_flushed, flush_stats_aggressively) = if self.should_aggressively_flush_cache() { // Start by flushing the roots // @@ -6332,12 +6336,12 @@ impl AccountsDb { }; flush_stats.accumulate(&flush_stats_aggressively); - let mut excess_slot_count = 0; + let mut _excess_slot_count = 0; let mut unflushable_unrooted_slot_count = 0; let max_flushed_root = self.accounts_cache.fetch_max_flush_root(); if self.should_aggressively_flush_cache() { let old_slots = self.accounts_cache.cached_frozen_slots(); - excess_slot_count = old_slots.len(); + _excess_slot_count = old_slots.len(); let mut flush_stats = FlushStats::default(); old_slots.into_iter().for_each(|old_slot| { // Don't flush slots that are known to be unrooted @@ -6721,7 +6725,7 @@ impl AccountsDb { "total_stores: {total_count}, newest_slot: {newest_slot}, oldest_slot: {oldest_slot}" ); - let total_alive_ratio = if total_bytes > 0 { + let _total_alive_ratio = if total_bytes > 0 { total_alive_bytes as f64 / total_bytes as f64 } else { 0. 
@@ -6855,7 +6859,7 @@ impl AccountsDb { let total_lamports = *total_lamports.lock().unwrap(); let mut hash_time = Measure::start("hash"); - let (accumulated_hash, hash_total) = AccountsHasher::calculate_hash(account_hashes); + let (accumulated_hash, _hash_total) = AccountsHasher::calculate_hash(account_hashes); hash_time.stop(); // datapoint_info!( @@ -8414,7 +8418,7 @@ impl AccountsDb { fn report_store_timings(&self) { if self.stats.last_store_report.should_update(1000) { - let read_cache_stats = self.read_only_accounts_cache.get_and_reset_stats(); + let _read_cache_stats = self.read_only_accounts_cache.get_and_reset_stats(); // datapoint_info!( // "accounts_db_store_timings", // ( diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 61fbd7115af7e7..db7ddf6ea2e44e 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -991,6 +991,7 @@ impl + Into> AccountsIndex { // Scan accounts and return latest version of each account that is either: // 1) rooted or // 2) present in ancestors + #[allow(unused_variables)] fn do_scan_accounts( &self, metric_name: &'static str, diff --git a/accounts-db/src/active_stats.rs b/accounts-db/src/active_stats.rs index 79e516981d4589..91252db5cbc7d4 100644 --- a/accounts-db/src/active_stats.rs +++ b/accounts-db/src/active_stats.rs @@ -64,7 +64,7 @@ impl ActiveStats { ActiveStatItem::HashMerkleTree => &self.hash_merkle, ActiveStatItem::HashScan => &self.hash_scan, }; - let value = modify_stat(stat); + let _value = modify_stat(stat); // match item { // ActiveStatItem::Clean => datapoint_info!("accounts_db_active", ("clean", value, i64)), // ActiveStatItem::SquashAncient => { diff --git a/accounts-db/src/bucket_map_holder_stats.rs b/accounts-db/src/bucket_map_holder_stats.rs index 01dd2dfaa7660e..0012e7bb788ce2 100644 --- a/accounts-db/src/bucket_map_holder_stats.rs +++ b/accounts-db/src/bucket_map_holder_stats.rs @@ -154,7 +154,7 @@ impl BucketMapHolderStats { ) } } - + #[allow(dead_code)] fn calc_percent(ms: u64, elapsed_ms: u64) -> f32 { if elapsed_ms == 0 { 0.0 @@ -185,7 +185,8 @@ impl BucketMapHolderStats { + self.held_in_mem.ref_count.load(Ordering::Relaxed); in_mem.saturating_sub(held_in_mem) as usize } - + #[allow(unused_variables)] + #[allow(unused_mut)] pub fn report_stats + Into>( &self, storage: &BucketMapHolder, @@ -216,7 +217,7 @@ impl BucketMapHolderStats { .unwrap_or_default(); let in_mem_stats = Self::get_stats(in_mem_per_bucket_counts); let disk_stats = Self::get_stats(disk_per_bucket_counts); - + #[allow(dead_code)] const US_PER_MS: u64 = 1_000; // all metrics during startup are written to a different data point diff --git a/accounts-db/src/cache_hash_data_stats.rs b/accounts-db/src/cache_hash_data_stats.rs index 6f575070bee353..faf1789e350831 100644 --- a/accounts-db/src/cache_hash_data_stats.rs +++ b/accounts-db/src/cache_hash_data_stats.rs @@ -1,4 +1,5 @@ //! 
Cached data for hashing accounts +#[allow(unused_imports)] use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; #[derive(Default, Debug)] diff --git a/accounts-db/src/hardened_unpack.rs b/accounts-db/src/hardened_unpack.rs index 27ce7b2772ec81..db9ca1ac4a545c 100644 --- a/accounts-db/src/hardened_unpack.rs +++ b/accounts-db/src/hardened_unpack.rs @@ -1,5 +1,6 @@ +#[allow(unused_imports)] use { - bzip2::bufread::BzDecoder, + // bzip2::bufread::BzDecoder, log::*, rand::{thread_rng, Rng}, solana_sdk::genesis_config::{GenesisConfig, DEFAULT_GENESIS_ARCHIVE, DEFAULT_GENESIS_FILE}, @@ -13,10 +14,10 @@ use { }, time::Instant, }, - tar::{ - Archive, - EntryType::{Directory, GNUSparse, Regular}, - }, + // tar::{ + // Archive, + // EntryType::{Directory, GNUSparse, Regular}, + // }, thiserror::Error, }; @@ -35,16 +36,20 @@ pub type Result = std::result::Result; // note that this is directly related to the mmaped data size // so protect against insane value // This is the file size including holes for sparse files +#[allow(dead_code)] const MAX_SNAPSHOT_ARCHIVE_UNPACKED_APPARENT_SIZE: u64 = 64 * 1024 * 1024 * 1024 * 1024; // 4 TiB; // This is the actually consumed disk usage for sparse files +#[allow(dead_code)] const MAX_SNAPSHOT_ARCHIVE_UNPACKED_ACTUAL_SIZE: u64 = 4 * 1024 * 1024 * 1024 * 1024; - +#[allow(dead_code)] const MAX_SNAPSHOT_ARCHIVE_UNPACKED_COUNT: u64 = 5_000_000; pub const MAX_GENESIS_ARCHIVE_UNPACKED_SIZE: u64 = 10 * 1024 * 1024; // 10 MiB +#[allow(dead_code)] const MAX_GENESIS_ARCHIVE_UNPACKED_COUNT: u64 = 100; +#[allow(dead_code)] fn checked_total_size_sum(total_size: u64, entry_size: u64, limit_size: u64) -> Result { trace!( "checked_total_size_sum: {} + {} < {}", @@ -60,7 +65,7 @@ fn checked_total_size_sum(total_size: u64, entry_size: u64, limit_size: u64) -> } Ok(total_size) } - +#[allow(dead_code)] fn checked_total_count_increment(total_count: u64, limit_count: u64) -> Result { let total_count = total_count + 1; if total_count > limit_count { @@ -70,7 +75,7 @@ fn checked_total_count_increment(total_count: u64, limit_count: u64) -> Result Result<()> { if !unpack_result { return Err(UnpackError::Archive(format!("failed to unpack: {path:?}"))); @@ -85,137 +90,138 @@ pub enum UnpackPath<'a> { Invalid, } -fn unpack_archive<'a, A, C, D>( - archive: &mut Archive, - apparent_limit_size: u64, - actual_limit_size: u64, - limit_count: u64, - mut entry_checker: C, // checks if entry is valid - entry_processor: D, // processes entry after setting permissions -) -> Result<()> -where - A: Read, - C: FnMut(&[&str], tar::EntryType) -> UnpackPath<'a>, - D: Fn(PathBuf), -{ - let mut apparent_total_size: u64 = 0; - let mut actual_total_size: u64 = 0; - let mut total_count: u64 = 0; - - let mut total_entries = 0; - for entry in archive.entries()? 
{ - let mut entry = entry?; - let path = entry.path()?; - let path_str = path.display().to_string(); - - // Although the `tar` crate safely skips at the actual unpacking, fail - // first by ourselves when there are odd paths like including `..` or / - // for our clearer pattern matching reasoning: - // https://docs.rs/tar/0.4.26/src/tar/entry.rs.html#371 - let parts = path - .components() - .map(|p| match p { - CurDir => Ok("."), - Normal(c) => c.to_str().ok_or(()), - _ => Err(()), // Prefix (for Windows) and RootDir are forbidden - }) - .collect::, _>>(); - - // Reject old-style BSD directory entries that aren't explicitly tagged as directories - let legacy_dir_entry = - entry.header().as_ustar().is_none() && entry.path_bytes().ends_with(b"/"); - let kind = entry.header().entry_type(); - let reject_legacy_dir_entry = legacy_dir_entry && (kind != Directory); - let (Ok(parts), false) = (parts, reject_legacy_dir_entry) else { - return Err(UnpackError::Archive(format!( - "invalid path found: {path_str:?}" - ))); - }; - - let unpack_dir = match entry_checker(parts.as_slice(), kind) { - UnpackPath::Invalid => { - return Err(UnpackError::Archive(format!( - "extra entry found: {:?} {:?}", - path_str, - entry.header().entry_type(), - ))); - } - UnpackPath::Ignore => { - continue; - } - UnpackPath::Valid(unpack_dir) => unpack_dir, - }; - - apparent_total_size = checked_total_size_sum( - apparent_total_size, - entry.header().size()?, - apparent_limit_size, - )?; - actual_total_size = checked_total_size_sum( - actual_total_size, - entry.header().entry_size()?, - actual_limit_size, - )?; - total_count = checked_total_count_increment(total_count, limit_count)?; - - let account_filename = match parts.as_slice() { - ["accounts", account_filename] => Some(PathBuf::from(account_filename)), - _ => None, - }; - let entry_path = if let Some(account) = account_filename { - // Special case account files. We're unpacking an account entry inside one of the - // account_paths returned by `entry_checker`. We want to unpack into - // account_path/ instead of account_path/accounts/ so we strip the - // accounts/ prefix. - sanitize_path(&account, unpack_dir) - } else { - sanitize_path(&path, unpack_dir) - }?; // ? handles file system errors - let Some(entry_path) = entry_path else { - continue; // skip it - }; - - let unpack = entry.unpack(&entry_path); - check_unpack_result(unpack.map(|_unpack| true)?, path_str)?; - - // Sanitize permissions. - let mode = match entry.header().entry_type() { - GNUSparse | Regular => 0o644, - _ => 0o755, - }; - set_perms(&entry_path, mode)?; - - // Process entry after setting permissions - entry_processor(entry_path); - - total_entries += 1; - } - info!("unpacked {} entries total", total_entries); - - return Ok(()); - - #[cfg(unix)] - fn set_perms(dst: &Path, mode: u32) -> std::io::Result<()> { - use std::os::unix::fs::PermissionsExt; - - let perm = fs::Permissions::from_mode(mode as _); - fs::set_permissions(dst, perm) - } - - #[cfg(windows)] - fn set_perms(dst: &Path, _mode: u32) -> std::io::Result<()> { - let mut perm = fs::metadata(dst)?.permissions(); - // This is OK for Windows, but clippy doesn't realize we're doing this - // only on Windows. 
- #[allow(clippy::permissions_set_readonly_false)] - perm.set_readonly(false); - fs::set_permissions(dst, perm) - } -} +// fn unpack_archive<'a, A, C, D>( +// // archive: &mut Archive, +// apparent_limit_size: u64, +// actual_limit_size: u64, +// limit_count: u64, +// mut entry_checker: C, // checks if entry is valid +// entry_processor: D, // processes entry after setting permissions +// ) -> Result<()> +// where +// A: Read, +// C: FnMut(&[&str], tar::EntryType) -> UnpackPath<'a>, +// D: Fn(PathBuf), +// { +// let mut apparent_total_size: u64 = 0; +// let mut actual_total_size: u64 = 0; +// let mut total_count: u64 = 0; + +// let mut total_entries = 0; +// for entry in archive.entries()? { +// let mut entry = entry?; +// let path = entry.path()?; +// let path_str = path.display().to_string(); + +// // Although the `tar` crate safely skips at the actual unpacking, fail +// // first by ourselves when there are odd paths like including `..` or / +// // for our clearer pattern matching reasoning: +// // https://docs.rs/tar/0.4.26/src/tar/entry.rs.html#371 +// let parts = path +// .components() +// .map(|p| match p { +// CurDir => Ok("."), +// Normal(c) => c.to_str().ok_or(()), +// _ => Err(()), // Prefix (for Windows) and RootDir are forbidden +// }) +// .collect::, _>>(); + +// // Reject old-style BSD directory entries that aren't explicitly tagged as directories +// let legacy_dir_entry = +// entry.header().as_ustar().is_none() && entry.path_bytes().ends_with(b"/"); +// let kind = entry.header().entry_type(); +// let reject_legacy_dir_entry = legacy_dir_entry && (kind != Directory); +// let (Ok(parts), false) = (parts, reject_legacy_dir_entry) else { +// return Err(UnpackError::Archive(format!( +// "invalid path found: {path_str:?}" +// ))); +// }; + +// let unpack_dir = match entry_checker(parts.as_slice(), kind) { +// UnpackPath::Invalid => { +// return Err(UnpackError::Archive(format!( +// "extra entry found: {:?} {:?}", +// path_str, +// entry.header().entry_type(), +// ))); +// } +// UnpackPath::Ignore => { +// continue; +// } +// UnpackPath::Valid(unpack_dir) => unpack_dir, +// }; + +// apparent_total_size = checked_total_size_sum( +// apparent_total_size, +// entry.header().size()?, +// apparent_limit_size, +// )?; +// actual_total_size = checked_total_size_sum( +// actual_total_size, +// entry.header().entry_size()?, +// actual_limit_size, +// )?; +// total_count = checked_total_count_increment(total_count, limit_count)?; + +// let account_filename = match parts.as_slice() { +// ["accounts", account_filename] => Some(PathBuf::from(account_filename)), +// _ => None, +// }; +// let entry_path = if let Some(account) = account_filename { +// // Special case account files. We're unpacking an account entry inside one of the +// // account_paths returned by `entry_checker`. We want to unpack into +// // account_path/ instead of account_path/accounts/ so we strip the +// // accounts/ prefix. +// sanitize_path(&account, unpack_dir) +// } else { +// sanitize_path(&path, unpack_dir) +// }?; // ? handles file system errors +// let Some(entry_path) = entry_path else { +// continue; // skip it +// }; + +// let unpack = entry.unpack(&entry_path); +// check_unpack_result(unpack.map(|_unpack| true)?, path_str)?; + +// // Sanitize permissions. 
+// let mode = match entry.header().entry_type() { +// // GNUSparse | Regular => 0o644, +// _ => 0o755, +// }; +// set_perms(&entry_path, mode)?; + +// // Process entry after setting permissions +// entry_processor(entry_path); + +// total_entries += 1; +// } +// info!("unpacked {} entries total", total_entries); + +// return Ok(()); + +// #[cfg(unix)] +// fn set_perms(dst: &Path, mode: u32) -> std::io::Result<()> { +// use std::os::unix::fs::PermissionsExt; + +// let perm = fs::Permissions::from_mode(mode as _); +// fs::set_permissions(dst, perm) +// } + +// #[cfg(windows)] +// fn set_perms(dst: &Path, _mode: u32) -> std::io::Result<()> { +// let mut perm = fs::metadata(dst)?.permissions(); +// // This is OK for Windows, but clippy doesn't realize we're doing this +// // only on Windows. +// #[allow(clippy::permissions_set_readonly_false)] +// perm.set_readonly(false); +// fs::set_permissions(dst, perm) +// } +// } // return Err on file system error // return Some(path) if path is good // return None if we should skip this file +#[allow(dead_code)] fn sanitize_path(entry_path: &Path, dst: &Path) -> Result> { // We cannot call unpack_in because it errors if we try to use 2 account paths. // So, this code is borrowed from unpack_in @@ -265,6 +271,7 @@ fn sanitize_path(entry_path: &Path, dst: &Path) -> Result> { // copied from: // https://github.com/alexcrichton/tar-rs/blob/d90a02f582c03dfa0fd11c78d608d0974625ae5d/src/entry.rs#L781 +#[allow(dead_code)] fn validate_inside_dst(dst: &Path, file_dst: &Path) -> Result { // Abort if target (canonical) parent is outside of `dst` let canon_parent = file_dst.canonicalize().map_err(|err| { @@ -302,105 +309,105 @@ impl ParallelSelector { } /// Unpacks snapshot and collects AppendVec file names & paths -pub fn unpack_snapshot( - archive: &mut Archive, - ledger_dir: &Path, - account_paths: &[PathBuf], - parallel_selector: Option, -) -> Result { - let mut unpacked_append_vec_map = UnpackedAppendVecMap::new(); - - unpack_snapshot_with_processors( - archive, - ledger_dir, - account_paths, - parallel_selector, - |file, path| { - unpacked_append_vec_map.insert(file.to_string(), path.join("accounts").join(file)); - }, - |_| {}, - ) - .map(|_| unpacked_append_vec_map) -} +// pub fn unpack_snapshot( +// archive: &mut Archive, +// ledger_dir: &Path, +// account_paths: &[PathBuf], +// parallel_selector: Option, +// ) -> Result { +// let mut unpacked_append_vec_map = UnpackedAppendVecMap::new(); + +// unpack_snapshot_with_processors( +// archive, +// ledger_dir, +// account_paths, +// parallel_selector, +// |file, path| { +// unpacked_append_vec_map.insert(file.to_string(), path.join("accounts").join(file)); +// }, +// |_| {}, +// ) +// .map(|_| unpacked_append_vec_map) +// } /// Unpacks snapshots and sends entry file paths through the `sender` channel -pub fn streaming_unpack_snapshot( - archive: &mut Archive, - ledger_dir: &Path, - account_paths: &[PathBuf], - parallel_selector: Option, - sender: &crossbeam_channel::Sender, -) -> Result<()> { - unpack_snapshot_with_processors( - archive, - ledger_dir, - account_paths, - parallel_selector, - |_, _| {}, - |entry_path_buf| { - if entry_path_buf.is_file() { - sender.send(entry_path_buf).unwrap(); - } - }, - ) -} - -fn unpack_snapshot_with_processors( - archive: &mut Archive, - ledger_dir: &Path, - account_paths: &[PathBuf], - parallel_selector: Option, - mut accounts_path_processor: F, - entry_processor: G, -) -> Result<()> -where - A: Read, - F: FnMut(&str, &Path), - G: Fn(PathBuf), -{ - 
assert!(!account_paths.is_empty()); - let mut i = 0; - - unpack_archive( - archive, - MAX_SNAPSHOT_ARCHIVE_UNPACKED_APPARENT_SIZE, - MAX_SNAPSHOT_ARCHIVE_UNPACKED_ACTUAL_SIZE, - MAX_SNAPSHOT_ARCHIVE_UNPACKED_COUNT, - |parts, kind| { - if is_valid_snapshot_archive_entry(parts, kind) { - i += 1; - match ¶llel_selector { - Some(parallel_selector) => { - if !parallel_selector.select_index(i - 1) { - return UnpackPath::Ignore; - } - } - None => {} - }; - if let ["accounts", file] = parts { - // Randomly distribute the accounts files about the available `account_paths`, - let path_index = thread_rng().gen_range(0..account_paths.len()); - match account_paths - .get(path_index) - .map(|path_buf| path_buf.as_path()) - { - Some(path) => { - accounts_path_processor(file, path); - UnpackPath::Valid(path) - } - None => UnpackPath::Invalid, - } - } else { - UnpackPath::Valid(ledger_dir) - } - } else { - UnpackPath::Invalid - } - }, - entry_processor, - ) -} - +// pub fn streaming_unpack_snapshot( +// archive: &mut Archive, +// ledger_dir: &Path, +// account_paths: &[PathBuf], +// parallel_selector: Option, +// sender: &crossbeam_channel::Sender, +// ) -> Result<()> { +// unpack_snapshot_with_processors( +// archive, +// ledger_dir, +// account_paths, +// parallel_selector, +// |_, _| {}, +// |entry_path_buf| { +// if entry_path_buf.is_file() { +// sender.send(entry_path_buf).unwrap(); +// } +// }, +// ) +// } + +// fn unpack_snapshot_with_processors( +// archive: &mut Archive, +// ledger_dir: &Path, +// account_paths: &[PathBuf], +// parallel_selector: Option, +// mut accounts_path_processor: F, +// entry_processor: G, +// ) -> Result<()> +// where +// A: Read, +// F: FnMut(&str, &Path), +// G: Fn(PathBuf), +// { +// assert!(!account_paths.is_empty()); +// let mut i = 0; + +// unpack_archive( +// archive, +// MAX_SNAPSHOT_ARCHIVE_UNPACKED_APPARENT_SIZE, +// MAX_SNAPSHOT_ARCHIVE_UNPACKED_ACTUAL_SIZE, +// MAX_SNAPSHOT_ARCHIVE_UNPACKED_COUNT, +// |parts, kind| { +// if is_valid_snapshot_archive_entry(parts, kind) { +// i += 1; +// match ¶llel_selector { +// Some(parallel_selector) => { +// if !parallel_selector.select_index(i - 1) { +// return UnpackPath::Ignore; +// } +// } +// None => {} +// }; +// if let ["accounts", file] = parts { +// // Randomly distribute the accounts files about the available `account_paths`, +// let path_index = thread_rng().gen_range(0..account_paths.len()); +// match account_paths +// .get(path_index) +// .map(|path_buf| path_buf.as_path()) +// { +// Some(path) => { +// accounts_path_processor(file, path); +// UnpackPath::Valid(path) +// } +// None => UnpackPath::Invalid, +// } +// } else { +// UnpackPath::Valid(ledger_dir) +// } +// } else { +// UnpackPath::Invalid +// } +// }, +// entry_processor, +// ) +// } +#[allow(dead_code)] fn all_digits(v: &str) -> bool { if v.is_empty() { return false; @@ -412,7 +419,7 @@ fn all_digits(v: &str) -> bool { } true } - +#[allow(dead_code)] fn like_storage(v: &str) -> bool { let mut periods = 0; let mut saw_numbers = false; @@ -434,21 +441,21 @@ fn like_storage(v: &str) -> bool { saw_numbers && periods == 1 } -fn is_valid_snapshot_archive_entry(parts: &[&str], kind: tar::EntryType) -> bool { - match (parts, kind) { - (["version"], Regular) => true, - (["accounts"], Directory) => true, - (["accounts", file], GNUSparse) if like_storage(file) => true, - (["accounts", file], Regular) if like_storage(file) => true, - (["snapshots"], Directory) => true, - (["snapshots", "status_cache"], GNUSparse) => true, - (["snapshots", "status_cache"], 
Regular) => true, - (["snapshots", dir, file], GNUSparse) if all_digits(dir) && all_digits(file) => true, - (["snapshots", dir, file], Regular) if all_digits(dir) && all_digits(file) => true, - (["snapshots", dir], Directory) if all_digits(dir) => true, - _ => false, - } -} +// fn is_valid_snapshot_archive_entry(parts: &[&str], kind: tar::EntryType) -> bool { +// match (parts, kind) { +// (["version"], Regular) => true, +// (["accounts"], Directory) => true, +// (["accounts", file], GNUSparse) if like_storage(file) => true, +// (["accounts", file], Regular) if like_storage(file) => true, +// (["snapshots"], Directory) => true, +// (["snapshots", "status_cache"], GNUSparse) => true, +// (["snapshots", "status_cache"], Regular) => true, +// (["snapshots", dir, file], GNUSparse) if all_digits(dir) && all_digits(file) => true, +// (["snapshots", dir, file], Regular) if all_digits(dir) && all_digits(file) => true, +// (["snapshots", dir], Directory) if all_digits(dir) => true, +// _ => false, +// } +// } #[derive(Error, Debug)] pub enum OpenGenesisConfigError { @@ -458,6 +465,7 @@ pub enum OpenGenesisConfigError { Load(#[from] std::io::Error), } +#[allow(unused_variables)] pub fn open_genesis_config( ledger_path: &Path, max_genesis_archive_unpacked_size: u64, @@ -471,140 +479,140 @@ pub fn open_genesis_config( ); let genesis_package = ledger_path.join(DEFAULT_GENESIS_ARCHIVE); - unpack_genesis_archive( - &genesis_package, - ledger_path, - max_genesis_archive_unpacked_size, - )?; + // unpack_genesis_archive( + // &genesis_package, + // ledger_path, + // max_genesis_archive_unpacked_size, + // )?; GenesisConfig::load(ledger_path).map_err(OpenGenesisConfigError::Load) } } } -pub fn unpack_genesis_archive( - archive_filename: &Path, - destination_dir: &Path, - max_genesis_archive_unpacked_size: u64, -) -> std::result::Result<(), UnpackError> { - info!("Extracting {:?}...", archive_filename); - let extract_start = Instant::now(); - - fs::create_dir_all(destination_dir)?; - let tar_bz2 = File::open(archive_filename)?; - let tar = BzDecoder::new(BufReader::new(tar_bz2)); - let mut archive = Archive::new(tar); - unpack_genesis( - &mut archive, - destination_dir, - max_genesis_archive_unpacked_size, - )?; - info!( - "Extracted {:?} in {:?}", - archive_filename, - Instant::now().duration_since(extract_start) - ); - Ok(()) -} - -fn unpack_genesis( - archive: &mut Archive, - unpack_dir: &Path, - max_genesis_archive_unpacked_size: u64, -) -> Result<()> { - unpack_archive( - archive, - max_genesis_archive_unpacked_size, - max_genesis_archive_unpacked_size, - MAX_GENESIS_ARCHIVE_UNPACKED_COUNT, - |p, k| is_valid_genesis_archive_entry(unpack_dir, p, k), - |_| {}, - ) -} - -fn is_valid_genesis_archive_entry<'a>( - unpack_dir: &'a Path, - parts: &[&str], - kind: tar::EntryType, -) -> UnpackPath<'a> { - trace!("validating: {:?} {:?}", parts, kind); - #[allow(clippy::match_like_matches_macro)] - match (parts, kind) { - ([DEFAULT_GENESIS_FILE], GNUSparse) => UnpackPath::Valid(unpack_dir), - ([DEFAULT_GENESIS_FILE], Regular) => UnpackPath::Valid(unpack_dir), - (["rocksdb"], Directory) => UnpackPath::Ignore, - (["rocksdb", _], GNUSparse) => UnpackPath::Ignore, - (["rocksdb", _], Regular) => UnpackPath::Ignore, - (["rocksdb_fifo"], Directory) => UnpackPath::Ignore, - (["rocksdb_fifo", _], GNUSparse) => UnpackPath::Ignore, - (["rocksdb_fifo", _], Regular) => UnpackPath::Ignore, - _ => UnpackPath::Invalid, - } -} +// pub fn unpack_genesis_archive( +// archive_filename: &Path, +// destination_dir: &Path, +// 
max_genesis_archive_unpacked_size: u64, +// ) -> std::result::Result<(), UnpackError> { +// info!("Extracting {:?}...", archive_filename); +// let extract_start = Instant::now(); + +// fs::create_dir_all(destination_dir)?; +// let tar_bz2 = File::open(archive_filename)?; +// let tar = BzDecoder::new(BufReader::new(tar_bz2)); +// let mut archive = Archive::new(tar); +// unpack_genesis( +// &mut archive, +// destination_dir, +// max_genesis_archive_unpacked_size, +// )?; +// info!( +// "Extracted {:?} in {:?}", +// archive_filename, +// Instant::now().duration_since(extract_start) +// ); +// Ok(()) +// } + +// fn unpack_genesis( +// archive: &mut Archive, +// unpack_dir: &Path, +// max_genesis_archive_unpacked_size: u64, +// ) -> Result<()> { +// unpack_archive( +// archive, +// max_genesis_archive_unpacked_size, +// max_genesis_archive_unpacked_size, +// MAX_GENESIS_ARCHIVE_UNPACKED_COUNT, +// |p, k| is_valid_genesis_archive_entry(unpack_dir, p, k), +// |_| {}, +// ) +// } + +// fn is_valid_genesis_archive_entry<'a>( +// unpack_dir: &'a Path, +// parts: &[&str], +// kind: tar::EntryType, +// ) -> UnpackPath<'a> { +// trace!("validating: {:?} {:?}", parts, kind); +// #[allow(clippy::match_like_matches_macro)] +// match (parts, kind) { +// ([DEFAULT_GENESIS_FILE], GNUSparse) => UnpackPath::Valid(unpack_dir), +// ([DEFAULT_GENESIS_FILE], Regular) => UnpackPath::Valid(unpack_dir), +// (["rocksdb"], Directory) => UnpackPath::Ignore, +// (["rocksdb", _], GNUSparse) => UnpackPath::Ignore, +// (["rocksdb", _], Regular) => UnpackPath::Ignore, +// (["rocksdb_fifo"], Directory) => UnpackPath::Ignore, +// (["rocksdb_fifo", _], GNUSparse) => UnpackPath::Ignore, +// (["rocksdb_fifo", _], Regular) => UnpackPath::Ignore, +// _ => UnpackPath::Invalid, +// } +// } #[cfg(test)] mod tests { use { super::*, assert_matches::assert_matches, - tar::{Builder, Header}, + // tar::{Builder, Header}, }; - #[test] - fn test_archive_is_valid_entry() { - assert!(is_valid_snapshot_archive_entry( - &["snapshots"], - tar::EntryType::Directory - )); - assert!(!is_valid_snapshot_archive_entry( - &["snapshots", ""], - tar::EntryType::Directory - )); - assert!(is_valid_snapshot_archive_entry( - &["snapshots", "3"], - tar::EntryType::Directory - )); - assert!(is_valid_snapshot_archive_entry( - &["snapshots", "3", "3"], - tar::EntryType::Regular - )); - assert!(is_valid_snapshot_archive_entry( - &["version"], - tar::EntryType::Regular - )); - assert!(is_valid_snapshot_archive_entry( - &["accounts"], - tar::EntryType::Directory - )); - assert!(!is_valid_snapshot_archive_entry( - &["accounts", ""], - tar::EntryType::Regular - )); - - assert!(!is_valid_snapshot_archive_entry( - &["snapshots"], - tar::EntryType::Regular - )); - assert!(!is_valid_snapshot_archive_entry( - &["snapshots", "x0"], - tar::EntryType::Directory - )); - assert!(!is_valid_snapshot_archive_entry( - &["snapshots", "0x"], - tar::EntryType::Directory - )); - assert!(!is_valid_snapshot_archive_entry( - &["snapshots", "①"], - tar::EntryType::Directory - )); - assert!(!is_valid_snapshot_archive_entry( - &["snapshots", "0", "aa"], - tar::EntryType::Regular - )); - assert!(!is_valid_snapshot_archive_entry( - &["aaaa"], - tar::EntryType::Regular - )); - } + // #[test] + // fn test_archive_is_valid_entry() { + // assert!(is_valid_snapshot_archive_entry( + // &["snapshots"], + // tar::EntryType::Directory + // )); + // assert!(!is_valid_snapshot_archive_entry( + // &["snapshots", ""], + // tar::EntryType::Directory + // )); + // assert!(is_valid_snapshot_archive_entry( + 
// &["snapshots", "3"], + // tar::EntryType::Directory + // )); + // assert!(is_valid_snapshot_archive_entry( + // &["snapshots", "3", "3"], + // tar::EntryType::Regular + // )); + // assert!(is_valid_snapshot_archive_entry( + // &["version"], + // tar::EntryType::Regular + // )); + // assert!(is_valid_snapshot_archive_entry( + // &["accounts"], + // tar::EntryType::Directory + // )); + // assert!(!is_valid_snapshot_archive_entry( + // &["accounts", ""], + // tar::EntryType::Regular + // )); + + // assert!(!is_valid_snapshot_archive_entry( + // &["snapshots"], + // tar::EntryType::Regular + // )); + // assert!(!is_valid_snapshot_archive_entry( + // &["snapshots", "x0"], + // tar::EntryType::Directory + // )); + // assert!(!is_valid_snapshot_archive_entry( + // &["snapshots", "0x"], + // tar::EntryType::Directory + // )); + // assert!(!is_valid_snapshot_archive_entry( + // &["snapshots", "①"], + // tar::EntryType::Directory + // )); + // assert!(!is_valid_snapshot_archive_entry( + // &["snapshots", "0", "aa"], + // tar::EntryType::Regular + // )); + // assert!(!is_valid_snapshot_archive_entry( + // &["aaaa"], + // tar::EntryType::Regular + // )); + // } #[test] fn test_valid_snapshot_accounts() { diff --git a/accounts-db/src/read_only_accounts_cache.rs b/accounts-db/src/read_only_accounts_cache.rs index 2d518956aa8992..196754065e370b 100644 --- a/accounts-db/src/read_only_accounts_cache.rs +++ b/accounts-db/src/read_only_accounts_cache.rs @@ -242,7 +242,7 @@ impl ReadOnlyAccountsCache { data_size.fetch_sub(account_size, Ordering::Relaxed); Some(entry.account) } - + #[allow(dead_code)] pub(crate) fn cache_len(&self) -> usize { self.cache.len() } diff --git a/accounts-db/src/secondary_index.rs b/accounts-db/src/secondary_index.rs index 0157a1c17c307c..99da353fbee8bd 100644 --- a/accounts-db/src/secondary_index.rs +++ b/accounts-db/src/secondary_index.rs @@ -103,6 +103,7 @@ impl SecondaryIndexEntry for RwLockSecondaryIndexEntry { } #[derive(Debug, Default)] +#[allow(dead_code)] pub struct SecondaryIndex { metrics_name: &'static str, // Map from index keys to index values diff --git a/accounts-db/src/tiered_storage/byte_block.rs b/accounts-db/src/tiered_storage/byte_block.rs index 8cd3b6503bc23c..ee4c0a4b25aaee 100644 --- a/accounts-db/src/tiered_storage/byte_block.rs +++ b/accounts-db/src/tiered_storage/byte_block.rs @@ -1,6 +1,6 @@ //! The utility structs and functions for writing byte blocks for the //! accounts db tiered storage. - +#[allow(unused_imports)] use { crate::tiered_storage::{footer::AccountBlockFormat, meta::AccountMetaOptionalFields}, std::{ @@ -13,7 +13,7 @@ use { #[derive(Debug)] pub enum ByteBlockEncoder { Raw(Cursor>), - Lz4(lz4::Encoder>), + // Lz4(lz4::Encoder>), // not supported in svm-rollup } /// The byte block writer. 
@@ -37,12 +37,12 @@ impl ByteBlockWriter { Self { encoder: match encoding { AccountBlockFormat::AlignedRaw => ByteBlockEncoder::Raw(Cursor::new(Vec::new())), - AccountBlockFormat::Lz4 => ByteBlockEncoder::Lz4( - lz4::EncoderBuilder::new() - .level(0) - .build(Vec::new()) - .unwrap(), - ), + // AccountBlockFormat::Lz4 => ByteBlockEncoder::Lz4( + // lz4::EncoderBuilder::new() + // .level(0) + // .build(Vec::new()) + // .unwrap(), + // ), }, len: 0, } @@ -106,7 +106,7 @@ impl ByteBlockWriter { pub fn write(&mut self, buf: &[u8]) -> IoResult<()> { match &mut self.encoder { ByteBlockEncoder::Raw(cursor) => cursor.write_all(buf)?, - ByteBlockEncoder::Lz4(lz4_encoder) => lz4_encoder.write_all(buf)?, + // ByteBlockEncoder::Lz4(lz4_encoder) => lz4_encoder.write_all(buf)?, }; self.len += buf.len(); Ok(()) @@ -117,11 +117,11 @@ impl ByteBlockWriter { pub fn finish(self) -> IoResult> { match self.encoder { ByteBlockEncoder::Raw(cursor) => Ok(cursor.into_inner()), - ByteBlockEncoder::Lz4(lz4_encoder) => { - let (compressed_block, result) = lz4_encoder.finish(); - result?; - Ok(compressed_block) - } + // ByteBlockEncoder::Lz4(lz4_encoder) => { + // let (compressed_block, result) = lz4_encoder.finish(); + // result?; + // Ok(compressed_block) + // } } } } @@ -173,14 +173,14 @@ impl ByteBlockReader { /// /// Note that calling this function with AccountBlockFormat::AlignedRaw encoding /// will result in panic as the input is already decoded. - pub fn decode(encoding: AccountBlockFormat, input: &[u8]) -> IoResult> { + pub fn decode(encoding: AccountBlockFormat, _input: &[u8]) -> IoResult> { match encoding { - AccountBlockFormat::Lz4 => { - let mut decoder = lz4::Decoder::new(input).unwrap(); - let mut output = vec![]; - decoder.read_to_end(&mut output)?; - Ok(output) - } + // AccountBlockFormat::Lz4 => { + // let mut decoder = lz4::Decoder::new(input).unwrap(); + // let mut output = vec![]; + // decoder.read_to_end(&mut output)?; + // Ok(output) + // } AccountBlockFormat::AlignedRaw => panic!("the input buffer is already decoded"), } } @@ -230,10 +230,10 @@ mod tests { write_single(AccountBlockFormat::AlignedRaw); } - #[test] - fn test_write_single_encoded_format() { - write_single(AccountBlockFormat::Lz4); - } + // #[test] + // fn test_write_single_encoded_format() { + // write_single(AccountBlockFormat::Lz4); + // } #[derive(Debug, PartialEq)] struct TestMetaStruct { @@ -334,10 +334,10 @@ mod tests { write_multiple(AccountBlockFormat::AlignedRaw); } - #[test] - fn test_write_multiple_lz4_format() { - write_multiple(AccountBlockFormat::Lz4); - } + // #[test] + // fn test_write_multiple_lz4_format() { + // write_multiple(AccountBlockFormat::Lz4); + // } fn write_optional_fields(format: AccountBlockFormat) { let mut test_epoch = 5432312; @@ -395,8 +395,8 @@ mod tests { write_optional_fields(AccountBlockFormat::AlignedRaw); } - #[test] - fn test_write_optional_fields_lz4_format() { - write_optional_fields(AccountBlockFormat::Lz4); - } + // #[test] + // fn test_write_optional_fields_lz4_format() { + // write_optional_fields(AccountBlockFormat::Lz4); + // } } diff --git a/accounts-db/src/tiered_storage/footer.rs b/accounts-db/src/tiered_storage/footer.rs index f93f7f88195e67..c377b5b04f59d2 100644 --- a/accounts-db/src/tiered_storage/footer.rs +++ b/accounts-db/src/tiered_storage/footer.rs @@ -60,7 +60,7 @@ pub enum AccountMetaFormat { pub enum AccountBlockFormat { #[default] AlignedRaw = 0, - Lz4 = 1, + // Lz4 = 1, // not supported in svm-rollup } #[derive(Debug, PartialEq, Eq, Clone, Copy)] diff --git 
a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index ac4da3bc28eb8a..4d4a14e9cf28c7 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -69,7 +69,8 @@ pub(crate) const HOT_ACCOUNT_ALIGNMENT: usize = 8; pub(crate) const HOT_BLOCK_ALIGNMENT: usize = 8; /// The maximum supported offset for hot accounts storage. -const MAX_HOT_ACCOUNT_OFFSET: usize = u32::MAX as usize * HOT_ACCOUNT_ALIGNMENT; +// const MAX_HOT_ACCOUNT_OFFSET: usize = u32::MAX as usize * HOT_ACCOUNT_ALIGNMENT; +const MAX_HOT_ACCOUNT_OFFSET: usize = (u32::MAX as u64 * HOT_ACCOUNT_ALIGNMENT as u64) as usize; // returns the required number of padding fn padding_bytes(data_len: usize) -> u8 { diff --git a/download-utils/src/lib.rs b/download-utils/src/lib.rs index 17d50d31b55628..0dee8abf7ead71 100644 --- a/download-utils/src/lib.rs +++ b/download-utils/src/lib.rs @@ -284,10 +284,10 @@ pub fn download_snapshot_archive( fs::create_dir_all(&snapshot_archives_remote_dir).unwrap(); for archive_format in [ - ArchiveFormat::TarZstd, + // ArchiveFormat::TarZstd, ArchiveFormat::TarGzip, - ArchiveFormat::TarBzip2, - ArchiveFormat::TarLz4, + // ArchiveFormat::TarBzip2, + // ArchiveFormat::TarLz4, ArchiveFormat::Tar, ] { let destination_path = match snapshot_kind { diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 54b66b59be76e0..ec0ca35fdfc6ca 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -18,7 +18,7 @@ blake3 = { workspace = true } bv = { workspace = true, features = ["serde"] } bytemuck = { workspace = true } byteorder = { workspace = true } -bzip2 = { workspace = true } +# bzip2 = { workspace = true } crossbeam-channel = { workspace = true } dashmap = { workspace = true, features = ["rayon", "raw-api"] } dir-diff = { workspace = true } @@ -30,7 +30,7 @@ itertools = { workspace = true } lazy_static = { workspace = true } libc = { workspace = true } log = { workspace = true } -lz4 = { workspace = true } +# lz4 = { workspace = true } memmap2 = { workspace = true } mockall = { workspace = true } modular-bitfield = { workspace = true } @@ -59,7 +59,7 @@ solana-frozen-abi-macro = { workspace = true, optional = true } solana-inline-spl = { workspace = true } solana-loader-v4-program = { workspace = true } solana-measure = { workspace = true } -solana-perf = { workspace = true } +# solana-perf = { workspace = true } solana-program-runtime = { workspace = true } solana-rayon-threadlimit = { workspace = true } solana-sdk = { workspace = true } @@ -75,11 +75,11 @@ solana-zk-token-sdk = { workspace = true } static_assertions = { workspace = true } strum = { workspace = true, features = ["derive"] } strum_macros = { workspace = true } -symlink = { workspace = true } -tar = { workspace = true } +# symlink = { workspace = true } +# tar = { workspace = true } tempfile = { workspace = true } thiserror = { workspace = true } -zstd = { workspace = true } +# zstd = { workspace = true } [lib] crate-type = ["lib"] @@ -114,7 +114,7 @@ frozen-abi = [ "solana-accounts-db/frozen-abi", "solana-compute-budget/frozen-abi", "solana-cost-model/frozen-abi", - "solana-perf/frozen-abi", + # "solana-perf/frozen-abi", "solana-program-runtime/frozen-abi", "solana-sdk/frozen-abi", "solana-svm/frozen-abi", diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index ebdb5cecd1a825..1156b5c3338b1b 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -96,7 +96,7 @@ use { solana_cost_model::cost_tracker::CostTracker, 
solana_loader_v4_program::create_program_runtime_environment_v2, solana_measure::{measure, measure::Measure, measure_us}, - solana_perf::perf_libs, + // solana_perf::perf_libs, solana_program_runtime::{ invoke_context::BuiltinFunctionWithContext, loaded_programs::{ @@ -6794,12 +6794,13 @@ impl Bank { // 1. Transaction forwarding delay // 2. The slot at which the next leader will actually process the transaction // Drop the transaction if it will expire by the time the next node receives and processes it - let api = perf_libs::api(); - let max_tx_fwd_delay = if api.is_none() { - MAX_TRANSACTION_FORWARDING_DELAY - } else { - MAX_TRANSACTION_FORWARDING_DELAY_GPU - }; + // let api = perf_libs::api(); + // let max_tx_fwd_delay = if api.is_none() { + // MAX_TRANSACTION_FORWARDING_DELAY + // } else { + // MAX_TRANSACTION_FORWARDING_DELAY_GPU + // }; + let max_tx_fwd_delay = MAX_TRANSACTION_FORWARDING_DELAY_GPU; self.check_transactions( transactions, diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index c7d29de7109626..faa4413ede4d09 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -16,6 +16,7 @@ use { solana_sdk::{ clock::{Epoch, Slot}, hash::Hash, + timing, }, std::{ collections::{hash_map::Entry, HashMap, HashSet}, diff --git a/runtime/src/snapshot_config.rs b/runtime/src/snapshot_config.rs index f920585c7697c4..d60d60411504f5 100644 --- a/runtime/src/snapshot_config.rs +++ b/runtime/src/snapshot_config.rs @@ -59,7 +59,7 @@ impl Default for SnapshotConfig { full_snapshot_archives_dir: PathBuf::default(), incremental_snapshot_archives_dir: PathBuf::default(), bank_snapshots_dir: PathBuf::default(), - archive_format: ArchiveFormat::TarZstd, + archive_format: ArchiveFormat::Tar, snapshot_version: SnapshotVersion::default(), maximum_full_snapshot_archives_to_retain: snapshot_utils::DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN, diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 7d510f424a95d5..92e0c8b4c8876e 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1,3 +1,4 @@ +#[allow(unused_imports)] use { crate::{ bank::{BankFieldsToSerialize, BankSlotDelta}, @@ -14,7 +15,7 @@ use { RebuiltSnapshotStorage, SnapshotStorageRebuilder, }, }, - bzip2::bufread::BzDecoder, + // bzip2::bufread::BzDecoder, crossbeam_channel::Sender, flate2::read::GzDecoder, lazy_static::lazy_static, @@ -48,7 +49,7 @@ use { sync::Arc, thread::{Builder, JoinHandle}, }, - tar::{self, Archive}, + // tar::{self, Archive}, tempfile::TempDir, thiserror::Error, }; @@ -961,6 +962,8 @@ fn serialize_snapshot( } /// Archives a snapshot into `archive_path` +#[allow(unused_variables)] +#[allow(dead_code)] fn archive_snapshot( snapshot_kind: SnapshotKind, snapshot_slot: Slot, @@ -1003,22 +1006,22 @@ fn archive_snapshot( })?; let staging_snapshot_file = staging_snapshot_dir.join(&slot_str); let src_snapshot_file = src_snapshot_dir.join(slot_str); - symlink::symlink_file(&src_snapshot_file, &staging_snapshot_file) - .map_err(|err| E::SymlinkSnapshot(err, src_snapshot_file, staging_snapshot_file))?; + // symlink::symlink_file(&src_snapshot_file, &staging_snapshot_file) + // .map_err(|err| E::SymlinkSnapshot(err, src_snapshot_file, staging_snapshot_file))?; // Following the existing archive format, the status cache is under snapshots/, not under / // like in the snapshot dir. 
let staging_status_cache = staging_snapshots_dir.join(SNAPSHOT_STATUS_CACHE_FILENAME); let src_status_cache = src_snapshot_dir.join(SNAPSHOT_STATUS_CACHE_FILENAME); - symlink::symlink_file(&src_status_cache, &staging_status_cache) - .map_err(|err| E::SymlinkStatusCache(err, src_status_cache, staging_status_cache))?; + // symlink::symlink_file(&src_status_cache, &staging_status_cache) + // .map_err(|err| E::SymlinkStatusCache(err, src_status_cache, staging_status_cache))?; // The bank snapshot has the version file, so symlink it to the correct staging path let staging_version_file = staging_dir.path().join(SNAPSHOT_VERSION_FILENAME); let src_version_file = src_snapshot_dir.join(SNAPSHOT_VERSION_FILENAME); - symlink::symlink_file(&src_version_file, &staging_version_file).map_err(|err| { - E::SymlinkVersionFile(err, src_version_file, staging_version_file.clone()) - })?; + // symlink::symlink_file(&src_version_file, &staging_version_file).map_err(|err| { + // E::SymlinkVersionFile(err, src_version_file, staging_version_file.clone()) + // })?; // Tar the staging directory into the archive at `staging_archive_path` let staging_archive_path = tar_dir.join(format!( @@ -1033,70 +1036,70 @@ fn archive_snapshot( .map_err(|err| E::CreateArchiveFile(err, staging_archive_path.clone()))?; let do_archive_files = |encoder: &mut dyn Write| -> std::result::Result<(), E> { - let mut archive = tar::Builder::new(encoder); - // Serialize the version and snapshots files before accounts so we can quickly determine the version - // and other bank fields. This is necessary if we want to interleave unpacking with reconstruction - archive - .append_path_with_name(&staging_version_file, SNAPSHOT_VERSION_FILENAME) - .map_err(E::ArchiveVersionFile)?; - archive - .append_dir_all(SNAPSHOTS_DIR, &staging_snapshots_dir) - .map_err(E::ArchiveSnapshotsDir)?; - - for storage in snapshot_storages { - let path_in_archive = Path::new(ACCOUNTS_DIR).join(AccountsFile::file_name( - storage.slot(), - storage.append_vec_id(), - )); - match storage.accounts.internals_for_archive() { - InternalsForArchive::Mmap(data) => { - let mut header = tar::Header::new_gnu(); - header.set_path(path_in_archive).map_err(|err| { - E::ArchiveAccountStorageFile(err, storage.path().to_path_buf()) - })?; - header.set_size(storage.capacity()); - header.set_cksum(); - archive.append(&header, data) - } - InternalsForArchive::FileIo(path) => { - archive.append_path_with_name(path, path_in_archive) - } - } - .map_err(|err| E::ArchiveAccountStorageFile(err, storage.path().to_path_buf()))?; - } - - archive.into_inner().map_err(E::FinishArchive)?; + // let mut archive = tar::Builder::new(encoder); + // // Serialize the version and snapshots files before accounts so we can quickly determine the version + // // and other bank fields. 
This is necessary if we want to interleave unpacking with reconstruction + // archive + // .append_path_with_name(&staging_version_file, SNAPSHOT_VERSION_FILENAME) + // .map_err(E::ArchiveVersionFile)?; + // archive + // .append_dir_all(SNAPSHOTS_DIR, &staging_snapshots_dir) + // .map_err(E::ArchiveSnapshotsDir)?; + + // for storage in snapshot_storages { + // let path_in_archive = Path::new(ACCOUNTS_DIR).join(AccountsFile::file_name( + // storage.slot(), + // storage.append_vec_id(), + // )); + // match storage.accounts.internals_for_archive() { + // InternalsForArchive::Mmap(data) => { + // // let mut header = tar::Header::new_gnu(); + // // header.set_path(path_in_archive).map_err(|err| { + // // E::ArchiveAccountStorageFile(err, storage.path().to_path_buf()) + // // })?; + // // header.set_size(storage.capacity()); + // // header.set_cksum(); + // // archive.append(&header, data) + // } + // InternalsForArchive::FileIo(path) => { + // archive.append_path_with_name(path, path_in_archive) + // } + // } + // .map_err(|err| E::ArchiveAccountStorageFile(err, storage.path().to_path_buf()))?; + // } + + // archive.into_inner().map_err(E::FinishArchive)?; Ok(()) }; match archive_format { - ArchiveFormat::TarBzip2 => { - let mut encoder = - bzip2::write::BzEncoder::new(archive_file, bzip2::Compression::best()); - do_archive_files(&mut encoder)?; - encoder.finish().map_err(E::FinishEncoder)?; - } + // ArchiveFormat::TarBzip2 => { + // let mut encoder = + // bzip2::write::BzEncoder::new(archive_file, bzip2::Compression::best()); + // do_archive_files(&mut encoder)?; + // encoder.finish().map_err(E::FinishEncoder)?; + // } ArchiveFormat::TarGzip => { let mut encoder = flate2::write::GzEncoder::new(archive_file, flate2::Compression::default()); do_archive_files(&mut encoder)?; encoder.finish().map_err(E::FinishEncoder)?; } - ArchiveFormat::TarZstd => { - let mut encoder = - zstd::stream::Encoder::new(archive_file, 0).map_err(E::CreateEncoder)?; - do_archive_files(&mut encoder)?; - encoder.finish().map_err(E::FinishEncoder)?; - } - ArchiveFormat::TarLz4 => { - let mut encoder = lz4::EncoderBuilder::new() - .level(1) - .build(archive_file) - .map_err(E::CreateEncoder)?; - do_archive_files(&mut encoder)?; - let (_output, result) = encoder.finish(); - result.map_err(E::FinishEncoder)?; - } + // ArchiveFormat::TarZstd => { + // let mut encoder = + // zstd::stream::Encoder::new(archive_file, 0).map_err(E::CreateEncoder)?; + // do_archive_files(&mut encoder)?; + // encoder.finish().map_err(E::FinishEncoder)?; + // } + // ArchiveFormat::TarLz4 => { + // let mut encoder = lz4::EncoderBuilder::new() + // .level(1) + // .build(archive_file) + // .map_err(E::CreateEncoder)?; + // do_archive_files(&mut encoder)?; + // let (_output, result) = encoder.finish(); + // result.map_err(E::FinishEncoder)?; + // } ArchiveFormat::Tar => { do_archive_files(&mut archive_file)?; } @@ -1438,14 +1441,14 @@ fn get_snapshot_accounts_hardlink_dir( snapshot_hardlink_dir.clone(), ) })?; - let symlink_path = hardlinks_dir.as_ref().join(format!("account_path_{idx}")); - symlink::symlink_dir(&snapshot_hardlink_dir, &symlink_path).map_err(|err| { - GetSnapshotAccountsHardLinkDirError::SymlinkSnapshotHardLinkDir { - source: err, - original: snapshot_hardlink_dir.clone(), - link: symlink_path, - } - })?; + let _symlink_path = hardlinks_dir.as_ref().join(format!("account_path_{idx}")); + // symlink::symlink_dir(&snapshot_hardlink_dir, &symlink_path).map_err(|err| { + // GetSnapshotAccountsHardLinkDirError::SymlinkSnapshotHardLinkDir { + 
+        //         source: err,
+        //         original: snapshot_hardlink_dir.clone(),
+        //         link: symlink_path,
+        //     }
+        // })?;
         account_paths.insert(account_path);
     };
@@ -1565,30 +1568,32 @@ pub fn verify_and_unarchive_snapshots(
 }

 /// Spawns a thread for unpacking a snapshot
-fn spawn_unpack_snapshot_thread(
-    file_sender: Sender<PathBuf>,
-    account_paths: Arc<Vec<PathBuf>>,
-    ledger_dir: Arc<PathBuf>,
-    mut archive: Archive<SharedBufferReader>,
-    parallel_selector: Option<ParallelSelector>,
-    thread_index: usize,
-) -> JoinHandle<()> {
-    Builder::new()
-        .name(format!("solUnpkSnpsht{thread_index:02}"))
-        .spawn(move || {
-            hardened_unpack::streaming_unpack_snapshot(
-                &mut archive,
-                ledger_dir.as_path(),
-                &account_paths,
-                parallel_selector,
-                &file_sender,
-            )
-            .unwrap();
-        })
-        .unwrap()
-}
+// fn spawn_unpack_snapshot_thread(
+//     file_sender: Sender<PathBuf>,
+//     account_paths: Arc<Vec<PathBuf>>,
+//     ledger_dir: Arc<PathBuf>,
+//     // mut archive: Archive<SharedBufferReader>,
+//     parallel_selector: Option<ParallelSelector>,
+//     thread_index: usize,
+// ) -> JoinHandle<()> {
+//     Builder::new()
+//         .name(format!("solUnpkSnpsht{thread_index:02}"))
+//         .spawn(move || {
+//             hardened_unpack::streaming_unpack_snapshot(
+//                 &mut archive,
+//                 ledger_dir.as_path(),
+//                 &account_paths,
+//                 parallel_selector,
+//                 &file_sender,
+//             )
+//             .unwrap();
+//         })
+//         .unwrap()
+// }

 /// Streams unpacked files across channel
+#[allow(dead_code)]
+#[allow(unused_variables)]
 fn streaming_unarchive_snapshot(
     file_sender: Sender<PathBuf>,
     account_paths: Vec<PathBuf>,
@@ -1597,37 +1602,38 @@ fn streaming_unarchive_snapshot(
     archive_format: ArchiveFormat,
     num_threads: usize,
 ) -> Vec<JoinHandle<()>> {
-    let account_paths = Arc::new(account_paths);
-    let ledger_dir = Arc::new(ledger_dir);
-    let shared_buffer = untar_snapshot_create_shared_buffer(&snapshot_archive_path, archive_format);
-
-    // All shared buffer readers need to be created before the threads are spawned
-    let archives: Vec<_> = (0..num_threads)
-        .map(|_| {
-            let reader = SharedBufferReader::new(&shared_buffer);
-            Archive::new(reader)
-        })
-        .collect();
-
-    archives
-        .into_iter()
-        .enumerate()
-        .map(|(thread_index, archive)| {
-            let parallel_selector = Some(ParallelSelector {
-                index: thread_index,
-                divisions: num_threads,
-            });
-
-            spawn_unpack_snapshot_thread(
-                file_sender.clone(),
-                account_paths.clone(),
-                ledger_dir.clone(),
-                archive,
-                parallel_selector,
-                thread_index,
-            )
-        })
-        .collect()
+    // let account_paths = Arc::new(account_paths);
+    // let ledger_dir = Arc::new(ledger_dir);
+    // let shared_buffer = untar_snapshot_create_shared_buffer(&snapshot_archive_path, archive_format);
+
+    // // All shared buffer readers need to be created before the threads are spawned
+    // let archives: Vec<_> = (0..num_threads)
+    //     .map(|_| {
+    //         let reader = SharedBufferReader::new(&shared_buffer);
+    //         Archive::new(reader)
+    //     })
+    //     .collect();
+
+    // archives
+    //     .into_iter()
+    //     .enumerate()
+    //     .map(|(thread_index, archive)| {
+    //         let parallel_selector = Some(ParallelSelector {
+    //             index: thread_index,
+    //             divisions: num_threads,
+    //         });
+
+    //         spawn_unpack_snapshot_thread(
+    //             file_sender.clone(),
+    //             account_paths.clone(),
+    //             ledger_dir.clone(),
+    //             archive,
+    //             parallel_selector,
+    //             thread_index,
+    //         )
+    //     })
+    //     .collect()
+    vec![]
 }

 /// BankSnapshotInfo::new_from_dir() requires a few meta files to accept a snapshot dir
@@ -2207,47 +2213,48 @@ pub fn purge_old_snapshot_archives(
     }
 }

-#[cfg(feature = "dev-context-only-utils")]
-fn unpack_snapshot_local(
-    shared_buffer: SharedBuffer,
-    ledger_dir: &Path,
-    account_paths: &[PathBuf],
-    parallel_divisions: usize,
-) -> Result<UnpackedAppendVecMap> {
-    assert!(parallel_divisions > 0);
-
-    // allocate all readers before any readers start reading
-    let readers = (0..parallel_divisions)
-        .map(|_| SharedBufferReader::new(&shared_buffer))
-        .collect::<Vec<_>>();
-
-    // create 'parallel_divisions' # of parallel workers, each responsible for 1/parallel_divisions of all the files to extract.
-    let all_unpacked_append_vec_map = readers
-        .into_par_iter()
-        .enumerate()
-        .map(|(index, reader)| {
-            let parallel_selector = Some(ParallelSelector {
-                index,
-                divisions: parallel_divisions,
-            });
-            let mut archive = Archive::new(reader);
-            hardened_unpack::unpack_snapshot(
-                &mut archive,
-                ledger_dir,
-                account_paths,
-                parallel_selector,
-            )
-        })
-        .collect::<Vec<_>>();
-
-    let mut unpacked_append_vec_map = UnpackedAppendVecMap::new();
-    for h in all_unpacked_append_vec_map {
-        unpacked_append_vec_map.extend(h?);
-    }
-
-    Ok(unpacked_append_vec_map)
-}
-
+// #[cfg(feature = "dev-context-only-utils")]
+// fn unpack_snapshot_local(
+//     shared_buffer: SharedBuffer,
+//     ledger_dir: &Path,
+//     account_paths: &[PathBuf],
+//     parallel_divisions: usize,
+// ) -> Result<UnpackedAppendVecMap> {
+//     assert!(parallel_divisions > 0);
+
+//     // allocate all readers before any readers start reading
+//     let readers = (0..parallel_divisions)
+//         .map(|_| SharedBufferReader::new(&shared_buffer))
+//         .collect::<Vec<_>>();
+
+//     // create 'parallel_divisions' # of parallel workers, each responsible for 1/parallel_divisions of all the files to extract.
+//     let all_unpacked_append_vec_map = readers
+//         .into_par_iter()
+//         .enumerate()
+//         .map(|(index, reader)| {
+//             let parallel_selector = Some(ParallelSelector {
+//                 index,
+//                 divisions: parallel_divisions,
+//             });
+//             let mut archive = Archive::new(reader);
+//             hardened_unpack::unpack_snapshot(
+//                 &mut archive,
+//                 ledger_dir,
+//                 account_paths,
+//                 parallel_selector,
+//             )
+//         })
+//         .collect::<Vec<_>>();
+
+//     let mut unpacked_append_vec_map = UnpackedAppendVecMap::new();
+//     for h in all_unpacked_append_vec_map {
+//         unpacked_append_vec_map.extend(h?);
+//     }
+
+//     Ok(unpacked_append_vec_map)
+// }
+
+#[allow(dead_code)]
 fn untar_snapshot_create_shared_buffer(
     snapshot_tar: &Path,
     archive_format: ArchiveFormat,
@@ -2263,29 +2270,29 @@ fn untar_snapshot_create_shared_buffer(
         .unwrap()
     };
     match archive_format {
-        ArchiveFormat::TarBzip2 => SharedBuffer::new(BzDecoder::new(BufReader::new(open_file()))),
+        // ArchiveFormat::TarBzip2 => SharedBuffer::new(BzDecoder::new(BufReader::new(open_file()))),
         ArchiveFormat::TarGzip => SharedBuffer::new(GzDecoder::new(BufReader::new(open_file()))),
-        ArchiveFormat::TarZstd => SharedBuffer::new(
-            zstd::stream::read::Decoder::new(BufReader::new(open_file())).unwrap(),
-        ),
-        ArchiveFormat::TarLz4 => {
-            SharedBuffer::new(lz4::Decoder::new(BufReader::new(open_file())).unwrap())
-        }
+        // ArchiveFormat::TarZstd => SharedBuffer::new(
+        //     zstd::stream::read::Decoder::new(BufReader::new(open_file())).unwrap(),
+        // ),
+        // ArchiveFormat::TarLz4 => {
+        //     SharedBuffer::new(lz4::Decoder::new(BufReader::new(open_file())).unwrap())
+        // }
         ArchiveFormat::Tar => SharedBuffer::new(BufReader::new(open_file())),
     }
 }

-#[cfg(feature = "dev-context-only-utils")]
-fn untar_snapshot_in(
-    snapshot_tar: impl AsRef<Path>,
-    unpack_dir: &Path,
-    account_paths: &[PathBuf],
-    archive_format: ArchiveFormat,
-    parallel_divisions: usize,
-) -> Result<UnpackedAppendVecMap> {
-    let shared_buffer = untar_snapshot_create_shared_buffer(snapshot_tar.as_ref(), archive_format);
-    unpack_snapshot_local(shared_buffer, unpack_dir, account_paths, parallel_divisions)
-}
+// #[cfg(feature = "dev-context-only-utils")]
+// fn untar_snapshot_in(
+//     snapshot_tar: impl AsRef<Path>,
+//     unpack_dir: &Path,
+//     account_paths: &[PathBuf],
+//     archive_format: ArchiveFormat,
+//     parallel_divisions: usize,
+// ) -> Result<UnpackedAppendVecMap> {
+//     let shared_buffer = untar_snapshot_create_shared_buffer(snapshot_tar.as_ref(), archive_format);
+//     unpack_snapshot_local(shared_buffer, unpack_dir, account_paths, parallel_divisions)
+// }

 pub fn verify_unpacked_snapshots_dir_and_version(
     unpacked_snapshots_dir_and_version: &UnpackedSnapshotsDirAndVersion,
@@ -2339,6 +2346,8 @@ pub enum VerifyBank {
 }

 #[cfg(feature = "dev-context-only-utils")]
+#[allow(dead_code)]
+#[allow(unused_variables)]
 pub fn verify_snapshot_archive(
     snapshot_archive: impl AsRef<Path>,
     snapshots_to_verify: impl AsRef<Path>,
@@ -2349,14 +2358,14 @@ pub fn verify_snapshot_archive(
     let temp_dir = tempfile::TempDir::new().unwrap();
     let unpack_dir = temp_dir.path();
     let unpack_account_dir = create_accounts_run_and_snapshot_dirs(unpack_dir).unwrap().0;
-    untar_snapshot_in(
-        snapshot_archive,
-        unpack_dir,
-        &[unpack_account_dir.clone()],
-        archive_format,
-        1,
-    )
-    .unwrap();
+    // untar_snapshot_in(
+    //     snapshot_archive,
+    //     unpack_dir,
+    //     &[unpack_account_dir.clone()],
+    //     archive_format,
+    //     1,
+    // )
+    // .unwrap();

     // Check snapshots are the same
     let unpacked_snapshots = unpack_dir.join("snapshots");
diff --git a/runtime/src/snapshot_utils/archive_format.rs b/runtime/src/snapshot_utils/archive_format.rs
index d807f4447a2b7b..c9814b15102295 100644
--- a/runtime/src/snapshot_utils/archive_format.rs
+++ b/runtime/src/snapshot_utils/archive_format.rs
@@ -21,10 +21,10 @@ pub const TAR_EXTENSION: &str = "tar";
 /// The different archive formats used for snapshots
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Display)]
 pub enum ArchiveFormat {
-    TarBzip2,
+    // TarBzip2, // Unsupported in svm-rollup
     TarGzip,
-    TarZstd,
-    TarLz4,
+    // TarZstd, // Unsupported in svm-rollup
+    // TarLz4, // Unsupported in svm-rollup
     Tar,
 }

@@ -32,18 +32,18 @@ impl ArchiveFormat {
     /// Get the file extension for the ArchiveFormat
     pub fn extension(&self) -> &str {
         match self {
-            ArchiveFormat::TarBzip2 => TAR_BZIP2_EXTENSION,
+            // ArchiveFormat::TarBzip2 => TAR_BZIP2_EXTENSION,
             ArchiveFormat::TarGzip => TAR_GZIP_EXTENSION,
-            ArchiveFormat::TarZstd => TAR_ZSTD_EXTENSION,
-            ArchiveFormat::TarLz4 => TAR_LZ4_EXTENSION,
+            // ArchiveFormat::TarZstd => TAR_ZSTD_EXTENSION,
+            // ArchiveFormat::TarLz4 => TAR_LZ4_EXTENSION,
             ArchiveFormat::Tar => TAR_EXTENSION,
         }
     }

     pub fn from_cli_arg(archive_format_str: &str) -> Option<ArchiveFormat> {
         match archive_format_str {
-            "zstd" => Some(ArchiveFormat::TarZstd),
-            "lz4" => Some(ArchiveFormat::TarLz4),
+            // "zstd" => Some(ArchiveFormat::TarZstd),
+            // "lz4" => Some(ArchiveFormat::TarLz4),
             _ => None,
         }
     }
@@ -56,10 +56,10 @@ impl TryFrom<&str> for ArchiveFormat {

     fn try_from(extension: &str) -> Result<Self, Self::Error> {
         match extension {
-            TAR_BZIP2_EXTENSION => Ok(ArchiveFormat::TarBzip2),
+            // TAR_BZIP2_EXTENSION => Ok(ArchiveFormat::TarBzip2),
             TAR_GZIP_EXTENSION => Ok(ArchiveFormat::TarGzip),
-            TAR_ZSTD_EXTENSION => Ok(ArchiveFormat::TarZstd),
-            TAR_LZ4_EXTENSION => Ok(ArchiveFormat::TarLz4),
+            // TAR_ZSTD_EXTENSION => Ok(ArchiveFormat::TarZstd),
+            // TAR_LZ4_EXTENSION => Ok(ArchiveFormat::TarLz4),
             TAR_EXTENSION => Ok(ArchiveFormat::Tar),
             _ => Err(ParseError::InvalidExtension(extension.to_string())),
         }
@@ -96,31 +96,31 @@ mod tests {

     #[test]
     fn test_extension() {
-        assert_eq!(ArchiveFormat::TarBzip2.extension(), TAR_BZIP2_EXTENSION);
+        // assert_eq!(ArchiveFormat::TarBzip2.extension(), TAR_BZIP2_EXTENSION);
         assert_eq!(ArchiveFormat::TarGzip.extension(), TAR_GZIP_EXTENSION);
-        assert_eq!(ArchiveFormat::TarZstd.extension(), TAR_ZSTD_EXTENSION);
-        assert_eq!(ArchiveFormat::TarLz4.extension(), TAR_LZ4_EXTENSION);
+        // assert_eq!(ArchiveFormat::TarZstd.extension(), TAR_ZSTD_EXTENSION);
+        // assert_eq!(ArchiveFormat::TarLz4.extension(), TAR_LZ4_EXTENSION);
         assert_eq!(ArchiveFormat::Tar.extension(), TAR_EXTENSION);
     }

     #[test]
     fn test_try_from() {
-        assert_eq!(
-            ArchiveFormat::try_from(TAR_BZIP2_EXTENSION),
-            Ok(ArchiveFormat::TarBzip2)
-        );
+        // assert_eq!(
+        //     ArchiveFormat::try_from(TAR_BZIP2_EXTENSION),
+        //     Ok(ArchiveFormat::TarBzip2)
+        // );
         assert_eq!(
             ArchiveFormat::try_from(TAR_GZIP_EXTENSION),
             Ok(ArchiveFormat::TarGzip)
         );
-        assert_eq!(
-            ArchiveFormat::try_from(TAR_ZSTD_EXTENSION),
-            Ok(ArchiveFormat::TarZstd)
-        );
-        assert_eq!(
-            ArchiveFormat::try_from(TAR_LZ4_EXTENSION),
-            Ok(ArchiveFormat::TarLz4)
-        );
+        // assert_eq!(
+        //     ArchiveFormat::try_from(TAR_ZSTD_EXTENSION),
+        //     Ok(ArchiveFormat::TarZstd)
+        // );
+        // assert_eq!(
+        //     ArchiveFormat::try_from(TAR_LZ4_EXTENSION),
+        //     Ok(ArchiveFormat::TarLz4)
+        // );
         assert_eq!(
             ArchiveFormat::try_from(TAR_EXTENSION),
             Ok(ArchiveFormat::Tar)
@@ -133,22 +133,22 @@ mod tests {

     #[test]
     fn test_from_str() {
-        assert_eq!(
-            ArchiveFormat::from_str(TAR_BZIP2_EXTENSION),
-            Ok(ArchiveFormat::TarBzip2)
-        );
+        // assert_eq!(
+        //     ArchiveFormat::from_str(TAR_BZIP2_EXTENSION),
+        //     Ok(ArchiveFormat::TarBzip2)
+        // );
         assert_eq!(
             ArchiveFormat::from_str(TAR_GZIP_EXTENSION),
             Ok(ArchiveFormat::TarGzip)
         );
-        assert_eq!(
-            ArchiveFormat::from_str(TAR_ZSTD_EXTENSION),
-            Ok(ArchiveFormat::TarZstd)
-        );
-        assert_eq!(
-            ArchiveFormat::from_str(TAR_LZ4_EXTENSION),
-            Ok(ArchiveFormat::TarLz4)
-        );
+        // assert_eq!(
+        //     ArchiveFormat::from_str(TAR_ZSTD_EXTENSION),
+        //     Ok(ArchiveFormat::TarZstd)
+        // );
+        // assert_eq!(
+        //     ArchiveFormat::from_str(TAR_LZ4_EXTENSION),
+        //     Ok(ArchiveFormat::TarLz4)
+        // );
         assert_eq!(
             ArchiveFormat::from_str(TAR_EXTENSION),
             Ok(ArchiveFormat::Tar)
@@ -159,14 +159,14 @@ mod tests {
         );
     }

-    #[test]
-    fn test_from_cli_arg() {
-        let golden = [Some(ArchiveFormat::TarZstd), Some(ArchiveFormat::TarLz4)];
+    // #[test]
+    // fn test_from_cli_arg() {
+    //     let golden = [Some(ArchiveFormat::TarZstd), Some(ArchiveFormat::TarLz4)];

-        for (arg, expected) in zip(SUPPORTED_ARCHIVE_COMPRESSION.iter(), golden.into_iter()) {
-            assert_eq!(ArchiveFormat::from_cli_arg(arg), expected);
-        }
+    //     for (arg, expected) in zip(SUPPORTED_ARCHIVE_COMPRESSION.iter(), golden.into_iter()) {
+    //         assert_eq!(ArchiveFormat::from_cli_arg(arg), expected);
+    //     }

-        assert_eq!(ArchiveFormat::from_cli_arg("bad"), None);
-    }
+    //     assert_eq!(ArchiveFormat::from_cli_arg("bad"), None);
+    // }
 }
diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs
index 2502e57f0a38eb..75538ab20a209b 100644
--- a/runtime/src/stakes.rs
+++ b/runtime/src/stakes.rs
@@ -6,6 +6,7 @@ use {
     im::HashMap as ImHashMap,
     log::error,
     num_derive::ToPrimitive,
+    // num_traits::ToPrimitive,
     rayon::{prelude::*, ThreadPool},
     solana_accounts_db::stake_rewards::StakeReward,
     solana_sdk::{