diff --git a/Cargo.lock b/Cargo.lock
index 40e204b..c1ebf86 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1362,7 +1362,7 @@ dependencies = [
 
 [[package]]
 name = "ic-oss"
-version = "0.6.0"
+version = "0.6.1"
 dependencies = [
  "bytes",
  "candid",
@@ -1381,7 +1381,7 @@ dependencies = [
 
 [[package]]
 name = "ic-oss-can"
-version = "0.6.0"
+version = "0.6.1"
 dependencies = [
  "bytes",
  "candid",
@@ -1395,7 +1395,7 @@ dependencies = [
 
 [[package]]
 name = "ic-oss-cli"
-version = "0.6.0"
+version = "0.6.1"
 dependencies = [
  "anyhow",
  "bytes",
@@ -1422,7 +1422,7 @@ dependencies = [
 
 [[package]]
 name = "ic-oss-cose"
-version = "0.6.0"
+version = "0.6.1"
 dependencies = [
  "base64 0.21.7",
  "candid",
@@ -1443,7 +1443,7 @@ dependencies = [
 
 [[package]]
 name = "ic-oss-types"
-version = "0.6.0"
+version = "0.6.1"
 dependencies = [
  "base64 0.21.7",
  "candid",
@@ -1516,7 +1516,7 @@ checksum = "8de254dd67bbd58073e23dc1c8553ba12fa1dc610a19de94ad2bbcd0460c067f"
 
 [[package]]
 name = "ic_oss_bucket"
-version = "0.6.0"
+version = "0.6.1"
 dependencies = [
  "base64 0.21.7",
  "bytes",
@@ -1545,7 +1545,7 @@ dependencies = [
 
 [[package]]
 name = "ic_oss_cluster"
-version = "0.6.0"
+version = "0.6.1"
 dependencies = [
  "base64 0.21.7",
  "bytes",
diff --git a/Cargo.toml b/Cargo.toml
index 609f116..51cc192 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -18,7 +18,7 @@ strip = true
 opt-level = 's'
 
 [workspace.package]
-version = "0.6.0"
+version = "0.6.1"
 edition = "2021"
 repository = "https://github.com/ldclabs/ic-oss"
 keywords = ["file", "storage", "oss", "s3", "icp"]
diff --git a/examples/ai_canister/src/lib.rs b/examples/ai_canister/src/lib.rs
index 4bd04a9..e1b5c88 100644
--- a/examples/ai_canister/src/lib.rs
+++ b/examples/ai_canister/src/lib.rs
@@ -179,8 +179,6 @@ fn admin_load_model(args: LoadModelInput) -> Result {
 
 #[ic_cdk::init]
 fn init() {
-    state::save();
-    fs::save();
     // state::init_rand();
 }
 
diff --git a/src/ic_oss_bucket/ic_oss_bucket.did b/src/ic_oss_bucket/ic_oss_bucket.did
index 09287b9..19f3d72 100644
--- a/src/ic_oss_bucket/ic_oss_bucket.did
+++ b/src/ic_oss_bucket/ic_oss_bucket.did
@@ -12,11 +12,13 @@ type BTreeMap = vec record {
 };
 type BucketInfo = record {
   status : int8;
+  total_chunks : nat64;
   trusted_eddsa_pub_keys : vec blob;
   managers : vec principal;
   name : text;
   max_custom_data_size : nat16;
   auditors : vec principal;
+  total_files : nat64;
   max_children : nat16;
   enable_hash_index : bool;
   max_file_size : nat64;
@@ -24,8 +26,7 @@ type BucketInfo = record {
   visibility : nat8;
   max_folder_depth : nat8;
   trusted_ecdsa_pub_keys : vec blob;
-  file_count : nat64;
-  folder_count : nat64;
+  total_folders : nat64;
   file_id : nat32;
 };
 type CanisterArgs = variant { Upgrade : UpgradeArgs; Init : InitArgs };
diff --git a/src/ic_oss_bucket/src/api_init.rs b/src/ic_oss_bucket/src/api_init.rs
index bb0bde7..63354b1 100644
--- a/src/ic_oss_bucket/src/api_init.rs
+++ b/src/ic_oss_bucket/src/api_init.rs
@@ -99,7 +99,6 @@ fn init(args: Option<CanisterArgs>) {
         None => {}
     }
 
-    store::state::save();
     store::state::init_http_certified_data();
 }
 
diff --git a/src/ic_oss_bucket/src/api_query.rs b/src/ic_oss_bucket/src/api_query.rs
index 78978e0..af1c5e3 100644
--- a/src/ic_oss_bucket/src/api_query.rs
+++ b/src/ic_oss_bucket/src/api_query.rs
@@ -36,9 +36,7 @@ fn get_bucket_info(access_token: Option<ByteBuf>) -> Result<BucketInfo, String>
 
     Ok(store::state::with(|r| BucketInfo {
         name: r.name.clone(),
-        file_count: r.file_count,
         file_id: r.file_id,
-        folder_count: r.folder_count,
         folder_id: r.folder_id,
         max_file_size: r.max_file_size,
         max_folder_depth: r.max_folder_depth,
@@ -47,6 +45,9 @@ fn get_bucket_info(access_token: Option<ByteBuf>) -> Result<BucketInfo, String>
         enable_hash_index: r.enable_hash_index,
         status: r.status,
         visibility: r.visibility,
+        total_files: store::fs::total_files(),
+        total_chunks: store::fs::total_chunks(),
+        total_folders: store::fs::total_folders(),
         managers: r.managers.clone(),
         auditors: r.auditors.clone(),
         trusted_ecdsa_pub_keys: r.trusted_ecdsa_pub_keys.clone(),
diff --git a/src/ic_oss_bucket/src/store.rs b/src/ic_oss_bucket/src/store.rs
index 4e376d8..9cdb486 100644
--- a/src/ic_oss_bucket/src/store.rs
+++ b/src/ic_oss_bucket/src/store.rs
@@ -34,9 +34,7 @@ type Memory = VirtualMemory<DefaultMemoryImpl>;
 #[derive(Clone, Deserialize, Serialize)]
 pub struct Bucket {
     pub name: String,
-    pub file_count: u64,
     pub file_id: u32,
-    pub folder_count: u64,
     pub folder_id: u32,
     pub max_file_size: u64,
     pub max_folder_depth: u8,
@@ -58,10 +56,8 @@ impl Default for Bucket {
     fn default() -> Self {
         Self {
             name: "default".to_string(),
-            file_count: 0,
             file_id: 0,
-            folder_count: 1, // The root folder 0 is created by default
-            folder_id: 1,
+            folder_id: 1, // The root folder 0 is created by default
             max_file_size: MAX_FILE_SIZE,
             max_folder_depth: 10,
             max_children: 100,
@@ -789,6 +785,18 @@ pub mod state {
 pub mod fs {
     use super::*;
 
+    pub fn total_files() -> u64 {
+        FS_METADATA_STORE.with(|r| r.borrow().len())
+    }
+
+    pub fn total_chunks() -> u64 {
+        FS_CHUNKS_STORE.with(|r| r.borrow().len())
+    }
+
+    pub fn total_folders() -> u64 {
+        FOLDERS.with(|r| r.borrow().len() as u64)
+    }
+
     pub fn get_file_id(hash: &[u8; 32]) -> Option<u32> {
         HASHS.with(|r| r.borrow().get(hash).copied())
     }
@@ -852,7 +860,6 @@ pub mod fs {
             )?;
 
             s.folder_id = s.folder_id.saturating_add(1);
-            s.folder_count += 1;
             Ok(id)
         })
     })
@@ -884,7 +891,6 @@ pub mod fs {
             }
 
             s.file_id = s.file_id.saturating_add(1);
-            s.file_count += 1;
             parent.files.insert(id);
             FS_METADATA_STORE.with(|r| r.borrow_mut().insert(id, metadata));
             Ok(id)
diff --git a/src/ic_oss_can/src/lib.rs b/src/ic_oss_can/src/lib.rs
index 05e2619..b34b9c3 100644
--- a/src/ic_oss_can/src/lib.rs
+++ b/src/ic_oss_can/src/lib.rs
@@ -36,8 +36,6 @@ mod test {
 
     #[test]
     fn test_ic_oss_fs() {
-        fs::set_max_file_size(1024 * 1024);
-
         let files = fs::list_files(u32::MAX, 2);
         assert!(files.is_empty());
 
diff --git a/src/ic_oss_can/src/store.rs b/src/ic_oss_can/src/store.rs
index 3b872fe..e33cacb 100644
--- a/src/ic_oss_can/src/store.rs
+++ b/src/ic_oss_can/src/store.rs
@@ -4,6 +4,7 @@
 #[macro_export]
 macro_rules! ic_oss_fs {
     () => {
+        #[allow(dead_code)]
         pub mod fs {
             use candid::Principal;
             use ciborium::{from_reader, into_writer};
@@ -65,6 +66,10 @@ macro_rules! ic_oss_fs {
                 });
             }
+            pub fn total_chunks() -> u64 {
+                FS_CHUNKS_STORE.with(|r| r.borrow().len())
+            }
+
             pub fn get_file(id: u32) -> Option<FileMetadata> {
                 if id == 0 {
                     return None;
                 }
@@ -82,13 +87,12 @@ macro_rules! ic_oss_fs {
                         Err(format!("file size exceeds limit: {}", r.max_file_size))?;
                     }
 
-                    let id = r.file_id.saturating_add(1);
+                    let id = r.file_id;
                     if id == u32::MAX {
                         Err("file id overflow".to_string())?;
                     }
 
-                    r.file_id = id;
-                    r.file_count += 1;
+                    r.file_id = id.saturating_add(1);
                     r.files.insert(id, file);
                     Ok(id)
                 })
diff --git a/src/ic_oss_can/src/types.rs b/src/ic_oss_can/src/types.rs
index 705488b..412dcf0 100644
--- a/src/ic_oss_can/src/types.rs
+++ b/src/ic_oss_can/src/types.rs
@@ -11,10 +11,9 @@ use std::{
 
 pub const MILLISECONDS: u64 = 1_000_000_000;
 
-#[derive(Clone, Default, Deserialize, Serialize)]
+#[derive(Clone, Deserialize, Serialize)]
 pub struct Files {
     pub file_id: u32,
-    pub file_count: u64,
     pub max_file_size: u64,
     pub visibility: u8, // 0: private; 1: public
     pub managers: BTreeSet<Principal>, // managers can read and write
@@ -41,6 +40,18 @@ impl Files {
     }
 }
 
+impl Default for Files {
+    fn default() -> Self {
+        Self {
+            file_id: 1, // 0 is reserved for the Files data itself
+            max_file_size: MAX_FILE_SIZE,
+            visibility: 0,
+            managers: BTreeSet::new(),
+            files: BTreeMap::new(),
+        }
+    }
+}
+
 impl Storable for Files {
     const BOUND: Bound = Bound::Unbounded;
 
diff --git a/src/ic_oss_types/src/bucket.rs b/src/ic_oss_types/src/bucket.rs
index 386e3cf..215a294 100644
--- a/src/ic_oss_types/src/bucket.rs
+++ b/src/ic_oss_types/src/bucket.rs
@@ -8,9 +8,7 @@ use crate::{file::MAX_FILE_SIZE, ByteN};
 #[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
 pub struct BucketInfo {
     pub name: String,
-    pub file_count: u64,
     pub file_id: u32,
-    pub folder_count: u64,
     pub folder_id: u32,
     pub max_file_size: u64,
     pub max_folder_depth: u8,
@@ -19,6 +17,9 @@ pub struct BucketInfo {
     pub enable_hash_index: bool,
     pub status: i8, // -1: archived; 0: readable and writable; 1: readonly
     pub visibility: u8, // 0: private; 1: public
+    pub total_files: u64,
+    pub total_chunks: u64,
+    pub total_folders: u64,
     pub managers: BTreeSet<Principal>, // managers can read and write
     // auditors can read and list even if the bucket is private
     pub auditors: BTreeSet<Principal>,
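
With this patch, the counters exposed by the bucket are derived from the stable stores (FS_METADATA_STORE, FS_CHUNKS_STORE, FOLDERS) rather than from the removed file_count / folder_count running fields. A minimal sketch of how a Rust caller of ic_oss_types might read the new fields after calling the bucket's get_bucket_info endpoint; the print_usage helper below is hypothetical and not part of this change:

use ic_oss_types::bucket::BucketInfo;

// Hypothetical helper: report the counters introduced by this patch.
// `total_files`, `total_chunks` and `total_folders` replace the removed
// `file_count` / `folder_count` fields of `BucketInfo`.
fn print_usage(info: &BucketInfo) {
    println!(
        "files: {}, chunks: {}, folders: {}",
        info.total_files, info.total_chunks, info.total_folders
    );
}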