diff --git a/examples/change_password.rs b/examples/change_password.rs index 14b35b8d..fc01bd87 100644 --- a/examples/change_password.rs +++ b/examples/change_password.rs @@ -1,5 +1,6 @@ use core::str::FromStr; use rencfs::crypto::Cipher; +use rencfs::encryptedfs::EncryptedFilesystem; use rencfs::encryptedfs::{EncryptedFs, FsError}; use shush_rs::SecretString; use std::env::args; diff --git a/examples/change_password_cli.rs b/examples/change_password_cli.rs index 65fc05f9..7c006b08 100644 --- a/examples/change_password_cli.rs +++ b/examples/change_password_cli.rs @@ -6,6 +6,7 @@ use rpassword::read_password; use shush_rs::{ExposeSecret, SecretString}; use tracing::{error, info}; +use rencfs::encryptedfs::EncryptedFilesystem; use rencfs::encryptedfs::{EncryptedFs, FsError}; #[tokio::main] diff --git a/examples/encryptedfs.rs b/examples/encryptedfs.rs index d4fbdaa8..d38236db 100644 --- a/examples/encryptedfs.rs +++ b/examples/encryptedfs.rs @@ -7,7 +7,9 @@ use shush_rs::SecretString; use rencfs::crypto::Cipher; use rencfs::encryptedfs::write_all_string_to_fs; -use rencfs::encryptedfs::{CreateFileAttr, EncryptedFs, FileType, PasswordProvider}; +use rencfs::encryptedfs::{ + CreateFileAttr, EncryptedFilesystem, EncryptedFs, FileType, PasswordProvider, +}; const ROOT_INODE: u64 = 1; diff --git a/src/encryptedfs.rs b/src/encryptedfs.rs index 5179f346..494a2498 100644 --- a/src/encryptedfs.rs +++ b/src/encryptedfs.rs @@ -576,92 +576,129 @@ pub struct EncryptedFs { read_only: bool, } -impl EncryptedFs { +#[async_trait] +pub trait EncryptedFilesystem: Send + Sync { + fn exists(&self, ino: u64) -> bool; + fn is_dir(&self, ino: u64) -> bool; + fn is_file(&self, ino: u64) -> bool; + /// Create a new node in the filesystem #[allow(clippy::missing_panics_doc)] #[allow(clippy::missing_errors_doc)] - pub async fn new( - data_dir: PathBuf, - password_provider: Box, + #[allow(clippy::too_many_lines)] + async fn create( + &self, + parent: u64, + name: &SecretString, + create_attr: CreateFileAttr, + read: bool, + write: bool, + ) -> FsResult<(u64, FileAttr)>; + async fn find_by_name(&self, parent: u64, name: &SecretString) -> FsResult>; + /// Count children of a directory. This **EXCLUDES** "." and "..". + #[allow(clippy::missing_errors_doc)] + fn len(&self, ino: u64) -> FsResult; + /// Delete a directory + #[allow(clippy::missing_panics_doc)] + #[allow(clippy::missing_errors_doc)] + async fn remove_dir(&self, parent: u64, name: &SecretString) -> FsResult<()>; + /// Delete a file + #[allow(clippy::missing_panics_doc)] + #[allow(clippy::missing_errors_doc)] + async fn remove_file(&self, parent: u64, name: &SecretString) -> FsResult<()>; + fn exists_by_name(&self, parent: u64, name: &SecretString) -> FsResult; + async fn read_dir(&self, ino: u64) -> FsResult; + /// Like [`EncryptedFs::read_dir`] but with [`FileAttr`] so we don't need to query again for those. + async fn read_dir_plus(&self, ino: u64) -> FsResult; + /// Get metadata + #[allow(clippy::missing_errors_doc)] + async fn get_attr(&self, ino: u64) -> FsResult; + /// Set metadata + async fn set_attr(&self, ino: u64, set_attr: SetFileAttr) -> FsResult<()>; + /// Read the contents from an `offset`. + /// + /// If we try to read outside of file size, we return zero bytes. + /// If the file is not opened for read, it will return an error of type [FsError::InvalidFileHandle]. 
+ #[allow(clippy::missing_errors_doc)] + #[allow(clippy::cast_possible_truncation)] + async fn read(&self, ino: u64, offset: u64, buf: &mut [u8], handle: u64) -> FsResult; + async fn release(&self, handle: u64) -> FsResult<()>; + /// Check if a file is opened for reading with this handle. + async fn is_read_handle(&self, fh: u64) -> bool; + /// Check if a file is opened for writing with this handle. + async fn is_write_handle(&self, fh: u64) -> bool; + /// Writes the contents of `buf` to the file with `ino` starting at `offset`. + /// + /// If we write outside file size, we fill up with zeros until the `offset`. + /// If the file is not opened for writing, + /// it will return an error of type [FsError::InvalidFileHandle]. + async fn write(&self, ino: u64, offset: u64, buf: &[u8], handle: u64) -> FsResult; + /// Flush the data to the underlying storage. + #[allow(clippy::missing_panics_doc)] + async fn flush(&self, handle: u64) -> FsResult<()>; + /// Helpful when we want to copy just some portions of the file. + async fn copy_file_range( + &self, + file_range_req: &CopyFileRangeReq, + size: usize, + ) -> FsResult; + /// Open a file. We can open multiple times for read but only one to write at a time. + #[allow(clippy::missing_panics_doc)] + async fn open(&self, ino: u64, read: bool, write: bool) -> FsResult; + /// Truncates or extends the underlying file, updating the size of this file to become size. + #[allow(clippy::missing_panics_doc)] + #[allow(clippy::too_many_lines)] + async fn set_len(&self, ino: u64, size: u64) -> FsResult<()>; + async fn rename( + &self, + parent: u64, + name: &SecretString, + new_parent: u64, + new_name: &SecretString, + ) -> FsResult<()>; + /// Create a crypto writer using internal encryption info. + async fn create_write( + &self, + file: W, + ) -> FsResult>>; + /// Create a crypto writer with seek using internal encryption info. + async fn create_write_seek( + &self, + file: W, + ) -> FsResult>>; + /// Create a crypto reader using internal encryption info. + async fn create_read( + &self, + reader: R, + ) -> FsResult>>; + /// Create a crypto reader with seek using internal encryption info. + async fn create_read_seek( + &self, + reader: R, + ) -> FsResult>>; + /// Change the password of the filesystem used to access the encryption key. 
+ async fn passwd( + data_dir: &Path, + old_password: SecretString, + new_password: SecretString, cipher: Cipher, - read_only: bool, - ) -> FsResult> { - let key_provider = KeyProvider { - key_path: data_dir.join(SECURITY_DIR).join(KEY_ENC_FILENAME), - salt_path: data_dir.join(SECURITY_DIR).join(KEY_SALT_FILENAME), - password_provider, - cipher, - }; - let key = ExpireValue::new(key_provider, Duration::from_secs(10 * 60)); - - ensure_structure_created(&data_dir.clone()).await?; - key.get().await?; // this will check the password - - let fs = Self { - data_dir, - write_handles: RwLock::new(HashMap::new()), - read_handles: RwLock::new(HashMap::new()), - current_handle: AtomicU64::new(1), - cipher, - opened_files_for_read: RwLock::new(HashMap::new()), - opened_files_for_write: RwLock::new(HashMap::new()), - serialize_inode_locks: Arc::new(ArcHashMap::default()), - serialize_update_inode_locks: ArcHashMap::default(), - serialize_dir_entries_ls_locks: Arc::new(ArcHashMap::default()), - serialize_dir_entries_hash_locks: Arc::new(ArcHashMap::default()), - key, - self_weak: std::sync::Mutex::new(None), - read_write_locks: ArcHashMap::default(), - // todo: take duration from param - attr_cache: ExpireValue::new(AttrCacheProvider {}, Duration::from_secs(10 * 60)), - // todo: take duration from param - dir_entries_name_cache: ExpireValue::new( - DirEntryNameCacheProvider {}, - Duration::from_secs(10 * 60), - ), - // todo: take duration from param - dir_entries_meta_cache: ExpireValue::new( - DirEntryMetaCacheProvider {}, - Duration::from_secs(10 * 60), - ), - sizes_write: Mutex::default(), - sizes_read: Mutex::default(), - requested_read: Mutex::default(), - read_only, - }; - - let arc = Arc::new(fs); - arc.self_weak - .lock() - .expect("cannot obtain lock") - .replace(Arc::downgrade(&arc)); - - arc.ensure_root_exists().await?; - - Ok(arc) - } + ) -> FsResult<()>; +} - pub fn exists(&self, ino: u64) -> bool { +#[async_trait] +impl EncryptedFilesystem for EncryptedFs { + fn exists(&self, ino: u64) -> bool { self.ino_file(ino).is_file() } - pub fn is_dir(&self, ino: u64) -> bool { + fn is_dir(&self, ino: u64) -> bool { self.contents_path(ino).is_dir() } - pub fn is_file(&self, ino: u64) -> bool { + fn is_file(&self, ino: u64) -> bool { self.contents_path(ino).is_file() } - #[allow(dead_code)] - async fn is_read_only(&self) -> bool { - self.read_only - } - - /// Create a new node in the filesystem - #[allow(clippy::missing_panics_doc)] - #[allow(clippy::missing_errors_doc)] - #[allow(clippy::too_many_lines)] - pub async fn create( + async fn create( &self, parent: u64, name: &SecretString, @@ -820,11 +857,7 @@ impl EncryptedFs { #[allow(clippy::missing_panics_doc)] #[allow(clippy::missing_errors_doc)] - pub async fn find_by_name( - &self, - parent: u64, - name: &SecretString, - ) -> FsResult> { + async fn find_by_name(&self, parent: u64, name: &SecretString) -> FsResult> { if !self.exists(parent) { return Err(FsError::InodeNotFound); } @@ -851,9 +884,7 @@ impl EncryptedFs { self.get_inode_from_cache_or_storage(ino).await.map(Some) } - /// Count children of a directory. This **EXCLUDES** "." and "..". 
- #[allow(clippy::missing_errors_doc)] - pub fn len(&self, ino: u64) -> FsResult { + fn len(&self, ino: u64) -> FsResult { if !self.is_dir(ino) { return Err(FsError::InvalidInodeType); } @@ -868,10 +899,7 @@ impl EncryptedFs { Ok(count) } - /// Delete a directory - #[allow(clippy::missing_panics_doc)] - #[allow(clippy::missing_errors_doc)] - pub async fn remove_dir(&self, parent: u64, name: &SecretString) -> FsResult<()> { + async fn remove_dir(&self, parent: u64, name: &SecretString) -> FsResult<()> { if !self.is_dir(parent) { return Err(FsError::InvalidInodeType); } @@ -945,10 +973,7 @@ impl EncryptedFs { .await? } - /// Delete a file - #[allow(clippy::missing_panics_doc)] - #[allow(clippy::missing_errors_doc)] - pub async fn remove_file(&self, parent: u64, name: &SecretString) -> FsResult<()> { + async fn remove_file(&self, parent: u64, name: &SecretString) -> FsResult<()> { if !self.is_dir(parent) { return Err(FsError::InvalidInodeType); } @@ -1019,7 +1044,7 @@ impl EncryptedFs { #[allow(clippy::missing_panics_doc)] #[allow(clippy::missing_errors_doc)] - pub fn exists_by_name(&self, parent: u64, name: &SecretString) -> FsResult { + fn exists_by_name(&self, parent: u64, name: &SecretString) -> FsResult { if !self.exists(parent) { return Err(FsError::InodeNotFound); } @@ -1032,7 +1057,7 @@ impl EncryptedFs { } #[allow(clippy::missing_errors_doc)] - pub async fn read_dir(&self, ino: u64) -> FsResult { + async fn read_dir(&self, ino: u64) -> FsResult { if !self.is_dir(ino) { return Err(FsError::InvalidInodeType); } @@ -1047,8 +1072,7 @@ impl EncryptedFs { Ok(self.create_directory_entry_iterator(iter).await) } - /// Like [`EncryptedFs::read_dir`] but with [`FileAttr`] so we don't need to query again for those. - pub async fn read_dir_plus(&self, ino: u64) -> FsResult { + async fn read_dir_plus(&self, ino: u64) -> FsResult { if !self.is_dir(ino) { return Err(FsError::InvalidInodeType); } @@ -1063,303 +1087,51 @@ impl EncryptedFs { Ok(self.create_directory_entry_plus_iterator(iter).await) } - async fn create_directory_entry_plus( - &self, - entry: io::Result, - ) -> FsResult { - let entry = self.create_directory_entry(entry).await?; - let lock = self.serialize_inode_locks.clone(); - let lock_ino = lock.get_or_insert_with(entry.ino, || RwLock::new(false)); - let _ino_guard = lock_ino.read(); - let attr = self.get_inode_from_cache_or_storage(entry.ino).await?; - Ok(DirectoryEntryPlus { - ino: entry.ino, - name: entry.name, - kind: entry.kind, - attr, - }) - } + async fn get_attr(&self, ino: u64) -> FsResult { + let mut attr = self.get_inode_from_cache_or_storage(ino).await?; - async fn create_directory_entry_plus_iterator( - &self, - read_dir: ReadDir, - ) -> DirectoryEntryPlusIterator { - #[allow(clippy::cast_possible_truncation)] - let futures: Vec<_> = read_dir - .into_iter() - .map(|entry| { - let fs = { - self.self_weak - .lock() - .unwrap() - .as_ref() - .unwrap() - .upgrade() - .unwrap() - }; - DIR_ENTRIES_RT.spawn(async move { fs.create_directory_entry_plus(entry).await }) - }) - .collect(); + // merge time info with any open read handles + let open_reads = { self.opened_files_for_read.read().await.contains_key(&ino) }; + if open_reads { + let fhs = self.opened_files_for_read.read().await.get(&ino).cloned(); + if let Some(fhs) = fhs { + for fh in fhs { + let lock = self.read_handles.read().await; + if let Some(ctx) = lock.get(&fh) { + let set_atr: SetFileAttr = ctx.lock().await.attr.clone().into(); + merge_attr(&mut attr, &set_atr, false); + } + } + } + } - // do these futures in 
parallel and return them - let mut res = VecDeque::with_capacity(futures.len()); - for f in futures { - res.push_back(f.await.unwrap()); + // merge time info and size with any open write handles + let open_writes = { self.opened_files_for_write.read().await.contains_key(&ino) }; + if open_writes { + let fh = self.opened_files_for_write.read().await.get(&ino).copied(); + if let Some(fh) = fh { + let lock = self.write_handles.read().await; + if let Some(ctx) = lock.get(&fh) { + let ctx = ctx.lock().await; + merge_attr(&mut attr, &ctx.attr.clone().into(), false); + } + } } - DirectoryEntryPlusIterator(res) + + Ok(attr) } - async fn create_directory_entry( - &self, - entry: io::Result, - ) -> FsResult { - if entry.is_err() { - return Err(entry.err().unwrap().into()); + async fn set_attr(&self, ino: u64, set_attr: SetFileAttr) -> FsResult<()> { + if self.read_only { + return Err(FsError::ReadOnly); } - if let Err(e) = entry { - error!(err = %e, "reading directory entry"); - return Err(e.into()); - } - let entry = entry.unwrap(); - let name = entry.file_name().to_string_lossy().to_string(); - let name = { - if name == "$." { - SecretString::new(Box::new(".".into())) - } else if name == "$.." { - SecretString::from_str("..").unwrap() - } else { - // try from cache - let lock = self.get_dir_entries_name_cache().await?; - let mut cache = lock.lock().await; - if let Some(name_cached) = cache.get(&name).cloned() { - name_cached - } else { - drop(cache); - if let Ok(decrypted_name) = - crypto::decrypt_file_name(&name, self.cipher, &*self.key.get().await?) - .map_err(|err| { - error!(err = %err, "decrypting file name"); - err - }) - { - lock.lock().await.put(name.clone(), decrypted_name.clone()); - decrypted_name - } else { - return Err(FsError::InvalidInput("invalid file name")); - } - } - } - }; - let file_path = entry.path().to_str().unwrap().to_string(); - // try from cache - let lock = self.dir_entries_meta_cache.get().await?; - let mut cache = lock.lock().await; - if let Some((ino, kind)) = cache.get(&file_path) { - return Ok(DirectoryEntry { - ino: *ino, - name, - kind: *kind, - }); - } - drop(cache); - let lock = self - .serialize_dir_entries_ls_locks - .get_or_insert_with(file_path.clone(), || RwLock::new(false)); - let guard = lock.read().await; - let file = File::open(entry.path())?; - let res: bincode::Result<(u64, FileType)> = bincode::deserialize_from(crypto::create_read( - file, - self.cipher, - &*self.key.get().await?, - )); - drop(guard); - if let Err(e) = res { - error!(err = %e, "deserializing directory entry"); - return Err(e.into()); - } - let (ino, kind): (u64, FileType) = res.unwrap(); - // add to cache - self.dir_entries_meta_cache - .get() - .await? 
- .lock() - .await - .put(file_path, (ino, kind)); - Ok(DirectoryEntry { ino, name, kind }) - } - - async fn get_dir_entries_name_cache( - &self, - ) -> FsResult>>> { - self.dir_entries_name_cache.get().await - } - - async fn create_directory_entry_iterator(&self, read_dir: ReadDir) -> DirectoryEntryIterator { - #[allow(clippy::cast_possible_truncation)] - let futures: Vec<_> = read_dir - .into_iter() - .map(|entry| { - let fs = { - self.self_weak - .lock() - .unwrap() - .as_ref() - .unwrap() - .upgrade() - .unwrap() - }; - DIR_ENTRIES_RT.spawn(async move { fs.create_directory_entry(entry).await }) - }) - .collect(); - - // do these futures in parallel and return them - let mut res = VecDeque::with_capacity(futures.len()); - for f in futures { - res.push_back(f.await.unwrap()); - } - DirectoryEntryIterator(res) - } - - #[allow(clippy::missing_errors_doc)] - async fn get_inode_from_storage(&self, ino: u64) -> FsResult { - let lock = self - .serialize_inode_locks - .get_or_insert_with(ino, || RwLock::new(false)); - let _guard = lock.read(); - - let path = self.ino_file(ino); - if !path.is_file() { - return Err(FsError::InodeNotFound); - } - let file = OpenOptions::new().read(true).open(path).map_err(|err| { - error!(err = %err, "opening file"); - FsError::InodeNotFound - })?; - Ok(bincode::deserialize_from(crypto::create_read( - file, - self.cipher, - &*self.key.get().await?, - ))?) - } - - async fn get_inode_from_cache_or_storage(&self, ino: u64) -> FsResult { - let lock = self.attr_cache.get().await?; - let mut guard = lock.write().await; - let attr = guard.get(&ino); - if let Some(attr) = attr { - Ok(*attr) - } else { - drop(guard); - let attr = self.get_inode_from_storage(ino).await?; - let mut guard = lock.write().await; - guard.put(ino, attr); - Ok(attr) - } - } - - /// Get metadata - #[allow(clippy::missing_errors_doc)] - pub async fn get_attr(&self, ino: u64) -> FsResult { - let mut attr = self.get_inode_from_cache_or_storage(ino).await?; - - // merge time info with any open read handles - let open_reads = { self.opened_files_for_read.read().await.contains_key(&ino) }; - if open_reads { - let fhs = self.opened_files_for_read.read().await.get(&ino).cloned(); - if let Some(fhs) = fhs { - for fh in fhs { - let lock = self.read_handles.read().await; - if let Some(ctx) = lock.get(&fh) { - let set_atr: SetFileAttr = ctx.lock().await.attr.clone().into(); - merge_attr(&mut attr, &set_atr, false); - } - } - } - } - - // merge time info and size with any open write handles - let open_writes = { self.opened_files_for_write.read().await.contains_key(&ino) }; - if open_writes { - let fh = self.opened_files_for_write.read().await.get(&ino).copied(); - if let Some(fh) = fh { - let lock = self.write_handles.read().await; - if let Some(ctx) = lock.get(&fh) { - let ctx = ctx.lock().await; - merge_attr(&mut attr, &ctx.attr.clone().into(), false); - } - } - } - - Ok(attr) - } - - /// Set metadata - pub async fn set_attr(&self, ino: u64, set_attr: SetFileAttr) -> FsResult<()> { - if self.read_only { - return Err(FsError::ReadOnly); - } - self.set_attr2(ino, set_attr, false).await - } - - async fn set_attr2( - &self, - ino: u64, - set_attr: SetFileAttr, - overwrite_size: bool, - ) -> FsResult<()> { - let serialize_update_lock = self - .serialize_update_inode_locks - .get_or_insert_with(ino, || Mutex::new(false)); - let _serialize_update_guard = serialize_update_lock.lock().await; - - let mut attr = self.get_attr(ino).await?; - merge_attr(&mut attr, &set_attr, overwrite_size); - let now = 
SystemTime::now(); - attr.ctime = now; - attr.atime = now; - - self.write_inode_to_storage(&attr).await?; - - Ok(()) - } - - async fn write_inode_to_storage(&self, attr: &FileAttr) -> Result<(), FsError> { - let lock = self - .serialize_inode_locks - .get_or_insert_with(attr.ino, || RwLock::new(false)); - let guard = lock.write().await; - crypto::atomic_serialize_encrypt_into( - &self.ino_file(attr.ino), - attr, - self.cipher, - &*self.key.get().await?, - )?; - drop(guard); - // update cache also - { - let lock = self.attr_cache.get().await?; - let mut guard = lock.write().await; - guard.put(attr.ino, *attr); - } - Ok(()) - } - - /// Read the contents from an `offset`. - /// - /// If we try to read outside of file size, we return zero bytes. - /// If the file is not opened for read, it will return an error of type [FsError::InvalidFileHandle]. - #[instrument(skip(self, buf), fields(len = %buf.len()), ret(level = Level::DEBUG))] - #[allow(clippy::missing_errors_doc)] - #[allow(clippy::cast_possible_truncation)] - pub async fn read( - &self, - ino: u64, - offset: u64, - buf: &mut [u8], - handle: u64, - ) -> FsResult { - if !self.exists(ino) { - return Err(FsError::InodeNotFound); + self.set_attr2(ino, set_attr, false).await + } + + #[instrument(skip(self, buf), fields(len = %buf.len()), ret(level = Level::DEBUG))] + async fn read(&self, ino: u64, offset: u64, buf: &mut [u8], handle: u64) -> FsResult { + if !self.exists(ino) { + return Err(FsError::InodeNotFound); } if !self.is_file(ino) { return Err(FsError::InvalidInodeType); @@ -1458,7 +1230,7 @@ impl EncryptedFs { #[allow(clippy::missing_panics_doc)] #[allow(clippy::too_many_lines)] - pub async fn release(&self, handle: u64) -> FsResult<()> { + async fn release(&self, handle: u64) -> FsResult<()> { if handle == 0 { // in the case of directory or if the file was crated // without being opened we don't use a handle @@ -1568,23 +1340,16 @@ impl EncryptedFs { Ok(()) } - /// Check if a file is opened for reading with this handle. - pub async fn is_read_handle(&self, fh: u64) -> bool { + async fn is_read_handle(&self, fh: u64) -> bool { self.read_handles.read().await.contains_key(&fh) } - /// Check if a file is opened for writing with this handle. - pub async fn is_write_handle(&self, fh: u64) -> bool { + async fn is_write_handle(&self, fh: u64) -> bool { self.write_handles.read().await.contains_key(&fh) } - /// Writes the contents of `buf` to the file with `ino` starting at `offset`. - /// - /// If we write outside file size, we fill up with zeros until the `offset`. - /// If the file is not opened for writing, - /// it will return an error of type [FsError::InvalidFileHandle]. #[instrument(skip(self, buf), fields(len = %buf.len()), ret(level = Level::DEBUG))] - pub async fn write(&self, ino: u64, offset: u64, buf: &[u8], handle: u64) -> FsResult { + async fn write(&self, ino: u64, offset: u64, buf: &[u8], handle: u64) -> FsResult { if self.read_only { return Err(FsError::ReadOnly); } @@ -1690,9 +1455,7 @@ impl EncryptedFs { Ok(len) } - /// Flush the data to the underlying storage. - #[allow(clippy::missing_panics_doc)] - pub async fn flush(&self, handle: u64) -> FsResult<()> { + async fn flush(&self, handle: u64) -> FsResult<()> { if handle == 0 { // in the case of directory or if the file was crated without being opened we don't use a handle return Ok(()); @@ -1726,8 +1489,7 @@ impl EncryptedFs { Ok(()) } - /// Helpful when we want to copy just some portions of the file. 
- pub async fn copy_file_range( + async fn copy_file_range( &self, file_range_req: &CopyFileRangeReq, size: usize, @@ -1770,9 +1532,7 @@ impl EncryptedFs { Ok(len) } - /// Open a file. We can open multiple times for read but only one to write at a time. - #[allow(clippy::missing_panics_doc)] - pub async fn open(&self, ino: u64, read: bool, write: bool) -> FsResult { + async fn open(&self, ino: u64, read: bool, write: bool) -> FsResult { if write && self.read_only { return Err(FsError::ReadOnly); } @@ -1836,10 +1596,7 @@ impl EncryptedFs { Ok(fh) } - /// Truncates or extends the underlying file, updating the size of this file to become size. - #[allow(clippy::missing_panics_doc)] - #[allow(clippy::too_many_lines)] - pub async fn set_len(&self, ino: u64, size: u64) -> FsResult<()> { + async fn set_len(&self, ino: u64, size: u64) -> FsResult<()> { if self.read_only { return Err(FsError::ReadOnly); } @@ -1925,79 +1682,32 @@ impl EncryptedFs { Ok(()) } - /// This will write any dirty data to the file from all writers and reset them. - /// Timestamps and size will be updated to the storage. - /// > ⚠️ **Warning** - /// > Need to be called in a context with write lock on `self.read_write_inode.lock().await.get(ino)`. - /// > That is because we want to make sure caller is holding a lock while all writers flush and we can't - /// > lock here also as we would end-up in a deadlock. - async fn flush_and_reset_writers(&self, ino: u64) -> FsResult<()> { + #[allow(clippy::missing_panics_doc)] + async fn rename( + &self, + parent: u64, + name: &SecretBox, + new_parent: u64, + new_name: &SecretBox, + ) -> FsResult<()> { if self.read_only { return Err(FsError::ReadOnly); } - let opened_files_for_write_guard = self.opened_files_for_write.read().await; - let handle = opened_files_for_write_guard.get(&ino); - if let Some(handle) = handle { - let write_handles_guard = self.write_handles.write().await; - let ctx = write_handles_guard.get(handle); - if let Some(lock) = ctx { - let mut ctx = lock.lock().await; - - let mut writer = ctx.writer.take().unwrap(); - let file = writer.finish()?; - file.sync_all()?; - File::open(self.contents_path(ctx.ino).parent().unwrap())?.sync_all()?; - let handle = *handle; - let set_attr: SetFileAttr = ctx.attr.clone().into(); - drop(ctx); - drop(opened_files_for_write_guard); - drop(write_handles_guard); - self.set_attr(ino, set_attr).await?; - self.reset_handles(ino, Some(handle), true).await?; - let write_handles_guard = self.write_handles.write().await; - let mut ctx = write_handles_guard.get(&handle).unwrap().lock().await; - let writer = self - .create_write_seek( - OpenOptions::new() - .read(true) - .write(true) - .open(self.contents_path(ino))?, - ) - .await?; - ctx.writer = Some(Box::new(writer)); - let attr = self.get_inode_from_storage(ino).await?; - ctx.attr = attr.into(); - } - } - Ok(()) - } - - #[allow(clippy::missing_panics_doc)] - pub async fn rename( - &self, - parent: u64, - name: &SecretBox, - new_parent: u64, - new_name: &SecretBox, - ) -> FsResult<()> { - if self.read_only { - return Err(FsError::ReadOnly); - } - if !self.exists(parent) { - return Err(FsError::InodeNotFound); - } - if !self.is_dir(parent) { - return Err(FsError::InvalidInodeType); - } - if !self.exists(new_parent) { - return Err(FsError::InodeNotFound); - } - if !self.is_dir(new_parent) { - return Err(FsError::InvalidInodeType); - } - if !self.exists_by_name(parent, name)? 
{ - return Err(FsError::NotFound("name not found")); - } + if !self.exists(parent) { + return Err(FsError::InodeNotFound); + } + if !self.is_dir(parent) { + return Err(FsError::InvalidInodeType); + } + if !self.exists(new_parent) { + return Err(FsError::InodeNotFound); + } + if !self.is_dir(new_parent) { + return Err(FsError::InvalidInodeType); + } + if !self.exists_by_name(parent, name)? { + return Err(FsError::NotFound("name not found")); + } if parent == new_parent && name.expose_secret() == new_name.expose_secret() { // no-op @@ -2064,56 +1774,51 @@ impl EncryptedFs { Ok(()) } - /// Create a crypto writer using internal encryption info. - pub async fn create_write( + async fn create_write( &self, file: W, - ) -> FsResult> { - Ok(crypto::create_write( + ) -> FsResult>> { + Ok(Box::new(crypto::create_write( file, self.cipher, &*self.key.get().await?, - )) + ))) } - /// Create a crypto writer with seek using internal encryption info. - pub async fn create_write_seek( + async fn create_write_seek( &self, file: W, - ) -> FsResult> { - Ok(crypto::create_write_seek( + ) -> FsResult>> { + Ok(Box::new(crypto::create_write_seek( file, self.cipher, &*self.key.get().await?, - )) + ))) } - /// Create a crypto reader using internal encryption info. - pub async fn create_read( + async fn create_read( &self, reader: R, - ) -> FsResult> { - Ok(crypto::create_read( + ) -> FsResult>> { + Ok(Box::new(crypto::create_read( reader, self.cipher, &*self.key.get().await?, - )) + ))) } - /// Create a crypto reader with seek using internal encryption info. - pub async fn create_read_seek( + async fn create_read_seek( &self, reader: R, - ) -> FsResult> { - Ok(crypto::create_read_seek( + ) -> FsResult>> { + Ok(Box::new(crypto::create_read_seek( reader, self.cipher, &*self.key.get().await?, - )) + ))) } - /// Change the password of the filesystem used to access the encryption key. 
- pub async fn passwd( + async fn passwd( data_dir: &Path, old_password: SecretBox, new_password: SecretBox, @@ -2140,6 +1845,360 @@ impl EncryptedFs { )?; Ok(()) } +} + +impl EncryptedFs { + #[allow(clippy::missing_panics_doc)] + #[allow(clippy::missing_errors_doc)] + pub async fn new( + data_dir: PathBuf, + password_provider: Box, + cipher: Cipher, + read_only: bool, + ) -> FsResult> { + let key_provider = KeyProvider { + key_path: data_dir.join(SECURITY_DIR).join(KEY_ENC_FILENAME), + salt_path: data_dir.join(SECURITY_DIR).join(KEY_SALT_FILENAME), + password_provider, + cipher, + }; + let key = ExpireValue::new(key_provider, Duration::from_secs(10 * 60)); + + ensure_structure_created(&data_dir.clone()).await?; + key.get().await?; // this will check the password + + let fs = Self { + data_dir, + write_handles: RwLock::new(HashMap::new()), + read_handles: RwLock::new(HashMap::new()), + current_handle: AtomicU64::new(1), + cipher, + opened_files_for_read: RwLock::new(HashMap::new()), + opened_files_for_write: RwLock::new(HashMap::new()), + serialize_inode_locks: Arc::new(ArcHashMap::default()), + serialize_update_inode_locks: ArcHashMap::default(), + serialize_dir_entries_ls_locks: Arc::new(ArcHashMap::default()), + serialize_dir_entries_hash_locks: Arc::new(ArcHashMap::default()), + key, + self_weak: std::sync::Mutex::new(None), + read_write_locks: ArcHashMap::default(), + // todo: take duration from param + attr_cache: ExpireValue::new(AttrCacheProvider {}, Duration::from_secs(10 * 60)), + // todo: take duration from param + dir_entries_name_cache: ExpireValue::new( + DirEntryNameCacheProvider {}, + Duration::from_secs(10 * 60), + ), + // todo: take duration from param + dir_entries_meta_cache: ExpireValue::new( + DirEntryMetaCacheProvider {}, + Duration::from_secs(10 * 60), + ), + sizes_write: Mutex::default(), + sizes_read: Mutex::default(), + requested_read: Mutex::default(), + read_only, + }; + + let arc = Arc::new(fs); + arc.self_weak + .lock() + .expect("cannot obtain lock") + .replace(Arc::downgrade(&arc)); + + arc.ensure_root_exists().await?; + + Ok(arc) + } + + #[allow(dead_code)] + async fn is_read_only(&self) -> bool { + self.read_only + } + + async fn create_directory_entry_plus( + &self, + entry: io::Result, + ) -> FsResult { + let entry = self.create_directory_entry(entry).await?; + let lock = self.serialize_inode_locks.clone(); + let lock_ino = lock.get_or_insert_with(entry.ino, || RwLock::new(false)); + let _ino_guard = lock_ino.read(); + let attr = self.get_inode_from_cache_or_storage(entry.ino).await?; + Ok(DirectoryEntryPlus { + ino: entry.ino, + name: entry.name, + kind: entry.kind, + attr, + }) + } + + async fn create_directory_entry_plus_iterator( + &self, + read_dir: ReadDir, + ) -> DirectoryEntryPlusIterator { + #[allow(clippy::cast_possible_truncation)] + let futures: Vec<_> = read_dir + .into_iter() + .map(|entry| { + let fs = { + self.self_weak + .lock() + .unwrap() + .as_ref() + .unwrap() + .upgrade() + .unwrap() + }; + DIR_ENTRIES_RT.spawn(async move { fs.create_directory_entry_plus(entry).await }) + }) + .collect(); + + // do these futures in parallel and return them + let mut res = VecDeque::with_capacity(futures.len()); + for f in futures { + res.push_back(f.await.unwrap()); + } + DirectoryEntryPlusIterator(res) + } + + async fn create_directory_entry( + &self, + entry: io::Result, + ) -> FsResult { + if entry.is_err() { + return Err(entry.err().unwrap().into()); + } + if let Err(e) = entry { + error!(err = %e, "reading directory entry"); + return 
Err(e.into()); + } + let entry = entry.unwrap(); + let name = entry.file_name().to_string_lossy().to_string(); + let name = { + if name == "$." { + SecretString::from_str(".").unwrap() + } else if name == "$.." { + SecretString::from_str("..").unwrap() + } else { + // try from cache + let lock = self.get_dir_entries_name_cache().await?; + let mut cache = lock.lock().await; + if let Some(name_cached) = cache.get(&name).cloned() { + name_cached + } else { + drop(cache); + if let Ok(decrypted_name) = + crypto::decrypt_file_name(&name, self.cipher, &*self.key.get().await?) + .map_err(|err| { + error!(err = %err, "decrypting file name"); + err + }) + { + lock.lock().await.put(name.clone(), decrypted_name.clone()); + decrypted_name + } else { + return Err(FsError::InvalidInput("invalid file name")); + } + } + } + }; + let file_path = entry.path().to_str().unwrap().to_string(); + // try from cache + let lock = self.dir_entries_meta_cache.get().await?; + let mut cache = lock.lock().await; + if let Some((ino, kind)) = cache.get(&file_path) { + return Ok(DirectoryEntry { + ino: *ino, + name, + kind: *kind, + }); + } + drop(cache); + let lock = self + .serialize_dir_entries_ls_locks + .get_or_insert_with(file_path.clone(), || RwLock::new(false)); + let guard = lock.read().await; + let file = File::open(entry.path())?; + let res: bincode::Result<(u64, FileType)> = bincode::deserialize_from(crypto::create_read( + file, + self.cipher, + &*self.key.get().await?, + )); + drop(guard); + if let Err(e) = res { + error!(err = %e, "deserializing directory entry"); + return Err(e.into()); + } + let (ino, kind): (u64, FileType) = res.unwrap(); + // add to cache + self.dir_entries_meta_cache + .get() + .await? + .lock() + .await + .put(file_path, (ino, kind)); + Ok(DirectoryEntry { ino, name, kind }) + } + + async fn get_dir_entries_name_cache( + &self, + ) -> FsResult>>> { + self.dir_entries_name_cache.get().await + } + + async fn create_directory_entry_iterator(&self, read_dir: ReadDir) -> DirectoryEntryIterator { + #[allow(clippy::cast_possible_truncation)] + let futures: Vec<_> = read_dir + .into_iter() + .map(|entry| { + let fs = { + self.self_weak + .lock() + .unwrap() + .as_ref() + .unwrap() + .upgrade() + .unwrap() + }; + DIR_ENTRIES_RT.spawn(async move { fs.create_directory_entry(entry).await }) + }) + .collect(); + + // do these futures in parallel and return them + let mut res = VecDeque::with_capacity(futures.len()); + for f in futures { + res.push_back(f.await.unwrap()); + } + DirectoryEntryIterator(res) + } + + #[allow(clippy::missing_errors_doc)] + async fn get_inode_from_storage(&self, ino: u64) -> FsResult { + let lock = self + .serialize_inode_locks + .get_or_insert_with(ino, || RwLock::new(false)); + let _guard = lock.read(); + + let path = self.ino_file(ino); + if !path.is_file() { + return Err(FsError::InodeNotFound); + } + let file = OpenOptions::new().read(true).open(path).map_err(|err| { + error!(err = %err, "opening file"); + FsError::InodeNotFound + })?; + Ok(bincode::deserialize_from(crypto::create_read( + file, + self.cipher, + &*self.key.get().await?, + ))?) 
+ } + + async fn get_inode_from_cache_or_storage(&self, ino: u64) -> FsResult { + let lock = self.attr_cache.get().await?; + let mut guard = lock.write().await; + let attr = guard.get(&ino); + if let Some(attr) = attr { + Ok(*attr) + } else { + drop(guard); + let attr = self.get_inode_from_storage(ino).await?; + let mut guard = lock.write().await; + guard.put(ino, attr); + Ok(attr) + } + } + + async fn set_attr2( + &self, + ino: u64, + set_attr: SetFileAttr, + overwrite_size: bool, + ) -> FsResult<()> { + let serialize_update_lock = self + .serialize_update_inode_locks + .get_or_insert_with(ino, || Mutex::new(false)); + let _serialize_update_guard = serialize_update_lock.lock().await; + + let mut attr = self.get_attr(ino).await?; + merge_attr(&mut attr, &set_attr, overwrite_size); + let now = SystemTime::now(); + attr.ctime = now; + attr.atime = now; + + self.write_inode_to_storage(&attr).await?; + + Ok(()) + } + + async fn write_inode_to_storage(&self, attr: &FileAttr) -> Result<(), FsError> { + let lock = self + .serialize_inode_locks + .get_or_insert_with(attr.ino, || RwLock::new(false)); + let guard = lock.write().await; + crypto::atomic_serialize_encrypt_into( + &self.ino_file(attr.ino), + attr, + self.cipher, + &*self.key.get().await?, + )?; + drop(guard); + // update cache also + { + let lock = self.attr_cache.get().await?; + let mut guard = lock.write().await; + guard.put(attr.ino, *attr); + } + Ok(()) + } + + /// This will write any dirty data to the file from all writers and reset them. + /// Timestamps and size will be updated to the storage. + /// > ⚠️ **Warning** + /// > Need to be called in a context with write lock on `self.read_write_inode.lock().await.get(ino)`. + /// > That is because we want to make sure caller is holding a lock while all writers flush and we can't + /// > lock here also as we would end-up in a deadlock. 
+ async fn flush_and_reset_writers(&self, ino: u64) -> FsResult<()> { + if self.read_only { + return Err(FsError::ReadOnly); + } + let opened_files_for_write_guard = self.opened_files_for_write.read().await; + let handle = opened_files_for_write_guard.get(&ino); + if let Some(handle) = handle { + let write_handles_guard = self.write_handles.write().await; + let ctx = write_handles_guard.get(handle); + if let Some(lock) = ctx { + let mut ctx = lock.lock().await; + + let mut writer = ctx.writer.take().unwrap(); + let file = writer.finish()?; + file.sync_all()?; + File::open(self.contents_path(ctx.ino).parent().unwrap())?.sync_all()?; + let handle = *handle; + let set_attr: SetFileAttr = ctx.attr.clone().into(); + drop(ctx); + drop(opened_files_for_write_guard); + drop(write_handles_guard); + self.set_attr(ino, set_attr).await?; + self.reset_handles(ino, Some(handle), true).await?; + let write_handles_guard = self.write_handles.write().await; + let mut ctx = write_handles_guard.get(&handle).unwrap().lock().await; + let writer = self + .create_write_seek( + OpenOptions::new() + .read(true) + .write(true) + .open(self.contents_path(ino))?, + ) + .await?; + ctx.writer = Some(writer); + let attr = self.get_inode_from_storage(ino).await?; + ctx.attr = attr.into(); + } + } + Ok(()) + } fn next_handle(&self) -> u64 { self.current_handle @@ -2177,7 +2236,7 @@ impl EncryptedFs { let attr = self.get_inode_from_storage(ino).await?; let mut ctx = guard.get(handle).unwrap().lock().await; let reader = self.create_read_seek(File::open(&path)?).await?; - ctx.reader = Some(Box::new(reader)); + ctx.reader = Some(reader); ctx.attr = attr.into(); } } @@ -2210,7 +2269,7 @@ impl EncryptedFs { .create_write_seek(OpenOptions::new().read(true).write(true).open(&path)?) .await?; let mut ctx = lock.lock().await; - ctx.writer = Some(Box::new(writer)); + ctx.writer = Some(writer); let attr = self.get_inode_from_storage(ino).await?; ctx.attr = attr.into(); } @@ -2234,7 +2293,7 @@ impl EncryptedFs { let ctx = ReadHandleContext { ino, attr, - reader: Some(Box::new(reader)), + reader: Some(reader), }; self.read_handles .write() @@ -2267,7 +2326,7 @@ impl EncryptedFs { let ctx = WriteHandleContext { ino, attr, - writer: Some(Box::new(writer)), + writer: Some(writer), }; self.write_handles .write() @@ -2449,6 +2508,7 @@ impl EncryptedFs { } } } + pub struct CopyFileRangeReq { src_ino: u64, src_offset: u64, diff --git a/src/encryptedfs/bench.rs b/src/encryptedfs/bench.rs index 9508f436..98819566 100644 --- a/src/encryptedfs/bench.rs +++ b/src/encryptedfs/bench.rs @@ -8,6 +8,8 @@ use rand::Rng; #[allow(unused_imports)] use shush_rs::SecretString; +#[allow(unused_imports)] +use crate::encryptedfs::EncryptedFilesystem; #[allow(unused_imports)] use crate::encryptedfs::{DirectoryEntry, DirectoryEntryPlus, FileType, ROOT_INODE}; #[allow(unused_imports)] diff --git a/src/encryptedfs/test.rs b/src/encryptedfs/test.rs index b2091724..7c9d000a 100644 --- a/src/encryptedfs/test.rs +++ b/src/encryptedfs/test.rs @@ -7,6 +7,7 @@ use tracing_test::traced_test; use crate::crypto::Cipher; use crate::encryptedfs::write_all_bytes_to_fs; +use crate::encryptedfs::EncryptedFilesystem; use crate::encryptedfs::INODES_DIR; use crate::encryptedfs::KEY_ENC_FILENAME; use crate::encryptedfs::KEY_SALT_FILENAME; diff --git a/src/mount/linux.rs b/src/mount/linux.rs index d6d3cc9d..81e01ce4 100644 --- a/src/mount/linux.rs +++ b/src/mount/linux.rs @@ -30,6 +30,7 @@ use tracing::{debug, error, instrument, trace, warn}; use tracing::{info, Level}; use 
crate::crypto::Cipher; +use crate::encryptedfs::EncryptedFilesystem; use crate::encryptedfs::{ CopyFileRangeReq, CreateFileAttr, EncryptedFs, FileAttr, FileType, FsError, FsResult, PasswordProvider, SetFileAttr, diff --git a/src/run.rs b/src/run.rs index 01b7ae6c..a21c9f6a 100644 --- a/src/run.rs +++ b/src/run.rs @@ -17,6 +17,7 @@ use tracing::{error, info, warn, Level}; use crate::keyring; use rencfs::crypto::Cipher; +use rencfs::encryptedfs::EncryptedFilesystem; use rencfs::encryptedfs::{EncryptedFs, FsError, PasswordProvider}; use rencfs::mount::MountPoint; use rencfs::{log, mount}; diff --git a/src/test_common.rs b/src/test_common.rs index bd047378..38836502 100644 --- a/src/test_common.rs +++ b/src/test_common.rs @@ -11,6 +11,7 @@ use thread_local::ThreadLocal; use tokio::sync::Mutex; use crate::crypto::Cipher; +use crate::encryptedfs::EncryptedFilesystem; use crate::encryptedfs::{ CopyFileRangeReq, CreateFileAttr, EncryptedFs, FileType, PasswordProvider, };
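
The caller-facing effect of this change is visible in the example and mount diffs above: the methods keep their behavior, but they now live on the new `EncryptedFilesystem` trait, so downstream code must bring the trait into scope and can be written against the trait instead of the concrete `EncryptedFs`. Below is a minimal sketch of that, not part of this patch — the helper name is hypothetical, and the `FsResult` payload types used here (`FileAttr` for `get_attr`, `usize` for `len`) are assumptions inferred from the call sites in the diff:

```rust
use rencfs::encryptedfs::{EncryptedFilesystem, FsResult};

const ROOT_INODE: u64 = 1;

// Hypothetical helper: generic over the new trait, so it accepts anything
// implementing EncryptedFilesystem, not only the concrete EncryptedFs.
async fn print_root_summary<FS: EncryptedFilesystem + ?Sized>(fs: &FS) -> FsResult<()> {
    // get_attr and len are trait methods now; without
    // `use rencfs::encryptedfs::EncryptedFilesystem;` these calls no longer resolve.
    let attr = fs.get_attr(ROOT_INODE).await?;
    let children = fs.len(ROOT_INODE)?; // excludes "." and ".."
    println!("root inode {} has {} children", attr.ino, children);
    Ok(())
}
```

Note that `create_read`/`create_write` (and their `_seek` variants) now return boxed readers and writers (`Box<dyn ...>`), which is why call sites such as `reset_handles` drop their own `Box::new(...)` wrapping in this diff.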