diff --git a/Cargo.toml b/Cargo.toml index 3496cba..bf9b915 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/jaemk/cached" documentation = "https://docs.rs/cached" readme = "README.md" categories = ["caching", "data-structures"] -keywords = ["caching", "cache", "memoize", "lru", "redis"] +keywords = ["caching", "cache", "memoize", "lru", "redis", "disk"] license = "MIT" edition = "2018" @@ -26,6 +26,7 @@ redis_connection_manager = ["redis_store", "redis/connection-manager"] redis_async_std = ["redis_store", "async", "redis/aio", "redis/async-std-comp", "redis/tls", "redis/async-std-tls-comp"] redis_tokio = ["redis_store", "async", "redis/aio", "redis/tokio-comp", "redis/tls", "redis/tokio-native-tls-comp"] redis_ahash = ["redis_store", "redis/ahash"] +disk_store = ["sled", "serde", "rmp-serde", "directories"] wasm = ["instant/wasm-bindgen"] [dependencies.cached_proc_macro] @@ -68,6 +69,18 @@ version = "0.24" features = ["r2d2"] optional = true +[dependencies.sled] +version = "0.34" +optional = true + +[dependencies.rmp-serde] +version ="1.1" +optional = true + +[dependencies.directories] +version ="4.0" +optional = true + [dependencies.r2d2] version = "0.8" optional = true diff --git a/README.md b/README.md index c02165e..3081539 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,7 @@ of un-cached arguments, specify `#[cached(sync_writes = true)]` / `#[once(sync_w - `redis_connection_manager`: Enable the optional `connection-manager` feature of `redis`. Any async redis caches created will use a connection manager instead of a `MultiplexedConnection` - `redis_ahash`: Enable the optional `ahash` feature of `redis` +- `disk_store`: Include disk cache store - `wasm`: Enable WASM support. 
Note that this feature is incompatible with `tokio`'s multi-thread runtime (`async_tokio_rt_multi_thread`) and all Redis features (`redis_store`, `redis_async_std`, `redis_tokio`, `redis_ahash`) @@ -137,6 +138,35 @@ async fn async_cached_sleep_secs(secs: u64) -> Result { } ``` +---- + +```rust,no_run,ignore +use cached::proc_macro::io_cached; +use cached::DiskCache; +use thiserror::Error; + +#[derive(Error, Debug, PartialEq, Clone)] +enum ExampleError { + #[error("error with disk cache `{0}`")] + DiskError(String), +} + +/// Cache the results of a function on disk. +/// Cache files will be stored under the system cache dir +/// unless otherwise specified with `disk_dir` or the `create` argument. +/// A `map_error` closure must be specified to convert any +/// disk cache errors into the same type of error returned +/// by your function. All `io_cached` functions must return `Result`s. +#[io_cached( + map_error = r##"|e| ExampleError::DiskError(format!("{:?}", e))"##, + disk = true +)] +fn cached_sleep_secs(secs: u64) -> Result { + std::thread::sleep(std::time::Duration::from_secs(secs)); + Ok(secs.to_string()) +} +``` + Functions defined via macros will have their results cached using the function's arguments as a key, a `convert` expression specified on a procedural macros, diff --git a/cached_proc_macro/src/io_cached.rs b/cached_proc_macro/src/io_cached.rs index 9c45bed..4c38843 100644 --- a/cached_proc_macro/src/io_cached.rs +++ b/cached_proc_macro/src/io_cached.rs @@ -12,6 +12,10 @@ use syn::{ struct IOMacroArgs { map_error: String, #[darling(default)] + disk: bool, + #[darling(default)] + disk_dir: Option, + #[darling(default)] redis: bool, #[darling(default)] cache_prefix_block: Option, @@ -149,6 +153,7 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream { Some(ref name) => Ident::new(name, fn_ident.span()), None => Ident::new(&fn_ident.to_string().to_uppercase(), fn_ident.span()), }; + let cache_name = cache_ident.to_string(); let 
(cache_key_ty, key_convert_block) = make_cache_key_type( &args.key, @@ -161,13 +166,15 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream { // make the cache type and create statement let (cache_ty, cache_create) = match ( &args.redis, + &args.disk, &args.time, &args.time_refresh, &args.cache_prefix_block, &args.cache_type, &args.cache_create, ) { - (true, time, time_refresh, cache_prefix, cache_type, cache_create) => { + // redis + (true, false, time, time_refresh, cache_prefix, cache_type, cache_create) => { let cache_ty = match cache_type { Some(cache_type) => { let cache_type = @@ -234,7 +241,63 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream { }; (cache_ty, cache_create) } - (_, time, time_refresh, cache_prefix, cache_type, cache_create) => { + // disk + (false, true, time, time_refresh, _, cache_type, cache_create) => { + let cache_ty = match cache_type { + Some(cache_type) => { + let cache_type = + parse_str::(cache_type).expect("unable to parse cache type"); + quote! { #cache_type } + } + None => { + // https://github.com/spacejam/sled?tab=readme-ov-file#interaction-with-async + quote! { cached::DiskCache<#cache_key_ty, #cache_value_ty> } + } + }; + let cache_create = match cache_create { + Some(cache_create) => { + if time.is_some() || time_refresh.is_some() { + panic!( + "cannot specify `time` or `time_refresh` when passing `create` block" + ); + } else { + let cache_create = parse_str::(cache_create.as_ref()) + .expect("unable to parse cache create block"); + quote! { #cache_create } + } + } + None => { + let create = quote! { + cached::DiskCache::new(#cache_name) + }; + let create = match time { + None => create, + Some(time) => { + quote! { + (#create).set_lifespan(#time) + } + } + }; + let create = match time_refresh { + None => create, + Some(time_refresh) => { + quote!
{ + (#create).set_refresh(#time_refresh) + } + } + }; + let create = match args.disk_dir { + None => create, + Some(disk_dir) => { + quote! { (#create).set_disk_directory(#disk_dir) } + } + }; + quote! { (#create).build().expect("error constructing DiskCache in #[io_cached] macro") } + } + }; + (cache_ty, cache_create) + } + (_, _, time, time_refresh, cache_prefix, cache_type, cache_create) => { let cache_ty = match cache_type { Some(cache_type) => { let cache_type = @@ -270,7 +333,7 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream { let (set_cache_block, return_cache_block) = { let (set_cache_block, return_cache_block) = if args.with_cached_flag { ( - if asyncness.is_some() { + if asyncness.is_some() && !args.disk { quote! { if let Ok(result) = &result { cache.cache_set(key, result.value.clone()).await.map_err(#map_error)?; @@ -287,7 +350,7 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream { ) } else { ( - if asyncness.is_some() { + if asyncness.is_some() && !args.disk { quote! { if let Ok(result) = &result { cache.cache_set(key, result.clone()).await.map_err(#map_error)?; @@ -342,6 +405,29 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream { ); fill_in_attributes(&mut attributes, cache_fn_doc_extra); + let async_trait = if asyncness.is_some() && !args.disk { + quote! { + use cached::IOCachedAsync; + } + } else { + quote! { + use cached::IOCached; + } + }; + + let async_cache_get_return = if asyncness.is_some() && !args.disk { + quote! { + if let Some(result) = cache.cache_get(&key).await.map_err(#map_error)? { + #return_cache_block + } + } + } else { + quote! { + if let Some(result) = cache.cache_get(&key).map_err(#map_error)? { + #return_cache_block + } + } + }; // put it all together let expanded = if asyncness.is_some() { quote! 
{ @@ -352,14 +438,12 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream { #(#attributes)* #visibility #signature_no_muts { let init = || async { #cache_create }; - use cached::IOCachedAsync; + #async_trait let key = #key_convert_block; { // check if the result is cached let cache = &#cache_ident.get_or_init(init).await; - if let Some(result) = cache.cache_get(&key).await.map_err(#map_error)? { - #return_cache_block - } + #async_cache_get_return } #do_set_return_block } @@ -367,7 +451,7 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream { #[doc = #prime_fn_indent_doc] #[allow(dead_code)] #visibility #prime_sig { - use cached::IOCachedAsync; + #async_trait let init = || async { #cache_create }; let key = #key_convert_block; #do_set_return_block diff --git a/cached_proc_macro/src/lib.rs b/cached_proc_macro/src/lib.rs index 1b40fde..6a4a998 100644 --- a/cached_proc_macro/src/lib.rs +++ b/cached_proc_macro/src/lib.rs @@ -68,6 +68,7 @@ pub fn once(args: TokenStream, input: TokenStream) -> TokenStream { /// the error type returned by your function. /// - `name`: (optional, string) specify the name for the generated cache, defaults to the function name uppercase. /// - `redis`: (optional, bool) default to a `RedisCache` or `AsyncRedisCache` +/// - `disk`: (optional, bool) use a `DiskCache`, this must be set to true even if `type` and `create` are specified. /// - `time`: (optional, u64) specify a cache TTL in seconds, implies the cache type is a `TimedCached` or `TimedSizedCache`. /// - `time_refresh`: (optional, bool) specify whether to refresh the TTL on cache hits. /// - `type`: (optional, string type) explicitly specify the cache store type to use. 
diff --git a/examples/disk.rs b/examples/disk.rs new file mode 100644 index 0000000..83bbe5b --- /dev/null +++ b/examples/disk.rs @@ -0,0 +1,46 @@ +/* +run with required features: + cargo run --example disk --features "disk_store" + */ + +use cached::proc_macro::io_cached; +use std::io; +use std::io::Write; +use std::time::Duration; +use thiserror::Error; + +#[derive(Error, Debug, PartialEq, Clone)] +enum ExampleError { + #[error("error with disk cache `{0}`")] + DiskError(String), +} + +// When the macro constructs your DiskCache instance, the default +// cache files will be stored under $system_cache_dir/cached_disk_cache/ +#[io_cached( + disk = true, + time = 30, + map_error = r##"|e| ExampleError::DiskError(format!("{:?}", e))"## +)] +fn cached_sleep_secs(secs: u64) -> Result<(), ExampleError> { + std::thread::sleep(Duration::from_secs(secs)); + Ok(()) +} + +fn main() { + print!("1. first sync call with a 2 seconds sleep..."); + io::stdout().flush().unwrap(); + cached_sleep_secs(2).unwrap(); + println!("done"); + print!("second sync call with a 2 seconds sleep (it should be fast)..."); + io::stdout().flush().unwrap(); + cached_sleep_secs(2).unwrap(); + println!("done"); + + use cached::IOCached; + CACHED_SLEEP_SECS.cache_remove(&2).unwrap(); + print!("third sync call with a 2 seconds sleep (slow, after cache-remove)..."); + io::stdout().flush().unwrap(); + cached_sleep_secs(2).unwrap(); + println!("done"); +} diff --git a/src/lib.rs b/src/lib.rs index e12752b..fa49324 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -32,6 +32,7 @@ of un-cached arguments, specify `#[cached(sync_writes = true)]` / `#[once(sync_w - `redis_connection_manager`: Enable the optional `connection-manager` feature of `redis`. Any async redis caches created will use a connection manager instead of a `MultiplexedConnection` - `redis_ahash`: Enable the optional `ahash` feature of `redis` +- `disk_store`: Include disk cache store - `wasm`: Enable WASM support. 
Note that this feature is incompatible with `tokio`'s multi-thread runtime (`async_tokio_rt_multi_thread`) and all Redis features (`redis_store`, `redis_async_std`, `redis_tokio`, `redis_ahash`) @@ -139,6 +140,35 @@ async fn async_cached_sleep_secs(secs: u64) -> Result { } ``` +---- + +```rust,no_run,ignore +use cached::proc_macro::io_cached; +use cached::DiskCache; +use thiserror::Error; + +#[derive(Error, Debug, PartialEq, Clone)] +enum ExampleError { + #[error("error with disk cache `{0}`")] + DiskError(String), +} + +/// Cache the results of a function on disk. +/// Cache files will be stored under the system cache dir +/// unless otherwise specified with `disk_dir` or the `create` argument. +/// A `map_error` closure must be specified to convert any +/// disk cache errors into the same type of error returned +/// by your function. All `io_cached` functions must return `Result`s. +#[io_cached( + map_error = r##"|e| ExampleError::DiskError(format!("{:?}", e))"##, + disk = true +)] +fn cached_sleep_secs(secs: u64) -> Result { + std::thread::sleep(std::time::Duration::from_secs(secs)); + Ok(secs.to_string()) +} +``` + Functions defined via macros will have their results cached using the function's arguments as a key, a `convert` expression specified on a procedural macros, @@ -186,6 +216,9 @@ pub use stores::AsyncRedisCache; pub use stores::{ CanExpire, ExpiringValueCache, SizedCache, TimedCache, TimedSizedCache, UnboundCache, }; +#[cfg(feature = "disk_store")] +#[cfg_attr(docsrs, doc(cfg(feature = "disk_store")))] +pub use stores::{DiskCache, DiskCacheError}; #[cfg(feature = "redis_store")] #[cfg_attr(docsrs, doc(cfg(feature = "redis_store")))] pub use stores::{RedisCache, RedisCacheError}; diff --git a/src/stores/disk.rs b/src/stores/disk.rs new file mode 100644 index 0000000..42eecbf --- /dev/null +++ b/src/stores/disk.rs @@ -0,0 +1,394 @@ +use crate::IOCached; +use directories::BaseDirs; +use instant::Duration; +use serde::de::DeserializeOwned; +use 
serde::Serialize; +use sled::Db; +use std::marker::PhantomData; +use std::path::Path; +use std::{fmt::Display, path::PathBuf, time::SystemTime}; + +pub struct DiskCacheBuilder { + seconds: Option, + refresh: bool, + disk_dir: Option, + cache_name: String, + _phantom: PhantomData<(K, V)>, +} + +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum DiskCacheBuildError { + #[error("Storage connection error")] + ConnectionError(#[from] sled::Error), + #[error("Connection string not specified or invalid in env var {env_key:?}: {error:?}")] + MissingDiskPath { + env_key: String, + error: std::env::VarError, + }, +} + +static DISK_FILE_PREFIX: &str = "cached_disk_cache"; +const DISK_FILE_VERSION: u64 = 1; + +impl DiskCacheBuilder +where + K: Display, + V: Serialize + DeserializeOwned, +{ + /// Initialize a `DiskCacheBuilder` + pub fn new>(cache_name: S) -> DiskCacheBuilder { + Self { + seconds: None, + refresh: false, + disk_dir: None, + cache_name: cache_name.as_ref().to_string(), + _phantom: Default::default(), + } + } + + /// Specify the cache TTL/lifespan in seconds + pub fn set_lifespan(mut self, seconds: u64) -> Self { + self.seconds = Some(seconds); + self + } + + /// Specify whether cache hits refresh the TTL + pub fn set_refresh(mut self, refresh: bool) -> Self { + self.refresh = refresh; + self + } + + /// Set the disk path for where the data will be stored + pub fn set_disk_directory>(mut self, dir: P) -> Self { + self.disk_dir = Some(dir.as_ref().into()); + self + } + + fn default_disk_dir() -> PathBuf { + BaseDirs::new() + .map(|base_dirs| { + let exe_name = std::env::current_exe() + .ok() + .and_then(|path| { + path.file_name() + .and_then(|os_str| os_str.to_str().map(|s| format!("{}_", s))) + }) + .unwrap_or_default(); + let dir_prefix = format!("{}{}", exe_name, DISK_FILE_PREFIX); + base_dirs.cache_dir().join(dir_prefix) + }) + .unwrap_or_else(|| { + std::env::current_dir().expect("disk cache unable to determine current directory") + }) + } + + pub fn 
build(self) -> Result, DiskCacheBuildError> { + let disk_dir = self.disk_dir.unwrap_or_else(|| Self::default_disk_dir()); + let disk_path = disk_dir.join(format!("{}_v{}", self.cache_name, DISK_FILE_VERSION)); + let connection = sled::open(disk_path.clone())?; + + Ok(DiskCache { + seconds: self.seconds, + refresh: self.refresh, + version: DISK_FILE_VERSION, + disk_path, + connection, + _phantom: self._phantom, + }) + } +} + +/// Cache store backed by disk +pub struct DiskCache { + pub(super) seconds: Option, + pub(super) refresh: bool, + #[allow(unused)] + version: u64, + #[allow(unused)] + disk_path: PathBuf, + connection: Db, + _phantom: PhantomData<(K, V)>, +} + +impl DiskCache +where + K: Display, + V: Serialize + DeserializeOwned, +{ + #[allow(clippy::new_ret_no_self)] + /// Initialize a `DiskCacheBuilder` + pub fn new(cache_name: &str) -> DiskCacheBuilder { + DiskCacheBuilder::new(cache_name) + } + + pub fn remove_expired_entries(&self, connection: &Db) { + let now = SystemTime::now(); + + for (key, value) in connection.iter().flatten() { + if let Ok(cached) = rmp_serde::from_slice::>(&value) { + if let Some(lifetime_seconds) = self.seconds { + if now + .duration_since(cached.created_at) + .unwrap_or(Duration::from_secs(0)) + >= Duration::from_secs(lifetime_seconds) + { + let _ = connection.remove(key); + } + } + } + } + } +} + +#[derive(Error, Debug)] +pub enum DiskCacheError { + #[error("Storage error")] + StorageError(#[from] sled::Error), + #[error("Error deserializing cached value")] + CacheDeserializationError(#[from] rmp_serde::decode::Error), + #[error("Error serializing cached value")] + CacheSerializationError(#[from] rmp_serde::encode::Error), +} + +#[derive(serde::Serialize, serde::Deserialize)] +struct CachedDiskValue { + pub(crate) value: V, + pub(crate) created_at: SystemTime, + pub(crate) version: u64, +} + +impl CachedDiskValue { + fn new(value: V) -> Self { + Self { + value, + created_at: SystemTime::now(), + version: 1, + } + } + + fn 
refresh_created_at(&mut self) { + self.created_at = SystemTime::now(); + } +} + +impl IOCached for DiskCache +where + K: Display, + V: Serialize + DeserializeOwned, +{ + type Error = DiskCacheError; + + fn cache_get(&self, key: &K) -> Result, DiskCacheError> { + let key = key.to_string(); + let seconds = self.seconds; + let refresh = self.refresh; + let update = |old: Option<&[u8]>| -> Option> { + if old.is_none() { + return None; + } + let old = old.unwrap(); + if seconds.is_none() { + return Some(old.to_vec()); + } + let seconds = seconds.unwrap(); + let mut cached = rmp_serde::from_slice::>(old) + .expect("error deserializing cached disk value"); + if SystemTime::now() + .duration_since(cached.created_at) + .unwrap_or(Duration::from_secs(0)) + < Duration::from_secs(seconds) + { + if refresh { + cached.refresh_created_at(); + } + let cache_val = + rmp_serde::to_vec(&cached).expect("error serializing cached disk value"); + Some(cache_val) + } else { + None + } + }; + + if let Some(data) = self.connection.update_and_fetch(&key, update)? { + let cached = rmp_serde::from_slice::>(&data)?; + Ok(Some(cached.value)) + } else { + Ok(None) + } + } + + fn cache_set(&self, key: K, value: V) -> Result, DiskCacheError> { + let key = key.to_string(); + let value = rmp_serde::to_vec(&CachedDiskValue::new(value))?; + + if let Some(data) = self.connection.insert(key, value)? { + let cached = rmp_serde::from_slice::>(&data)?; + + if let Some(lifetime_seconds) = self.seconds { + if SystemTime::now() + .duration_since(cached.created_at) + .unwrap_or(Duration::from_secs(0)) + < Duration::from_secs(lifetime_seconds) + { + Ok(Some(cached.value)) + } else { + Ok(None) + } + } else { + Ok(Some(cached.value)) + } + } else { + Ok(None) + } + } + + fn cache_remove(&self, key: &K) -> Result, DiskCacheError> { + let key = key.to_string(); + if let Some(data) = self.connection.remove(key)? 
{ + let cached = rmp_serde::from_slice::>(&data)?; + + if let Some(lifetime_seconds) = self.seconds { + if SystemTime::now() + .duration_since(cached.created_at) + .unwrap_or(Duration::from_secs(0)) + < Duration::from_secs(lifetime_seconds) + { + Ok(Some(cached.value)) + } else { + Ok(None) + } + } else { + Ok(Some(cached.value)) + } + } else { + Ok(None) + } + } + + fn cache_lifespan(&self) -> Option { + self.seconds + } + + fn cache_set_lifespan(&mut self, seconds: u64) -> Option { + let old = self.seconds; + self.seconds = Some(seconds); + old + } + + fn cache_set_refresh(&mut self, refresh: bool) -> bool { + let old = self.refresh; + self.refresh = refresh; + old + } +} + +#[cfg(test)] +/// Cache store tests +mod tests { + use std::thread::sleep; + use std::time::Duration; + + use super::*; + + fn now_millis() -> u128 { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_millis() + } + + #[test] + fn disk_set_get_remove() { + let cache: DiskCache = + DiskCache::new(&format!("{}:disk-cache-test-sgr", now_millis())) + .set_disk_directory(std::env::temp_dir().join("cachedtest-sgr")) + .build() + .unwrap(); + + let cached = cache.cache_get(&6).unwrap(); + assert!(cached.is_none()); + + let cached = cache.cache_set(6, 4444).unwrap(); + assert_eq!(cached, None); + + let cached = cache.cache_set(6, 5555).unwrap(); + assert_eq!(cached, Some(4444)); + + let cached = cache.cache_get(&6).unwrap(); + assert_eq!(cached, Some(5555)); + + let cached = cache.cache_remove(&6).unwrap(); + assert_eq!(cached, Some(5555)); + + let cached = cache.cache_get(&6).unwrap(); + assert!(cached.is_none()); + + drop(cache); + } + + #[test] + fn disk_expire() { + let mut c: DiskCache = + DiskCache::new(&format!("{}:disk-cache-test", now_millis())) + .set_lifespan(2) + .build() + .unwrap(); + + assert!(c.cache_get(&1).unwrap().is_none()); + + assert!(c.cache_set(1, 100).unwrap().is_none()); + assert!(c.cache_get(&1).unwrap().is_some()); + + 
sleep(Duration::new(2, 500_000_000)); + assert!(c.cache_get(&1).unwrap().is_none()); + + let old = c.cache_set_lifespan(1).unwrap(); + assert_eq!(2, old); + assert!(c.cache_set(1, 100).unwrap().is_none()); + assert!(c.cache_get(&1).unwrap().is_some()); + + sleep(Duration::new(1, 600_000_000)); + assert!(c.cache_get(&1).unwrap().is_none()); + + c.cache_set_lifespan(10).unwrap(); + assert!(c.cache_set(1, 100).unwrap().is_none()); + assert!(c.cache_set(2, 100).unwrap().is_none()); + assert_eq!(c.cache_get(&1).unwrap().unwrap(), 100); + assert_eq!(c.cache_get(&2).unwrap().unwrap(), 100); + } + + #[test] + fn disk_remove() { + let cache: DiskCache = + DiskCache::new(&format!("{}:disk-cache-test-remove", now_millis())) + .set_disk_directory(std::env::temp_dir().join("cachedtest-remove")) + .build() + .unwrap(); + + assert!(cache.cache_set(1, 100).unwrap().is_none()); + assert!(cache.cache_set(2, 200).unwrap().is_none()); + assert!(cache.cache_set(3, 300).unwrap().is_none()); + + assert_eq!(100, cache.cache_remove(&1).unwrap().unwrap()); + + drop(cache); + } + + #[test] + fn disk_default_cache_dir() { + let cache: DiskCache = + DiskCache::new(&format!("{}:disk-cache-test-default-dir", now_millis())) + .build() + .unwrap(); + + assert!(cache.cache_set(1, 100).unwrap().is_none()); + assert!(cache.cache_set(2, 200).unwrap().is_none()); + assert!(cache.cache_set(3, 300).unwrap().is_none()); + + assert_eq!(100, cache.cache_remove(&1).unwrap().unwrap()); + + drop(cache); + } +} diff --git a/src/stores/mod.rs b/src/stores/mod.rs index 7753a3f..3aef8c4 100644 --- a/src/stores/mod.rs +++ b/src/stores/mod.rs @@ -8,6 +8,8 @@ use std::hash::Hash; #[cfg(feature = "async")] use {super::CachedAsync, async_trait::async_trait, futures::Future}; +#[cfg(feature = "disk_store")] +mod disk; mod expiring_value_cache; #[cfg(feature = "redis_store")] mod redis; @@ -16,6 +18,8 @@ mod timed; mod timed_sized; mod unbound; +#[cfg(feature = "disk_store")] +pub use crate::stores::disk::{DiskCache, 
DiskCacheBuildError, DiskCacheBuilder, DiskCacheError}; #[cfg(feature = "redis_store")] #[cfg_attr(docsrs, doc(cfg(feature = "redis_store")))] pub use crate::stores::redis::{ diff --git a/tests/cached.rs b/tests/cached.rs index 2df201c..97fd7af 100644 --- a/tests/cached.rs +++ b/tests/cached.rs @@ -1208,6 +1208,111 @@ fn test_mutable_args_once() { assert_eq!((2, 2), mutable_args_once(5, 6)); } +#[cfg(feature = "disk_store")] +mod disk_tests { + use super::*; + use cached::proc_macro::io_cached; + use cached::DiskCache; + use thiserror::Error; + + #[derive(Error, Debug, PartialEq, Clone)] + enum TestError { + #[error("error with disk cache `{0}`")] + DiskError(String), + #[error("count `{0}`")] + Count(u32), + } + + #[io_cached( + disk = true, + time = 1, + map_error = r##"|e| TestError::DiskError(format!("{:?}", e))"## + )] + fn cached_disk(n: u32) -> Result { + if n < 5 { + Ok(n) + } else { + Err(TestError::Count(n)) + } + } + + #[test] + fn test_cached_disk() { + assert_eq!(cached_disk(1), Ok(1)); + assert_eq!(cached_disk(1), Ok(1)); + assert_eq!(cached_disk(5), Err(TestError::Count(5))); + assert_eq!(cached_disk(6), Err(TestError::Count(6))); + } + + #[io_cached( + disk = true, + time = 1, + with_cached_flag = true, + map_error = r##"|e| TestError::DiskError(format!("{:?}", e))"## + )] + fn cached_disk_cached_flag(n: u32) -> Result, TestError> { + if n < 5 { + Ok(cached::Return::new(n)) + } else { + Err(TestError::Count(n)) + } + } + + #[test] + fn test_cached_disk_cached_flag() { + assert!(!cached_disk_cached_flag(1).unwrap().was_cached); + assert!(cached_disk_cached_flag(1).unwrap().was_cached); + assert!(cached_disk_cached_flag(5).is_err()); + assert!(cached_disk_cached_flag(6).is_err()); + } + + #[io_cached( + map_error = r##"|e| TestError::DiskError(format!("{:?}", e))"##, + type = "cached::DiskCache", + create = r##" { DiskCache::new("cached_disk_cache_create").set_lifespan(1).set_refresh(true).build().expect("error building disk cache") } "## + )] + fn 
cached_disk_cache_create(n: u32) -> Result { + if n < 5 { + Ok(n) + } else { + Err(TestError::Count(n)) + } + } + + #[test] + fn test_cached_disk_cache_create() { + assert_eq!(cached_disk_cache_create(1), Ok(1)); + assert_eq!(cached_disk_cache_create(1), Ok(1)); + assert_eq!(cached_disk_cache_create(5), Err(TestError::Count(5))); + assert_eq!(cached_disk_cache_create(6), Err(TestError::Count(6))); + } + + #[cfg(feature = "async")] + mod async_test { + use super::*; + + #[io_cached( + disk = true, + map_error = r##"|e| TestError::DiskError(format!("{:?}", e))"## + )] + async fn async_cached_disk(n: u32) -> Result { + if n < 5 { + Ok(n) + } else { + Err(TestError::Count(n)) + } + } + + #[tokio::test] + async fn test_async_cached_disk() { + assert_eq!(async_cached_disk(1).await, Ok(1)); + assert_eq!(async_cached_disk(1).await, Ok(1)); + assert_eq!(async_cached_disk(5).await, Err(TestError::Count(5))); + assert_eq!(async_cached_disk(6).await, Err(TestError::Count(6))); + } + } +} + #[cfg(feature = "redis_store")] mod redis_tests { use super::*;