Merge pull request #145 from tirithen/disk-cache
disk cache
jaemk authored Feb 24, 2024
2 parents f6f1710 + afb670c commit 1099008
Showing 9 changed files with 720 additions and 10 deletions.
15 changes: 14 additions & 1 deletion Cargo.toml
@@ -7,7 +7,7 @@ repository = "https://github.com/jaemk/cached"
documentation = "https://docs.rs/cached"
readme = "README.md"
categories = ["caching", "data-structures"]
keywords = ["caching", "cache", "memoize", "lru", "redis"]
keywords = ["caching", "cache", "memoize", "lru", "redis", "disk"]
license = "MIT"
edition = "2018"

@@ -26,6 +26,7 @@ redis_connection_manager = ["redis_store", "redis/connection-manager"]
redis_async_std = ["redis_store", "async", "redis/aio", "redis/async-std-comp", "redis/tls", "redis/async-std-tls-comp"]
redis_tokio = ["redis_store", "async", "redis/aio", "redis/tokio-comp", "redis/tls", "redis/tokio-native-tls-comp"]
redis_ahash = ["redis_store", "redis/ahash"]
disk_store = ["sled", "serde", "rmp-serde", "directories"]
wasm = ["instant/wasm-bindgen"]

[dependencies.cached_proc_macro]
@@ -68,6 +69,18 @@ version = "0.24"
features = ["r2d2"]
optional = true

[dependencies.sled]
version = "0.34"
optional = true

[dependencies.rmp-serde]
version ="1.1"
optional = true

[dependencies.directories]
version ="4.0"
optional = true

[dependencies.r2d2]
version = "0.8"
optional = true
30 changes: 30 additions & 0 deletions README.md
@@ -33,6 +33,7 @@ of un-cached arguments, specify `#[cached(sync_writes = true)]` / `#[once(sync_w
- `redis_connection_manager`: Enable the optional `connection-manager` feature of `redis`. Any async redis caches created
will use a connection manager instead of a `MultiplexedConnection`
- `redis_ahash`: Enable the optional `ahash` feature of `redis`
- `disk_store`: Include disk cache store
- `wasm`: Enable WASM support. Note that this feature is incompatible with `tokio`'s multi-thread
runtime (`async_tokio_rt_multi_thread`) and all Redis features (`redis_store`, `redis_async_std`, `redis_tokio`, `redis_ahash`)

@@ -137,6 +138,35 @@ async fn async_cached_sleep_secs(secs: u64) -> Result<String, ExampleError> {
}
```

----

```rust,no_run,ignore
use cached::proc_macro::io_cached;
use cached::DiskCache;
use thiserror::Error;
#[derive(Error, Debug, PartialEq, Clone)]
enum ExampleError {
#[error("error with disk cache `{0}`")]
DiskError(String),
}
/// Cache the results of a function on disk.
/// Cache files will be stored under the system cache dir
/// unless otherwise specified with `disk_dir` or the `create` argument.
/// A `map_error` closure must be specified to convert any
/// disk cache errors into the same type of error returned
/// by your function. All `io_cached` functions must return `Result`s.
#[io_cached(
map_error = r##"|e| ExampleError::DiskError(format!("{:?}", e))"##,
disk = true
)]
fn cached_sleep_secs(secs: u64) -> Result<String, ExampleError> {
std::thread::sleep(std::time::Duration::from_secs(secs));
Ok(secs.to_string())
}
```
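
The `disk_dir` and `time` attributes added by this change can be combined with `disk = true`. Below is a minimal sketch; the cache directory path and the 30-second TTL are arbitrary illustrative values, not crate defaults:

```rust,no_run,ignore
use cached::proc_macro::io_cached;
use cached::DiskCache;
use thiserror::Error;

#[derive(Error, Debug, PartialEq, Clone)]
enum ExampleError {
    #[error("error with disk cache `{0}`")]
    DiskError(String),
}

// `time = 30` adds a 30 second TTL (via `set_lifespan`) and `disk_dir`
// points the cache at a custom directory instead of the system cache dir.
#[io_cached(
    disk = true,
    time = 30,
    disk_dir = "/tmp/cached_example_cache",
    map_error = r##"|e| ExampleError::DiskError(format!("{:?}", e))"##
)]
fn cached_sleep_secs_custom_dir(secs: u64) -> Result<String, ExampleError> {
    std::thread::sleep(std::time::Duration::from_secs(secs));
    Ok(secs.to_string())
}
```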


Functions defined via macros will have their results cached using the
function's arguments as a key, a `convert` expression specified on a procedural macro,
102 changes: 93 additions & 9 deletions cached_proc_macro/src/io_cached.rs
@@ -12,6 +12,10 @@ use syn::{
struct IOMacroArgs {
map_error: String,
#[darling(default)]
disk: bool,
#[darling(default)]
disk_dir: Option<String>,
#[darling(default)]
redis: bool,
#[darling(default)]
cache_prefix_block: Option<String>,
@@ -149,6 +153,7 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream {
Some(ref name) => Ident::new(name, fn_ident.span()),
None => Ident::new(&fn_ident.to_string().to_uppercase(), fn_ident.span()),
};
let cache_name = cache_ident.to_string();

let (cache_key_ty, key_convert_block) = make_cache_key_type(
&args.key,
@@ -161,13 +166,15 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream {
// make the cache type and create statement
let (cache_ty, cache_create) = match (
&args.redis,
&args.disk,
&args.time,
&args.time_refresh,
&args.cache_prefix_block,
&args.cache_type,
&args.cache_create,
) {
(true, time, time_refresh, cache_prefix, cache_type, cache_create) => {
// redis
(true, false, time, time_refresh, cache_prefix, cache_type, cache_create) => {
let cache_ty = match cache_type {
Some(cache_type) => {
let cache_type =
@@ -234,7 +241,63 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream {
};
(cache_ty, cache_create)
}
(_, time, time_refresh, cache_prefix, cache_type, cache_create) => {
// disk
(false, true, time, time_refresh, _, cache_type, cache_create) => {
let cache_ty = match cache_type {
Some(cache_type) => {
let cache_type =
parse_str::<Type>(cache_type).expect("unable to parse cache type");
quote! { #cache_type }
}
None => {
// https://github.com/spacejam/sled?tab=readme-ov-file#interaction-with-async
quote! { cached::DiskCache<#cache_key_ty, #cache_value_ty> }
}
};
let cache_create = match cache_create {
Some(cache_create) => {
if time.is_some() || time_refresh.is_some() {
panic!(
"cannot specify `time` or `time_refresh` when passing `create block"
);
} else {
let cache_create = parse_str::<Block>(cache_create.as_ref())
.expect("unable to parse cache create block");
quote! { #cache_create }
}
}
None => {
let create = quote! {
cached::DiskCache::new(#cache_name)
};
let create = match time {
None => create,
Some(time) => {
quote! {
(#create).set_lifespan(#time)
}
}
};
let create = match time_refresh {
None => create,
Some(time_refresh) => {
quote! {
(#create).set_refresh(#time_refresh)
}
}
};
let create = match args.disk_dir {
None => create,
Some(disk_dir) => {
quote! { (#create).set_disk_directory(#disk_dir) }
}
};
quote! { (#create).build().expect("error constructing DiskCache in #[io_cached] macro") }
}
};
(cache_ty, cache_create)
}
(_, _, time, time_refresh, cache_prefix, cache_type, cache_create) => {
let cache_ty = match cache_type {
Some(cache_type) => {
let cache_type =
@@ -270,7 +333,7 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream {
let (set_cache_block, return_cache_block) = {
let (set_cache_block, return_cache_block) = if args.with_cached_flag {
(
if asyncness.is_some() {
if asyncness.is_some() && !args.disk {
quote! {
if let Ok(result) = &result {
cache.cache_set(key, result.value.clone()).await.map_err(#map_error)?;
Expand All @@ -287,7 +350,7 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream {
)
} else {
(
if asyncness.is_some() {
if asyncness.is_some() && !args.disk {
quote! {
if let Ok(result) = &result {
cache.cache_set(key, result.clone()).await.map_err(#map_error)?;
@@ -342,6 +405,29 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream {
);
fill_in_attributes(&mut attributes, cache_fn_doc_extra);

let async_trait = if asyncness.is_some() && !args.disk {
quote! {
use cached::IOCachedAsync;
}
} else {
quote! {
use cached::IOCached;
}
};

let async_cache_get_return = if asyncness.is_some() && !args.disk {
quote! {
if let Some(result) = cache.cache_get(&key).await.map_err(#map_error)? {
#return_cache_block
}
}
} else {
quote! {
if let Some(result) = cache.cache_get(&key).map_err(#map_error)? {
#return_cache_block
}
}
};
// put it all together
let expanded = if asyncness.is_some() {
quote! {
@@ -352,22 +438,20 @@ pub fn io_cached(args: TokenStream, input: TokenStream) -> TokenStream {
#(#attributes)*
#visibility #signature_no_muts {
let init = || async { #cache_create };
use cached::IOCachedAsync;
#async_trait
let key = #key_convert_block;
{
// check if the result is cached
let cache = &#cache_ident.get_or_init(init).await;
if let Some(result) = cache.cache_get(&key).await.map_err(#map_error)? {
#return_cache_block
}
#async_cache_get_return
}
#do_set_return_block
}
// Prime cached function
#[doc = #prime_fn_indent_doc]
#[allow(dead_code)]
#visibility #prime_sig {
use cached::IOCachedAsync;
#async_trait
let init = || async { #cache_create };
let key = #key_convert_block;
#do_set_return_block
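When no `create` block is given, the disk branch of the macro chains `DiskCache` builder calls. For a function named `cached_sleep_secs` with `time = 30` and a custom `disk_dir`, the generated create expression boils down to roughly the sketch below; it is simplified (the once-cell initialization and key handling are omitted) and the directory path is purely illustrative:

```rust,no_run,ignore
// Approximate result of the `quote!` chain in the disk branch for
// `#[io_cached(disk = true, time = 30, disk_dir = "/tmp/example-cache", ...)]`.
let cache: cached::DiskCache<u64, String> =
    cached::DiskCache::new("CACHED_SLEEP_SECS") // cache name defaults to the uppercased fn name
        .set_lifespan(30)                        // from `time = 30`
        .set_disk_directory("/tmp/example-cache") // from `disk_dir = "..."`
        .build()
        .expect("error constructing DiskCache in #[io_cached] macro");
```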
1 change: 1 addition & 0 deletions cached_proc_macro/src/lib.rs
@@ -68,6 +68,7 @@ pub fn once(args: TokenStream, input: TokenStream) -> TokenStream {
/// the error type returned by your function.
/// - `name`: (optional, string) specify the name for the generated cache, defaults to the function name uppercase.
/// - `redis`: (optional, bool) default to a `RedisCache` or `AsyncRedisCache`
/// - `disk`: (optional, bool) use a `DiskCache`; this must be set to `true` even if `type` and `create` are specified.
/// - `time`: (optional, u64) specify a cache TTL in seconds, implies the cache type is a `TimedCached` or `TimedSizedCache`.
/// - `time_refresh`: (optional, bool) specify whether to refresh the TTL on cache hits.
/// - `type`: (optional, string type) explicitly specify the cache store type to use.
46 changes: 46 additions & 0 deletions examples/disk.rs
@@ -0,0 +1,46 @@
/*
run with required features:
cargo run --example disk --features "disk_store"
*/

use cached::proc_macro::io_cached;
use std::io;
use std::io::Write;
use std::time::Duration;
use thiserror::Error;

#[derive(Error, Debug, PartialEq, Clone)]
enum ExampleError {
#[error("error with redis cache `{0}`")]
DiskError(String),
}

// When the macro constructs your DiskCache instance, the default
// cache files will be stored under $system_cache_dir/cached_disk_cache/
#[io_cached(
disk = true,
time = 30,
map_error = r##"|e| ExampleError::DiskError(format!("{:?}", e))"##
)]
fn cached_sleep_secs(secs: u64) -> Result<(), ExampleError> {
std::thread::sleep(Duration::from_secs(secs));
Ok(())
}

fn main() {
print!("1. first sync call with a 2 seconds sleep...");
io::stdout().flush().unwrap();
cached_sleep_secs(2).unwrap();
println!("done");
print!("second sync call with a 2 seconds sleep (it should be fast)...");
io::stdout().flush().unwrap();
cached_sleep_secs(2).unwrap();
println!("done");

use cached::IOCached;
CACHED_SLEEP_SECS.cache_remove(&2).unwrap();
print!("third sync call with a 2 seconds sleep (slow, after cache-remove)...");
io::stdout().flush().unwrap();
cached_sleep_secs(2).unwrap();
println!("done");
}
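
Note the `asyncness.is_some() && !args.disk` checks in `io_cached.rs` above: because sled has no async API (see the linked sled README note), a disk cache always goes through the blocking `IOCached` trait, even when the cached function itself is `async`. A hedged sketch of such a declaration follows; the tokio dependency here is an assumption made for illustration:

```rust,no_run,ignore
use cached::proc_macro::io_cached;
use thiserror::Error;

#[derive(Error, Debug, PartialEq, Clone)]
enum ExampleError {
    #[error("error with disk cache `{0}`")]
    DiskError(String),
}

// `disk = true` on an async fn still emits `use cached::IOCached;` and
// synchronous cache_get/cache_set calls in the generated code.
#[io_cached(
    disk = true,
    map_error = r##"|e| ExampleError::DiskError(format!("{:?}", e))"##
)]
async fn async_cached_sleep_secs_disk(secs: u64) -> Result<String, ExampleError> {
    // assumes a tokio runtime is available
    tokio::time::sleep(std::time::Duration::from_secs(secs)).await;
    Ok(secs.to_string())
}
```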
33 changes: 33 additions & 0 deletions src/lib.rs
@@ -32,6 +32,7 @@ of un-cached arguments, specify `#[cached(sync_writes = true)]` / `#[once(sync_w
- `redis_connection_manager`: Enable the optional `connection-manager` feature of `redis`. Any async redis caches created
will use a connection manager instead of a `MultiplexedConnection`
- `redis_ahash`: Enable the optional `ahash` feature of `redis`
- `disk_store`: Include disk cache store
- `wasm`: Enable WASM support. Note that this feature is incompatible with `tokio`'s multi-thread
runtime (`async_tokio_rt_multi_thread`) and all Redis features (`redis_store`, `redis_async_std`, `redis_tokio`, `redis_ahash`)
@@ -139,6 +140,35 @@ async fn async_cached_sleep_secs(secs: u64) -> Result<String, ExampleError> {
}
```
----
```rust,no_run,ignore
use cached::proc_macro::io_cached;
use cached::DiskCache;
use thiserror::Error;
#[derive(Error, Debug, PartialEq, Clone)]
enum ExampleError {
#[error("error with disk cache `{0}`")]
DiskError(String),
}
/// Cache the results of a function on disk.
/// Cache files will be stored under the system cache dir
/// unless otherwise specified with `disk_dir` or the `create` argument.
/// A `map_error` closure must be specified to convert any
/// disk cache errors into the same type of error returned
/// by your function. All `io_cached` functions must return `Result`s.
#[io_cached(
map_error = r##"|e| ExampleError::DiskError(format!("{:?}", e))"##,
disk = true
)]
fn cached_sleep_secs(secs: u64) -> Result<String, ExampleError> {
std::thread::sleep(std::time::Duration::from_secs(secs));
Ok(secs.to_string())
}
```
Functions defined via macros will have their results cached using the
function's arguments as a key, a `convert` expression specified on a procedural macro,
@@ -186,6 +216,9 @@ pub use stores::AsyncRedisCache;
pub use stores::{
CanExpire, ExpiringValueCache, SizedCache, TimedCache, TimedSizedCache, UnboundCache,
};
#[cfg(feature = "disk_store")]
#[cfg_attr(docsrs, doc(cfg(feature = "disk_store")))]
pub use stores::{DiskCache, DiskCacheError};
#[cfg(feature = "redis_store")]
#[cfg_attr(docsrs, doc(cfg(feature = "redis_store")))]
pub use stores::{RedisCache, RedisCacheError};