Skip to content

Commit

Permalink
pool.hpp
Browse files Browse the repository at this point in the history
  • Loading branch information
wsm25 committed May 3, 2024
1 parent 9233442 commit 11ab00d
Show file tree
Hide file tree
Showing 5 changed files with 204 additions and 252 deletions.
95 changes: 95 additions & 0 deletions pool.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
// Copyright: (c) 2024 wsm25

#ifndef WSM_POOL_HPP
#define WSM_POOL_HPP
#include <cstdlib>
#include <cstdio>
#include <new>

/// @brief Faster vector for manually-drop types, especially built-in types.
/// @tparam T MUST NOT have custom destructor (elements are overwritten and
///           freed without ever running ~T)
template<typename T>
class Vec{
    T *from, *end, *cur; // [from, end) is the allocation, [from, cur) is live
public:
    Vec(){
        from=cur=(T*)malloc(sizeof(T)*4);
        if(!from) throw std::bad_alloc{}; // malloc can fail even for 4 slots
        end=cur+4;
    }
    // Copying would double-free the buffer in both destructors; forbid it.
    Vec(const Vec&)=delete;
    Vec& operator=(const Vec&)=delete;
    ~Vec(){free(from);}
    /// Appends `x`, doubling the buffer when full.
    /// @throws std::bad_alloc on growth failure (the vector stays valid).
    void push(T x){
        if(cur==end){
            size_t size=((size_t)end - (size_t)from), doubled=size*2;
            // realloc into a temporary: assigning `from` directly would leak
            // the old buffer (and null `from`) when realloc fails.
            T* grown=(T*)realloc(from, doubled);
            if(!grown) throw std::bad_alloc{};
            from=grown;
            cur=(T*)((size_t)from + size);
            end=(T*)((size_t)from + doubled);
        }
        *(cur++)=x; // UB if T has custom destructor
    }
    // SAFETY: must check empty
    T pop(){return *(--cur);}
    bool empty(){return cur==from;}
    // slow
    size_t len(){return cur-from;}
};

/*
* Pool: allocate in exponentially growing batch, reducing pressure
* on allocator.
*
* ## Usage
```cpp
Pool<int> pool;
// the same effect as ptr=new int;
int* ptr=pool.get();
// returns ptr to pool, similar to free(ptr)
pool.put(ptr);
```
*
 * For performance reasons — in particular to allow uninitialized
 * storage and better optimization for built-in types — we do not support
 * classes with custom destructors. If you need such a class, invoke the
 * placement constructor after getting a slot from the pool, and invoke the
 * destructor before dropping it (putting it into the pool or simply
 * discarding it).
*
* Benchmark result (in average, O1 optimization, Linux):
* - Pool: <1 ns per get/put
* - Stdlib: 25 ns per malloc, 7 ns per free
*/
template<typename T>
class Pool{
    // One contiguous slab; hands out slots sequentially, never reclaims.
    class Buf{
        T *from, *end, *cur;
    public:
        Buf(size_t cap){
            from=cur=(T*)malloc(cap*sizeof(T));
            // Leave end null too so the Pool stays safe to destroy
            // (free(nullptr) is a no-op) if this throws mid-growth.
            if(!from){ end=nullptr; throw std::bad_alloc{}; }
            end=from+cap;
        }
        bool full(){return cur>=end;}
        // UNSAFE: assume full==false
        T* get(){return (cur++);}
        size_t cap(){return end-from;}
        T* raw(){return from;}
    };

    Buf buf;        // current (newest, largest) slab
    Vec<T*> used;   // base pointers of exhausted slabs; freed in ~Pool
    Vec<T*> idle;   // slots handed back via put(); reused before growing
public:
    Pool():buf(Buf(4)){}
    // Copying would double-free every slab; forbid it.
    Pool(const Pool&)=delete;
    Pool& operator=(const Pool&)=delete;
    ~Pool(){
        while(!used.empty()) free(used.pop());
        free(buf.raw());
    }
    /// Returns an uninitialized slot. Caller placement-news into it if T
    /// needs construction. Prefers recycled slots over fresh ones.
    T* get(){
        if(idle.empty()){
            if(buf.full()){
                // Retire the full slab first: if the push throws, neither
                // `used` nor `buf` has changed, so no slab is leaked or
                // double-freed.
                used.push(buf.raw());
                // Buf has no destructor, so placement-new over it is fine.
                new(&buf) Buf(buf.cap()*2);
            }
            return buf.get();
        } else return idle.pop();
    }
    /// Hands a slot back for reuse. p MUST come from this pool's get().
    void put(T* p){idle.push(p);}
};
#endif
16 changes: 0 additions & 16 deletions toys-rs/benches/benchmark.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,22 +20,6 @@ fn bench_pool(c: &mut Criterion) {
p.release(1024);
}

// Benchmarks the deprecated `thinpool` implementation. Kept (prefixed with
// `_`) for reference but not registered in `criterion_group!` below.
#[allow(deprecated)]
fn _bench_thinpool(c: &mut Criterion) {
    use toys_rs::thinpool::Pool;
    // NOTE(review): `Pool::new` is `unsafe`; its contract is not visible
    // here — confirm against the thinpool module docs.
    let mut p:Pool<u8>=unsafe{Pool::new()};
    // Tight get loop: measures allocation alone, nothing is ever returned.
    c.bench_function("thinpool one-shot", |b| b.iter(|| {
        black_box(p.get());
    }));
    // Mixed workload: coin-flip between acquiring a slot and releasing one.
    let mut v=Vec::new();
    c.bench_function("thinpool random", |b| b.iter(|| {
        match random::<bool>(){ // 2ns
            true=>{v.push(p.get());}, // 2ns+4ns
            false=>{black_box(v.pop());}
        }
    }));
    println!("{} in use, {} idling", v.len(), p.idle());
}

criterion_group!(benches, bench_pool);
criterion_main!(benches);
108 changes: 108 additions & 0 deletions toys-rs/src/heappool.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
//! A simple single-thread object pool. As size for single
//! object is fixed, chunks and bins in malloc are too heavy.
//!
//! maintains a virtual memory mapping, enabling
//! - use of memory fragments
//! - prefer lower address
//! - can automatically dealloc high address

/// Fixed-capacity object pool backed by a single contiguous allocation.
pub struct Heap<T>{
    raw: *mut T,   // base of the allocation; owned, released in Drop
    top: *mut T,   // one past the highest slot ever handed out (starts at raw)
    cap: usize,    // value passed to mem::new_arr — presumably a count of T elements; confirm against mem
    idle: BinaryHeap<NonNull<T>>, // returned slots below `top`, awaiting reuse (max-heap by address)
}

impl<T> Heap<T>{
    /// Allocates backing storage for `cap` objects. Slots are uninitialized;
    /// callers must write before reading.
    pub fn new(cap: usize)->Self{
        // SAFETY: `new_arr(cap)` allocates the storage; it is released
        // exactly once, in `Drop`, with the same `cap`.
        let raw=unsafe{crate::mem::new_arr(cap)};
        Self { raw , top: raw, cap , idle:BinaryHeap::new()}
    }
    /// Hands out a slot, reusing returned ones first; `None` when exhausted.
    /// NOTE(review): `idle` is a max-heap, so this reuses the *highest*
    /// idle address first, despite the module doc saying "prefer lower
    /// address" — behavior kept as-is; confirm intent.
    pub fn get(&mut self)->Option<NonNull<T>>{
        if let Some(p)=self.idle.pop(){
            return Some(p);
        }
        // BUG FIX: the old check compared the *byte* offset
        // `top as usize - raw as usize` against `cap`, an *element* count,
        // so only cap/size_of::<T>() slots were ever handed out. Compare
        // against the one-past-the-end pointer instead.
        // SAFETY: raw..raw+cap is the single allocation made in `new`.
        let end=unsafe{self.raw.add(self.cap)};
        if self.top<end{
            // SAFETY: top < end, so it points inside the allocation and is
            // therefore non-null.
            let ptr=Some(unsafe{NonNull::new_unchecked(self.top)});
            self.top=unsafe{self.top.add(1)};
            ptr
        } else {None}
    }
    // UNSAFE: ptr MUST belong to this heap, i.e. have come from `get` and
    // not been returned since (otherwise the `top.sub(1)` arithmetic and the
    // idle heap are corrupted).
    pub fn put(&mut self, ptr: NonNull<T>){
        if ptr.as_ptr() == unsafe{self.top.sub(1)}{
            // Returning the topmost slot: shrink `top`, then keep absorbing
            // idle slots that now sit directly below it. `peek` on the
            // max-heap always yields the highest idle address, so this
            // coalesces every contiguous run under `top`.
            self.top=unsafe{self.top.sub(1)};
            while let Some(p)=self.idle.peek(){
                if p.as_ptr() != unsafe{self.top.sub(1)} {break;}
                self.top=unsafe{self.top.sub(1)};
                self.idle.pop();
            }
        } else {
            self.idle.push(ptr);
        }
    }
}

impl<T> Drop for Heap<T>{
    fn drop(&mut self) {
        // SAFETY: `raw`/`cap` are exactly what `mem::new_arr(cap)` returned
        // in `new`, and this is the only place the allocation is released.
        // NOTE(review): presumably `delete_arr` frees storage without
        // running element destructors — outstanding T values are NOT
        // dropped here; confirm against `mem`.
        unsafe{crate::mem::delete_arr(self.raw, self.cap)};
    }
}

#[cfg(test)]
mod tests {

    /// Smoke test: a fresh heap hands out usable slots and drops cleanly.
    #[test]
    fn _test_init(){
        use super::*;
        let mut p = Heap::<i32>::new(10);
        let g1=p.get().unwrap();
        let g2=p.get().unwrap();
        // Slots start uninitialized, so write before reading — the previous
        // version printed uninitialized memory, which is UB.
        // SAFETY: g1/g2 are distinct valid slots owned by `p`.
        unsafe{g1.as_ptr().write(1)};
        unsafe{g2.as_ptr().write(2)};
        println!("{}",unsafe{g1.as_ptr().read()});
        println!("{}",unsafe{g2.as_ptr().read()});
        drop(p);
    }

    /*
    // #[test]
    fn _test_tokio(){
        use tokio::{
            runtime::Builder,
            task::{LocalSet, spawn_local, yield_now},
        };
        use super::*;
        async fn sleepygreeting(mut pool: Pool<i32>){
            for _ in 0..5{
                let x=pool.get();
                if true==rand::random(){
                    yield_now().await;
                }
                println!("Get {} from pool!", *x);
            }
        }
        async fn tokio_main(){
            let mut ipool=0;
            let pool = Pool::with_generator(move||{ipool+=1; ipool});
            let mut tasks = Vec::new();
            for _ in 0..5{
                tasks.push(spawn_local(
                    sleepygreeting(pool.clone())
                ));
            }
            for t in tasks{
                let _ = t.await;
            }
        }
        Builder::new_current_thread().enable_time().build().unwrap().block_on(
            LocalSet::new().run_until(tokio_main())
        );
    }
    */
}

use std::{cell::UnsafeCell, collections::{BTreeMap, BinaryHeap, HashMap}, mem::ManuallyDrop, ptr::NonNull, rc::Rc};

use crate::mem;
3 changes: 1 addition & 2 deletions toys-rs/src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
//! Rust toy libraries
pub mod mem;
pub mod localpool;
#[deprecated="benchmark shows terrible performance"]
pub mod thinpool;
pub mod heappool;
#[deprecated]
pub mod locallock;
pub mod rcnode;
Loading

0 comments on commit 11ab00d

Please sign in to comment.