diff --git a/evaluation/tests/large_remove.c b/evaluation/tests/large_remove.c
new file mode 100644
index 000000000..f20f855ce
--- /dev/null
+++ b/evaluation/tests/large_remove.c
@@ -0,0 +1,42 @@
+#include <assert.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/statvfs.h>
+#include <unistd.h>
+#define PAGESZ 4096
+
+int main(void) {
+    struct statvfs stat;
+    assert(statvfs("/mnt/pmem", &stat) == 0);
+    unsigned long pages_start = stat.f_bfree;
+
+    char *path = "/mnt/pmem/myfile";
+
+    char data[PAGESZ];
+    memset(data, '\0', PAGESZ);
+    int fd = open(path, O_RDWR | O_CREAT, 0644);
+    assert(fd > 0);
+
+    assert(statvfs("/mnt/pmem", &stat) == 0);
+    unsigned long pages_after_create = stat.f_bfree;
+    assert(pages_start == pages_after_create + 1); // assert inode creation is correct
+
+    const int num_pages = 200000;
+    for (int i = 0; i < num_pages; i++) {
+        assert(write(fd, data, PAGESZ) == PAGESZ);
+    }
+
+    assert(statvfs("/mnt/pmem", &stat) == 0);
+    unsigned long pages_now_free = stat.f_bfree;
+    assert(pages_after_create == pages_now_free + num_pages); // assert all pages are in use
+
+    assert(lseek(fd, 0, SEEK_CUR) == (num_pages * PAGESZ)); // assert the file is the expected size
+    close(fd);
+
+    assert(remove(path) == 0);
+    assert(statvfs("/mnt/pmem", &stat) == 0);
+    unsigned long pages_end = stat.f_bfree;
+    assert(pages_start == pages_end); // assert all pages are back in the free list
+    return 0;
+}
diff --git a/evaluation/tests/remove_multiple_files.c b/evaluation/tests/remove_multiple_files.c
new file mode 100644
index 000000000..395f4ae51
--- /dev/null
+++ b/evaluation/tests/remove_multiple_files.c
@@ -0,0 +1,36 @@
+#include <assert.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/statvfs.h>
+#include <unistd.h>
+#define PAGESZ 4096
+
+// create many files and check that removing them returns all pages to the free list
+int main(void) {
+    struct statvfs stat;
+    assert(statvfs("/mnt/pmem", &stat) == 0);
+    unsigned long pages_start = stat.f_bfree;
+    const int num_files = 5000;
+    char filename[64];
+    char data[PAGESZ * 2];
+    memset(data, '\0', PAGESZ * 2);
+    memset(filename, 0, 64);
+    for (int i = 0; i < num_files; i++) {
+        sprintf(filename, "/mnt/pmem/%d", i);
+        int fd = open(filename, O_CREAT | O_RDWR, 0644);
+        assert(fd > 0);
+        assert(write(fd, data, PAGESZ * 2) == PAGESZ * 2);
+        close(fd);
+    }
+
+    for (int i = 0; i < num_files; i++) {
+        sprintf(filename, "/mnt/pmem/%d", i);
+        assert(remove(filename) == 0);
+    }
+    assert(statvfs("/mnt/pmem", &stat) == 0);
+    unsigned long pages_end = stat.f_bfree;
+    assert(pages_start == pages_end);
+    return 0;
+}
diff --git a/linux/fs/squirrelfs/balloc.rs b/linux/fs/squirrelfs/balloc.rs
index 2d986ba1e..aaf48a090 100644
--- a/linux/fs/squirrelfs/balloc.rs
+++ b/linux/fs/squirrelfs/balloc.rs
@@ -261,23 +261,37 @@ impl PageAllocator for Option<PerCpuPageAllocator> {
     fn dealloc_data_page_list(&self, pages: &DataPageListWrapper) -> Result<()> {
         if let Some(allocator) = self {
            let mut page_list = pages.get_page_list_cursor();
-            let mut page = page_list.current();
+
+            // RBTree storing the list of pages to free for each cpu
+            let mut cpu_free_list_map: RBTree<usize, Vec<PageNum>> = RBTree::new();
+
+            let mut page = page_list.current(); // head of the page list
+
+            // collect the page #s to free for each cpu
             while page.is_some() {
-                // janky syntax to deal with the fact that page_list.current() returns an Option
                 if let Some(page) = page {
-                    // TODO: refactor to avoid acquiring lock on every iteration
-                    allocator.dealloc_page(page.get_page_no())?;
+                    let cpu: usize = allocator.pageno2cpuid(page.get_page_no())?;
+
+                    // add the page to this cpu's vector, creating the vector on first use
+                    let cpu_page_vec: Option<&mut Vec<PageNum>> = cpu_free_list_map.get_mut(&cpu);
+                    if let Some(cpu_page_vec) = cpu_page_vec {
+                        cpu_page_vec.try_push(page.get_page_no())?;
+                    } else {
+                        let mut free_list: Vec<PageNum> = Vec::new();
+                        free_list.try_push(page.get_page_no())?;
+                        cpu_free_list_map.try_insert(cpu, free_list)?;
+                    }
+
                     page_list.move_next();
-                } else {
-                    unreachable!()
                 }
                 page = page_list.current();
             }
+            allocator.dealloc_multiple_page(cpu_free_list_map)?;
             Ok(())
         } else {
             pr_info!("ERROR: page allocator is uninitialized\n");
             Err(EINVAL)
         }
     }

     fn dealloc_dir_page<'a>(&self, page: &DirPageWrapper<'a, Clean, Dealloc>) -> Result<()> {
@@ -343,6 +357,47 @@ impl PerCpuPageAllocator {
             Ok(())
         }
     }
+
+    fn dealloc_multiple_page(&self, cpu_free_list_map: RBTree<usize, Vec<PageNum>>) -> Result<()> {
+        for (cpu, page_nos) in cpu_free_list_map.iter() {
+            let free_list = Arc::clone(&self.free_lists[*cpu]);
+            let mut free_list = free_list.lock();
+
+            for page_no in page_nos.iter() {
+                free_list.free_pages += 1;
+                let res = free_list.list.try_insert(*page_no, ());
+
+                // unwrap the error so we can get at the option
+                let res = match res {
+                    Ok(res) => res,
+                    Err(e) => {
+                        pr_info!(
+                            "ERROR: failed to insert {:?} into page allocator at CPU {:?}, error {:?}\n",
+                            page_no,
+                            cpu,
+                            e
+                        );
+                        return Err(e);
+                    }
+                };
+
+                if res.is_some() {
+                    pr_info!(
+                        "ERROR: page {:?} was already in the allocator at CPU {:?}\n",
+                        page_no,
+                        cpu
+                    );
+                    return Err(EINVAL);
+                }
+            }
+        }
+        Ok(())
+    }
+
+    fn pageno2cpuid(&self, page_no: PageNum) -> Result<usize> {
+        let cpu: usize = ((page_no - self.start) / self.pages_per_cpu).try_into()?;
+        Ok(cpu)
+    }
 }

 // placeholder page descriptor that can represent either a dir or data page descriptor
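For context on the balloc.rs change above: it replaces the per-page `dealloc_page` call, which took the per-CPU free-list lock on every iteration, with a two-pass scheme that first buckets the page numbers by owning CPU and then takes each CPU's lock once to insert the whole batch. The sketch below is a minimal userspace illustration of that pattern only; `BTreeMap`, `BTreeSet`, and `Mutex` stand in for the kernel's `RBTree` and per-CPU locks, and the names (`CpuFreeList`, `dealloc_pages`, `pageno_to_cpu`) are illustrative, not the SquirrelFS API.

```rust
use std::collections::{BTreeMap, BTreeSet};
use std::sync::Mutex;

type PageNum = u64;

struct CpuFreeList {
    free_pages: u64,
    list: BTreeSet<PageNum>,
}

struct PerCpuPageAllocator {
    start: PageNum,
    pages_per_cpu: PageNum,
    free_lists: Vec<Mutex<CpuFreeList>>,
}

impl PerCpuPageAllocator {
    // map a page number to the cpu whose region contains it
    fn pageno_to_cpu(&self, page_no: PageNum) -> usize {
        ((page_no - self.start) / self.pages_per_cpu) as usize
    }

    // free a batch of pages, taking each per-cpu lock once
    fn dealloc_pages(&self, pages: &[PageNum]) {
        // pass 1: bucket the pages by owning cpu without holding any lock
        let mut per_cpu: BTreeMap<usize, Vec<PageNum>> = BTreeMap::new();
        for &p in pages {
            per_cpu.entry(self.pageno_to_cpu(p)).or_default().push(p);
        }
        // pass 2: one lock acquisition per cpu instead of one per page
        for (cpu, page_nos) in per_cpu {
            let mut fl = self.free_lists[cpu].lock().unwrap();
            for p in page_nos {
                fl.free_pages += 1;
                assert!(fl.list.insert(p), "page {p} was already free");
            }
        }
    }
}

fn main() {
    let alloc = PerCpuPageAllocator {
        start: 0,
        pages_per_cpu: 1024,
        free_lists: (0..4)
            .map(|_| Mutex::new(CpuFreeList { free_pages: 0, list: BTreeSet::new() }))
            .collect(),
    };
    // pages 1, 2, 3 belong to cpu 0; 1030 to cpu 1; 2050 to cpu 2
    alloc.dealloc_pages(&[1, 2, 1030, 2050, 3]);
    assert_eq!(alloc.free_lists[0].lock().unwrap().free_pages, 3);
    assert_eq!(alloc.free_lists[1].lock().unwrap().free_pages, 1);
    assert_eq!(alloc.free_lists[2].lock().unwrap().free_pages, 1);
}
```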