Skip to content

Commit

Permalink
fix: When tracked pages receive more writes, treat them as dirty pages
Browse files Browse the repository at this point in the history
* chore: Use debug assertions to document get_page_indices input requirements

* fix: When tracked pages receive more writes, treat them as dirty pages

Credit goes to @mohanson for providing the tests
  • Loading branch information
xxuejie authored Mar 20, 2024
1 parent abba8dc commit bae0fb8
Show file tree
Hide file tree
Showing 3 changed files with 44 additions and 5 deletions.
1 change: 1 addition & 0 deletions src/memory/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,7 @@ pub fn check_no_overflow(addr: u64, size: u64, memory_size: u64) -> Result<(), E

// `size` should be a non-zero u64
pub fn get_page_indices(addr: u64, size: u64) -> (u64, u64) {
debug_assert!(size > 0);
let addr_end = addr.wrapping_add(size);
let page = addr >> RISCV_PAGE_SHIFTS;
let page_end = (addr_end - 1) >> RISCV_PAGE_SHIFTS;
Expand Down
9 changes: 6 additions & 3 deletions src/snapshot2.rs
Original file line number Diff line number Diff line change
Expand Up @@ -160,9 +160,6 @@ impl<I: Clone + PartialEq, D: DataSource<I>> Snapshot2Context<I, D> {
pub fn make_snapshot<M: SupportMachine>(&self, machine: &mut M) -> Result<Snapshot2<I>, Error> {
let mut dirty_pages: Vec<(u64, u8, Vec<u8>)> = vec![];
for i in 0..machine.memory().memory_pages() as u64 {
if self.pages.contains_key(&i) {
continue;
}
let flag = machine.memory_mut().fetch_flag(i)?;
if flag & FLAG_DIRTY == 0 {
continue;
Expand All @@ -182,6 +179,12 @@ impl<I: Clone + PartialEq, D: DataSource<I>> Snapshot2Context<I, D> {
let mut pages: Vec<u64> = self.pages.keys().copied().collect();
pages.sort_unstable();
for page in pages {
// Some pages might be marked as cached pages from the data source, but receive
// memory writes later (and are thus marked as dirty). We can safely skip those
// pages here, as they will be gathered as dirty pages.
if machine.memory_mut().fetch_flag(page)? & FLAG_DIRTY != 0 {
continue;
}
let address = page * PAGE_SIZE;
let (id, offset, flag) = &self.pages[&page];
let mut appended_to_last = false;
Expand Down
39 changes: 37 additions & 2 deletions tests/test_resume2.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ use ckb_vm::machine::{
use ckb_vm::memory::{sparse::SparseMemory, wxorx::WXorXMemory};
use ckb_vm::registers::{A0, A1, A7};
use ckb_vm::snapshot2::{DataSource, Snapshot2, Snapshot2Context};
use ckb_vm::{DefaultMachineBuilder, Error, Register, Syscalls, ISA_A, ISA_IMC};
use ckb_vm::{DefaultMachineBuilder, Error, Memory, Register, Syscalls, ISA_A, ISA_IMC};
use std::collections::HashMap;
use std::fs::File;
use std::io::Read;
Expand Down Expand Up @@ -531,7 +531,7 @@ impl Machine {

#[cfg(not(feature = "enable-chaos-mode-by-default"))]
fn full_memory(&mut self) -> Result<Bytes, Error> {
use ckb_vm::{Memory, DEFAULT_MEMORY_SIZE};
use ckb_vm::DEFAULT_MEMORY_SIZE;
use Machine::*;
match self {
Asm(inner, _) => inner
Expand Down Expand Up @@ -659,3 +659,38 @@ pub fn test_store_bytes_twice() {

assert_eq!(mem1, mem2);
}

#[cfg(not(feature = "enable-chaos-mode-by-default"))]
#[test]
pub fn test_mixing_snapshot2_writes_with_machine_raw_writes() {
    // Regression test: a memory range first populated through the snapshot2
    // context (tracked as cached pages) and then overwritten via raw machine
    // memory writes must survive a snapshot/resume round trip — i.e. such
    // pages have to be captured as dirty pages rather than skipped.
    let data_source = load_program("tests/programs/sc_after_snapshot");

    let mut machine = MachineTy::Asm.build(data_source.clone(), VERSION2);
    machine.set_max_cycles(u64::MAX);
    machine.load_program(&vec!["main".into()]).unwrap();

    if let Machine::Asm(ref mut inner, ref ctx) = machine {
        // Populate memory from the data source, marking the pages as tracked...
        ctx.lock()
            .unwrap()
            .store_bytes(&mut inner.machine, 0, &DATA_ID, 0, 29186)
            .unwrap();
        // ...then clobber the exact same range with raw writes, dirtying them.
        inner
            .machine
            .memory_mut()
            .store_bytes(0, &vec![0x42; 29186])
            .unwrap();
    } else {
        unimplemented!()
    }

    let expected = machine.full_memory().unwrap();

    // Snapshot the mutated machine and resume it into a fresh one; the
    // restored memory must be byte-identical to the original.
    let snapshot = machine.snapshot().unwrap();
    let mut resumed = MachineTy::Asm.build(data_source, VERSION2);
    resumed.resume(snapshot).unwrap();
    resumed.set_max_cycles(u64::MAX);
    let actual = resumed.full_memory().unwrap();

    assert_eq!(expected, actual);
}

0 comments on commit bae0fb8

Please sign in to comment.