Skip to content

Commit

Permalink
refactor: use synchronous compaction in sync_compaction
Browse files Browse the repository at this point in the history
It seems that a synchronous wait on an event listener can potentially cause a deadlock;
we will address this in the future.

Signed-off-by: bsbds <[email protected]>
  • Loading branch information
bsbds committed Aug 23, 2024
1 parent 313b819 commit 6d1d9ba
Showing 1 changed file with 12 additions and 32 deletions.
44 changes: 12 additions & 32 deletions crates/xline/src/storage/kv_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1121,41 +1121,21 @@ impl KvStore {
let ops = vec![WriteOp::PutScheduledCompactRevision(revision)];
// TODO: Remove the physical process logic here. It's better to move into the
// KvServer
// TODO: sync compaction task
// FIXME: madsim is single threaded, we cannot use synchronous wait here
#[cfg(not(madsim))]
{
let (event, listener) = if req.physical {
let event = Arc::new(event_listener::Event::new());
let listener = event.listen();
(Some(event), Some(listener))
} else {
(None, None)
};
if let Err(e) = self.compact_task_tx.send((revision, event)) {
panic!("the compactor exited unexpectedly: {e:?}");
}
if let Some(listener) = listener {
listener.wait();
let index = self.index();
let target_revisions = index
.compact(revision)
.into_iter()
.map(|key_rev| key_rev.as_revision().encode_to_vec())
.collect::<Vec<Vec<_>>>();
// Given that Xline uses an LSM-tree database with smaller write amplification as the storage backend, is progressive compaction really effective at improving performance?
for revision_chunk in target_revisions.chunks(1000) {
if let Err(e) = self.compact(revision_chunk) {
panic!("failed to compact revision chunk {revision_chunk:?} due to {e}");
}
}
#[cfg(madsim)]
{
let index = self.index();
let target_revisions = index
.compact(revision)
.into_iter()
.map(|key_rev| key_rev.as_revision().encode_to_vec())
.collect::<Vec<Vec<_>>>();
// Given that Xline uses an LSM-tree database with smaller write amplification as the storage backend, is progressive compaction really effective at improving performance?
for revision_chunk in target_revisions.chunks(1000) {
if let Err(e) = self.compact(revision_chunk) {
panic!("failed to compact revision chunk {revision_chunk:?} due to {e}");
}
}
if let Err(e) = self.compact_finished(revision) {
panic!("failed to set finished compact revision {revision:?} due to {e}");
}
if let Err(e) = self.compact_finished(revision) {
panic!("failed to set finished compact revision {revision:?} due to {e}");
}

self.inner.db.write_ops(ops)?;
Expand Down

0 comments on commit 6d1d9ba

Please sign in to comment.