Commit

PR feedback: target vtl for unlock
sluck-msft committed Nov 22, 2024
1 parent c30b6e1 commit 7dbfaa7
Showing 2 changed files with 68 additions and 62 deletions.
2 changes: 1 addition & 1 deletion openhcl/virt_mshv_vtl/src/processor/hardware_cvm/mod.rs
@@ -1268,7 +1268,7 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {

         let tlb_locked = self.vtls_tlb_locked.get(requesting_vtl, target_vtl);
         match (tlb_locked, config.tlb_locked()) {
-            (true, false) => self.unlock_tlb_lock(requesting_vtl),
+            (true, false) => self.unlock_tlb_lock_target(requesting_vtl, target_vtl),
             (false, true) => self.set_tlb_lock(requesting_vtl, target_vtl),
             _ => (), // Nothing to do
         };
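To make the call-site change concrete: the TLB lock state is tracked per (requesting VTL, target VTL) pair, and the config being applied here is scoped to a single target VTL, so only that one entry should be released. The miniature below is a hypothetical model written for this page (NUM_VTLS, the array layout, and the method names are invented, not the OpenHCL types); it contrasts the targeted unlock with the previous blanket unlock of every lower VTL.

// Hypothetical miniature of the per-(requesting, target) lock bookkeeping.
// NUM_VTLS, the array layout, and the method names are invented for illustration.
const NUM_VTLS: usize = 3;

struct VtlsTlbLocked {
    // locked[requesting][target]: does `requesting` currently hold the TLB lock
    // for `target`?
    locked: [[bool; NUM_VTLS]; NUM_VTLS],
}

impl VtlsTlbLocked {
    // New call-site behavior: release only the entry for the VTL being configured.
    fn unlock_target(&mut self, requesting: usize, target: usize) {
        self.locked[requesting][target] = false;
    }

    // Old call-site behavior: release every lower VTL's entry for the requesting VTL.
    fn unlock_all_lower(&mut self, requesting: usize) {
        for target in 0..requesting {
            self.locked[requesting][target] = false;
        }
    }
}

fn main() {
    let mut locks = VtlsTlbLocked {
        locked: [[true; NUM_VTLS]; NUM_VTLS],
    };
    // Targeted unlock: VTL 2 releases its lock on VTL 1 only...
    locks.unlock_target(2, 1);
    assert!(!locks.locked[2][1] && locks.locked[2][0]);
    // ...whereas the blanket unlock would also release its lock on VTL 0.
    locks.unlock_all_lower(2);
    assert!(!locks.locked[2][0]);
}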
128 changes: 67 additions & 61 deletions openhcl/virt_mshv_vtl/src/processor/hardware_cvm/tlb_lock.rs
@@ -77,77 +77,83 @@ impl<'a, B: HardwareIsolatedBacking> UhProcessor<'a, B> {
        self.vtls_tlb_locked.set(requesting_vtl, target_vtl, true);
    }

    /// Unlocks the TLBs of a specific lower VTL
    pub fn unlock_tlb_lock_target(&mut self, unlocking_vtl: Vtl, target_vtl: GuestVtl) {
        let self_index = self.vp_index().index() as usize;
        // If this VP hasn't taken a lock, no need to do anything.
        if self.vtls_tlb_locked.get(unlocking_vtl, target_vtl) {
            self.vtls_tlb_locked.set(unlocking_vtl, target_vtl, false);
            // A memory fence is required after indicating that the target VTL is no
            // longer locked, because other VPs will make decisions about how to
            // handle blocking based on this information, and the loop below relies on
            // those processors having an accurate view of the lock state.
            std::sync::atomic::fence(Ordering::SeqCst);

            // If the lock for VTL 0 is being released by VTL 2, then check
            // to see whether VTL 1 also holds a lock for VTL 0. If so, no
            // wait can be unblocked until VTL 1 also releases its lock.
            if unlocking_vtl == Vtl::Vtl2
                && target_vtl == GuestVtl::Vtl0
                && self.vtls_tlb_locked.get(Vtl::Vtl1, GuestVtl::Vtl0)
            {
                return;
            }

            // Now we can remove ourselves from the global TLB lock.
            self.cvm_partition().tlb_locked_vps[target_vtl].set_aliased(self_index, false);

            // Check to see whether any other VPs are waiting for this VP to release
            // the TLB lock. Note that other processors may be in the process of
            // inserting themselves into this set because they may have observed that
            // the TLB lock was still held on the current processor, but they will
            // take responsibility for removing themselves after insertion because
            // they will once again observe the TLB lock as not held. Because the set
            // of blocked VPs may be changing, it must be captured locally, since the
            // VP set scan below cannot safely be performed on a VP set that may be
            // changing.
            for blocked_vp in self.cvm_partition().tlb_lock_info[self_index][target_vtl]
                .blocked_vps
                .clone()
                .iter_ones()
            {
                self.cvm_partition().tlb_lock_info[self_index][target_vtl]
                    .blocked_vps
                    .set_aliased(blocked_vp, false);

                // Mark the target VP as no longer blocked by the current VP.
                // Note that the target VP may have already marked itself as not
                // blocked if is has already noticed that the lock has already
                // been released on the current VP.
                let other_lock = &self.cvm_partition().tlb_lock_info[blocked_vp][target_vtl];
                if other_lock.blocking_vps.set_aliased(self_index, false) {
                    let other_old_count =
                        other_lock.blocking_vp_count.fetch_sub(1, Ordering::Relaxed);

                    if other_old_count == 1 {
                        // The current VP was the last one to be removed from the
                        // blocking set of the target VP. If it is asleep, it must
                        // be woken now. Sending an IPI is sufficient to cause it to
                        // reevaluate the blocking state. It is not necessary to
                        // synchronize with its sleep state as a spurious IPI is not
                        // harmful.
                        if other_lock.sleeping.load(Ordering::SeqCst) {
                            self.partition.vps[blocked_vp].wake_vtl2();
                        }
                    }
                }
            }
        }
    }

    /// Unlocks the TLBs of all lower VTLs as required upon VTL exit.
    pub fn unlock_tlb_lock(&mut self, unlocking_vtl: Vtl) {
        debug_assert!(unlocking_vtl != Vtl::Vtl0);

        for &target_vtl in &[GuestVtl::Vtl1, GuestVtl::Vtl0][(2 - unlocking_vtl as usize)..] {
            self.unlock_tlb_lock_target(unlocking_vtl, target_vtl);
        }
    }

    /// Returns whether the VP should halt to wait for the TLB lock of the specified VTL.
    pub fn should_halt_for_tlb_unlock(&mut self, target_vtl: GuestVtl) -> bool {
        let self_index = self.vp_index().index() as usize;
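The loop in unlock_tlb_lock selects which lower VTLs to release by slicing a fixed [Vtl1, Vtl0] array with (2 - unlocking_vtl): unlocking from VTL 2 covers both lower VTLs, while unlocking from VTL 1 covers only VTL 0. A tiny standalone sketch of that arithmetic, using a stand-in GuestVtl enum rather than the real type:

// Stand-in enum for illustration; not the real GuestVtl type.
#[derive(Debug, Clone, Copy, PartialEq)]
enum GuestVtl {
    Vtl0,
    Vtl1,
}

// Mirrors the slice expression used by unlock_tlb_lock.
fn lower_vtls(unlocking_vtl: usize) -> &'static [GuestVtl] {
    assert!(unlocking_vtl == 1 || unlocking_vtl == 2);
    static ORDER: [GuestVtl; 2] = [GuestVtl::Vtl1, GuestVtl::Vtl0];
    &ORDER[(2 - unlocking_vtl)..]
}

fn main() {
    assert_eq!(lower_vtls(2), &[GuestVtl::Vtl1, GuestVtl::Vtl0]);
    assert_eq!(lower_vtls(1), &[GuestVtl::Vtl0]);
}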
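The body of unlock_tlb_lock_target follows a strict order: clear the per-VTL lock flag, fence, drop out of the global locked-VP set, then scan the blocked-VP set and wake any VP for which this was the last blocker. Below is a minimal standalone model of that release-then-wake ordering using std atomics and thread parking. It is a sketch, not the OpenHCL implementation: the Waiter type and field names are invented, everything uses SeqCst for simplicity (the real code mixes relaxed counter updates with explicit fences), and parking/unparking stands in for halting the VP and waking it with an IPI.

use std::sync::atomic::{fence, AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

// One waiter's bookkeeping: how many lock holders still block it, and whether it
// has parked itself while waiting for that count to reach zero.
struct Waiter {
    blocking_holders: AtomicUsize,
    sleeping: AtomicBool,
}

fn main() {
    let lock_held = AtomicBool::new(true);
    let waiter = Arc::new(Waiter {
        blocking_holders: AtomicUsize::new(1),
        sleeping: AtomicBool::new(false),
    });

    // Waiter thread: advertise that it is going to sleep, then park until no
    // holder blocks it. A spurious unpark only causes a harmless re-check.
    let w = Arc::clone(&waiter);
    let waiting = thread::spawn(move || {
        w.sleeping.store(true, Ordering::SeqCst);
        while w.blocking_holders.load(Ordering::SeqCst) != 0 {
            thread::park();
        }
        w.sleeping.store(false, Ordering::SeqCst);
    });

    // Holder: publish that the lock is released, fence so the wake bookkeeping
    // below cannot be observed ahead of the lock state, then leave the waiter's
    // blocking set and wake it only if this was the last blocker and the waiter
    // is actually asleep.
    thread::sleep(Duration::from_millis(10));
    lock_held.store(false, Ordering::SeqCst);
    fence(Ordering::SeqCst);
    if waiter.blocking_holders.fetch_sub(1, Ordering::SeqCst) == 1
        && waiter.sleeping.load(Ordering::SeqCst)
    {
        waiting.thread().unpark();
    }

    waiting.join().unwrap();
}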
