Skip to content

Commit

Permalink
KVM: add support for AMD nested live migration
Browse files Browse the repository at this point in the history
Support for nested guest live migration is part of Linux 5.8; add the
corresponding code to QEMU.  The migration format consists of a few
flags and an opaque 4k blob.

The blob is in VMCB format (the control area represents the L1 VMCB
control fields, the save area represents the pre-vmentry state; KVM does
not use the host save area since the AMD manual allows that) but QEMU
does not really care about that.  However, the flags need to be
copied to hflags/hflags2 and back.

In addition, support for retrieving and setting the AMD nested virtualization
states allows the L1 guest to be reset while running a nested guest, but
a small bug in CPU reset needs to be fixed for that to work.

Signed-off-by: Paolo Bonzini <[email protected]>
  • Loading branch information
bonzini committed Jul 10, 2020
1 parent 418b0f9 commit b16c0e2
Show file tree
Hide file tree
Showing 4 changed files with 70 additions and 9 deletions.
1 change: 1 addition & 0 deletions target/i386/cpu.c
Original file line number Diff line number Diff line change
Expand Up @@ -5968,6 +5968,7 @@ static void x86_cpu_reset(DeviceState *dev)
/* init to reset state */

env->hflags2 |= HF2_GIF_MASK;
env->hflags &= ~HF_GUEST_MASK;

cpu_x86_update_cr0(env, 0x60000010);
env->a20_mask = ~0x0;
Expand Down
5 changes: 5 additions & 0 deletions target/i386/cpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -2118,6 +2118,11 @@ static inline bool cpu_has_vmx(CPUX86State *env)
return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
}

/* Return true if the vCPU model exposes AMD SVM (CPUID 8000_0001H ECX bit). */
static inline bool cpu_has_svm(CPUX86State *env)
{
    return (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) != 0;
}

/*
* In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
* Since it was set, CR4.VMXE must remain set as long as vCPU is in
Expand Down
42 changes: 34 additions & 8 deletions target/i386/kvm.c
Original file line number Diff line number Diff line change
Expand Up @@ -1840,16 +1840,18 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (max_nested_state_len > 0) {
assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));

if (cpu_has_vmx(env)) {
if (cpu_has_vmx(env) || cpu_has_svm(env)) {
struct kvm_vmx_nested_state_hdr *vmx_hdr;

env->nested_state = g_malloc0(max_nested_state_len);
env->nested_state->size = max_nested_state_len;
env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;

vmx_hdr = &env->nested_state->hdr.vmx;
vmx_hdr->vmxon_pa = -1ull;
vmx_hdr->vmcs12_pa = -1ull;
if (cpu_has_vmx(env)) {
vmx_hdr = &env->nested_state->hdr.vmx;
vmx_hdr->vmxon_pa = -1ull;
vmx_hdr->vmcs12_pa = -1ull;
}
}
}

Expand Down Expand Up @@ -3873,6 +3875,20 @@ static int kvm_put_nested_state(X86CPU *cpu)
return 0;
}

/*
* Copy flags that are affected by reset from env->hflags and env->hflags2.
*/
if (env->hflags & HF_GUEST_MASK) {
env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
} else {
env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
}
if (env->hflags2 & HF2_GIF_MASK) {
env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
} else {
env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
}

assert(env->nested_state->size <= max_nested_state_len);
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
}
Expand Down Expand Up @@ -3901,11 +3917,19 @@ static int kvm_get_nested_state(X86CPU *cpu)
return ret;
}

/*
* Copy flags that are affected by reset to env->hflags and env->hflags2.
*/
if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
env->hflags |= HF_GUEST_MASK;
} else {
env->hflags &= ~HF_GUEST_MASK;
}
if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
env->hflags2 |= HF2_GIF_MASK;
} else {
env->hflags2 &= ~HF2_GIF_MASK;
}

return ret;
}
Expand All @@ -3917,6 +3941,12 @@ int kvm_arch_put_registers(CPUState *cpu, int level)

assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

/* must be before kvm_put_nested_state so that EFER.SVME is set */
ret = kvm_put_sregs(x86_cpu);
if (ret < 0) {
return ret;
}

if (level >= KVM_PUT_RESET_STATE) {
ret = kvm_put_nested_state(x86_cpu);
if (ret < 0) {
Expand Down Expand Up @@ -3950,10 +3980,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
if (ret < 0) {
return ret;
}
ret = kvm_put_sregs(x86_cpu);
if (ret < 0) {
return ret;
}
/* must be before kvm_put_msrs */
ret = kvm_inject_mce_oldstyle(x86_cpu);
if (ret < 0) {
Expand Down
31 changes: 30 additions & 1 deletion target/i386/machine.c
Original file line number Diff line number Diff line change
Expand Up @@ -1071,13 +1071,41 @@ static const VMStateDescription vmstate_vmx_nested_state = {
}
};

static bool svm_nested_state_needed(void *opaque)
{
struct kvm_nested_state *nested_state = opaque;

/*
* HF_GUEST_MASK and HF2_GIF_MASK are already serialized
* via hflags and hflags2, all that's left is the opaque
* nested state blob.
*/
return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM &&
nested_state->size > offsetof(struct kvm_nested_state, data));
}

/*
 * Migration subsection carrying the KVM SVM nested state: the VMCB12
 * physical address from the header plus the opaque VMCB-format blob
 * (KVM_STATE_NESTED_SVM_VMCB_SIZE bytes) that KVM returned.
 * Sent only when svm_nested_state_needed() is true.
 */
static const VMStateDescription vmstate_svm_nested_state = {
.name = "cpu/kvm_nested_state/svm",
.version_id = 1,
.minimum_version_id = 1,
.needed = svm_nested_state_needed,
.fields = (VMStateField[]) {
VMSTATE_U64(hdr.svm.vmcb_pa, struct kvm_nested_state),
VMSTATE_UINT8_ARRAY(data.svm[0].vmcb12,
struct kvm_nested_state,
KVM_STATE_NESTED_SVM_VMCB_SIZE),
VMSTATE_END_OF_LIST()
}
};

/*
 * Top-level predicate for the cpu/kvm_nested_state section: migrate
 * the nested-state blob only when the vCPU has one and it is in a
 * format one of the subsections (VMX or SVM) knows how to serialize.
 *
 * NOTE(review): the web diff rendering fused the removed pre-commit
 * return statement with the added one, producing invalid C; this is
 * the reconstructed post-commit body.
 */
static bool nested_state_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return (env->nested_state &&
            (vmx_nested_state_needed(env->nested_state) ||
             svm_nested_state_needed(env->nested_state)));
}

static int nested_state_post_load(void *opaque, int version_id)
Expand Down Expand Up @@ -1139,6 +1167,7 @@ static const VMStateDescription vmstate_kvm_nested_state = {
},
.subsections = (const VMStateDescription*[]) {
&vmstate_vmx_nested_state,
&vmstate_svm_nested_state,
NULL
}
};
Expand Down

0 comments on commit b16c0e2

Please sign in to comment.