Skip to content

Commit 8b264ff

Browse files
hanliyanglanlanxiyiji
authored and committed
KVM: SVM: Export MSR_AMD64_SEV_ES_GHCB to userspace for CSV2 guest
hygon inclusion category: feature CVE: NA --------------------------- VMCB.control.ghcb_gpa contains necessary info to support a runtime CSV2 guest. At present, it includes the following points: 1. For the GHCB MSR protocol, ghcb_gpa stores the negotiation result 2. For the GHCB page protocol, ghcb_gpa stores the GPA of the GHCB page In addition, the AP VCPU's SIPI state and GHCB page mapping state are temporarily stored in KVM. When a CSV2 guest is migrated to the recipient, KVM needs to restore VMCB.control.ghcb_gpa, the VCPU's SIPI state and the GHCB page mapping state from the source side. This patch supports exporting MSR_AMD64_SEV_ES_GHCB to userspace. KVM can collect all the info described above and return it to userspace if userspace requests to get MSR_AMD64_SEV_ES_GHCB, and KVM can restore all the info described above if userspace requests to set MSR_AMD64_SEV_ES_GHCB. Signed-off-by: hanliyang <hanliyang@hygon.cn> Link: #354 (cherry picked from commit ef239c4) Signed-off-by: Wentao Guan <guanwentao@uniontech.com> Conflicts: arch/x86/kvm/svm/svm.h arch/x86/kvm/x86.c include/uapi/linux/kvm.h
1 parent 140ebe1 commit 8b264ff

7 files changed

Lines changed: 219 additions & 0 deletions

File tree

arch/x86/kvm/svm/csv.c

Lines changed: 118 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -855,6 +855,124 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
855855
return r;
856856
}
857857

858+
/*
 * Map the guest's GHCB page at @ghcb_gpa into the host kernel.
 *
 * Used on the recipient side of CSV2 live migration to re-establish the
 * GHCB mapping that existed on the source.  The outcome is recorded in
 * svm->sev_es.receiver_ghcb_map_fail so the vCPU run path can refuse to
 * enter the guest if the mapping could not be restored.
 *
 * Return: 0 on success, -EINVAL if the GHCB page cannot be mapped.
 */
static int csv2_map_ghcb_gpa(struct vcpu_svm *svm, u64 ghcb_gpa)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
		/* Unable to map GHCB from guest */
		vcpu_unimpl(vcpu, "Missing GHCB [%#llx] from guest\n",
			    ghcb_gpa);

		svm->sev_es.receiver_ghcb_map_fail = true;
		return -EINVAL;
	}

	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
	svm->sev_es.receiver_ghcb_map_fail = false;

	pr_info("Mapping GHCB [%#llx] from guest at recipient\n", ghcb_gpa);

	return 0;
}
876+
877+
/*
 * The GHCB MSR protocol is in use when any of the GHCBData info bits
 * (GHCB_MSR_INFO_MASK) are set; otherwise the value is a GHCB page GPA.
 */
static bool is_ghcb_msr_protocol(u64 ghcb_val)
{
	return (ghcb_val & GHCB_MSR_INFO_MASK) != 0;
}
881+
882+
/*
883+
* csv_get_msr return msr data to the userspace.
884+
*
885+
* Return 0 if get msr success.
886+
*/
887+
int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
888+
{
889+
struct vcpu_svm *svm = to_svm(vcpu);
890+
891+
switch (msr_info->index) {
892+
case MSR_AMD64_SEV_ES_GHCB:
893+
/* Only support userspace get from vmcb.control.ghcb_gpa */
894+
if (!msr_info->host_initiated || !sev_es_guest(vcpu->kvm))
895+
return 1;
896+
897+
msr_info->data = svm->vmcb->control.ghcb_gpa;
898+
899+
/* Only set status bits when using GHCB page protocol */
900+
if (msr_info->data &&
901+
!is_ghcb_msr_protocol(msr_info->data)) {
902+
if (svm->sev_es.ghcb)
903+
msr_info->data |= GHCB_MSR_MAPPED_MASK;
904+
905+
if (svm->sev_es.received_first_sipi)
906+
msr_info->data |=
907+
GHCB_MSR_RECEIVED_FIRST_SIPI_MASK;
908+
}
909+
break;
910+
default:
911+
return 1;
912+
}
913+
return 0;
914+
}
915+
916+
/*
917+
* csv_set_msr set msr data from the userspace.
918+
*
919+
* Return 0 if set msr success.
920+
*/
921+
int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
922+
{
923+
struct vcpu_svm *svm = to_svm(vcpu);
924+
u32 ecx = msr_info->index;
925+
u64 data = msr_info->data;
926+
927+
switch (ecx) {
928+
case MSR_AMD64_SEV_ES_GHCB:
929+
/* Only support userspace set to vmcb.control.ghcb_gpa */
930+
if (!msr_info->host_initiated || !sev_es_guest(vcpu->kvm))
931+
return 1;
932+
933+
/*
934+
* Value 0 means uninitialized userspace MSR data, userspace
935+
* need get the initial MSR data afterwards.
936+
*/
937+
if (!data)
938+
return 0;
939+
940+
/* Extract status info when using GHCB page protocol */
941+
if (!is_ghcb_msr_protocol(data)) {
942+
if (!svm->sev_es.ghcb && (data & GHCB_MSR_MAPPED_MASK)) {
943+
/*
944+
* This happened on the recipient of migration,
945+
* should return error if cannot map the ghcb
946+
* page.
947+
*/
948+
if (csv2_map_ghcb_gpa(to_svm(vcpu),
949+
data & ~GHCB_MSR_KVM_STATUS_MASK))
950+
return 1;
951+
}
952+
953+
if (data & GHCB_MSR_RECEIVED_FIRST_SIPI_MASK)
954+
svm->sev_es.received_first_sipi = true;
955+
956+
data &= ~GHCB_MSR_KVM_STATUS_MASK;
957+
}
958+
959+
svm->vmcb->control.ghcb_gpa = data;
960+
break;
961+
default:
962+
return 1;
963+
}
964+
return 0;
965+
}
966+
967+
/*
 * csv_has_emulated_ghcb_msr - whether MSR_AMD64_SEV_ES_GHCB is emulated.
 *
 * Reports true for SEV-ES (CSV2) guests, and also when @kvm is NULL,
 * i.e. before KVM_CREATE_VM when the guest type is not yet known.
 */
bool csv_has_emulated_ghcb_msr(struct kvm *kvm)
{
	/* this should be determined after KVM_CREATE_VM. */
	return !kvm || sev_es_guest(kvm);
}
975+
858976
void csv_exit(void)
859977
{
860978
}

arch/x86/kvm/svm/csv.h

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,15 @@ void csv_exit(void);
5959

6060
int csv_alloc_trans_mempool(void);
6161
void csv_free_trans_mempool(void);
62+
int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
63+
int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
64+
bool csv_has_emulated_ghcb_msr(struct kvm *kvm);
65+
66+
/*
 * True when it is unsafe to enter the guest, i.e. the GHCB page could not
 * be remapped on the recipient side after migration (see csv2_map_ghcb_gpa).
 */
static inline bool csv2_state_unstable(struct vcpu_svm *svm)
{
	return svm->sev_es.receiver_ghcb_map_fail;
}
70+
6271

6372
#else /* !CONFIG_HYGON_CSV */
6473

@@ -67,7 +76,48 @@ static inline void csv_exit(void) { }
6776

6877
static inline int csv_alloc_trans_mempool(void) { return 0; }
6978
static inline void csv_free_trans_mempool(void) { }
79+
static inline
80+
int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { return 1; }
81+
static inline
82+
int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { return 1; }
83+
static inline bool csv_has_emulated_ghcb_msr(struct kvm *kvm) { return false; }
84+
static inline bool csv2_state_unstable(struct vcpu_svm *svm) { return false; }
7085

7186
#endif /* CONFIG_HYGON_CSV */
7287

88+
#include <asm/sev-common.h>

/*
 * CSV2 live migration support:
 * If MSR_AMD64_SEV_ES_GHCB in migration didn't apply the GHCB MSR protocol,
 * reuse bits [52-63] to indicate vcpu status. The following status are
 * currently included:
 *   * ghcb_map: indicates whether the GHCB page was mapped. The mapped GHCB
 *               page may be filled with GPRs before VMRUN, so we must
 *               remap the GHCB page on the recipient's side.
 *   * received_first_sipi: indicates the AP's INIT-SIPI-SIPI stage. Reusing
 *               these bits for received_first_sipi is acceptable because
 *               the runtime stage of the guest's Linux only applies the
 *               GHCB page protocol.
 *               It's unlikely that the migration encounters other stages
 *               of the guest's Linux. Once encountered, AP bringup may
 *               fail, which will not impact the user payload.
 * Other bits keep their original meaning. (See GHCB Spec 2.3.1 for detail)
 */
#define GHCB_MSR_KVM_STATUS_POS		52
#define GHCB_MSR_KVM_STATUS_BITS	12
#define GHCB_MSR_KVM_STATUS_MASK			\
	((BIT_ULL(GHCB_MSR_KVM_STATUS_BITS) - 1)	\
	 << GHCB_MSR_KVM_STATUS_POS)
#define GHCB_MSR_MAPPED_POS	63
#define GHCB_MSR_MAPPED_BITS	1
#define GHCB_MSR_MAPPED_MASK				\
	((BIT_ULL(GHCB_MSR_MAPPED_BITS) - 1)		\
	 << GHCB_MSR_MAPPED_POS)
#define GHCB_MSR_RECEIVED_FIRST_SIPI_POS	62
#define GHCB_MSR_RECEIVED_FIRST_SIPI_BITS	1
#define GHCB_MSR_RECEIVED_FIRST_SIPI_MASK			\
	((BIT_ULL(GHCB_MSR_RECEIVED_FIRST_SIPI_BITS) - 1)	\
	 << GHCB_MSR_RECEIVED_FIRST_SIPI_POS)
73123
#endif /* __SVM_CSV_H */

arch/x86/kvm/svm/svm.c

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2779,6 +2779,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
27792779
case MSR_AMD64_DE_CFG:
27802780
msr_info->data = svm->msr_decfg;
27812781
break;
2782+
case MSR_AMD64_SEV_ES_GHCB:
2783+
/* HYGON CSV2 support export this MSR to userspace */
2784+
if (is_x86_vendor_hygon())
2785+
return csv_get_msr(vcpu, msr_info);
2786+
else
2787+
return 1;
27822788
default:
27832789
return kvm_get_msr_common(vcpu, msr_info);
27842790
}
@@ -3038,6 +3044,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
30383044
svm->msr_decfg = data;
30393045
break;
30403046
}
3047+
case MSR_AMD64_SEV_ES_GHCB:
3048+
/* HYGON CSV2 support update this MSR from userspace */
3049+
if (is_x86_vendor_hygon())
3050+
return csv_set_msr(vcpu, msr);
3051+
else
3052+
return 1;
30413053
default:
30423054
return kvm_set_msr_common(vcpu, msr);
30433055
}
@@ -4207,6 +4219,19 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
42074219

42084220
trace_kvm_entry(vcpu, force_immediate_exit);
42094221

4222+
/*
4223+
* For receipient side of CSV2 guest, fake the exit code as SVM_EXIT_ERR
4224+
* and return directly if failed to mapping the necessary GHCB page.
4225+
* When handling the exit code afterwards, it can exit to userspace and
4226+
* stop the guest.
4227+
*/
4228+
if (is_x86_vendor_hygon() && sev_es_guest(vcpu->kvm)) {
4229+
if (csv2_state_unstable(svm)) {
4230+
svm->vmcb->control.exit_code = SVM_EXIT_ERR;
4231+
return EXIT_FASTPATH_NONE;
4232+
}
4233+
}
4234+
42104235
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
42114236
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
42124237
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
@@ -4401,6 +4426,12 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
44014426
if (kvm && sev_es_guest(kvm))
44024427
return false;
44034428
break;
4429+
case MSR_AMD64_SEV_ES_GHCB:
4430+
/* HYGON CSV2 support emulate this MSR */
4431+
if (is_x86_vendor_hygon())
4432+
return csv_has_emulated_ghcb_msr(kvm);
4433+
else
4434+
return false;
44044435
default:
44054436
break;
44064437
}

arch/x86/kvm/svm/svm.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -249,6 +249,10 @@ struct vcpu_sev_es_state {
249249
gpa_t snp_vmsa_gpa;
250250
bool snp_ap_waiting_for_reset;
251251
bool snp_has_guest_vmsa;
252+
#ifdef CONFIG_HYGON_CSV
253+
/* migrated ghcb mapping state for HYGON CSV2 */
254+
bool receiver_ghcb_map_fail;
255+
#endif
252256
};
253257

254258
struct vcpu_svm {

arch/x86/kvm/vmx/vmx.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7075,6 +7075,7 @@ bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
70757075
return nested;
70767076
case MSR_AMD64_VIRT_SPEC_CTRL:
70777077
case MSR_AMD64_TSC_RATIO:
7078+
case MSR_AMD64_SEV_ES_GHCB:
70787079
/* This is AMD only. */
70797080
return false;
70807081
default:

arch/x86/kvm/x86.c

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -454,6 +454,8 @@ static const u32 emulated_msrs_all[] = {
454454

455455
MSR_K7_HWCR,
456456
MSR_KVM_POLL_CONTROL,
457+
458+
MSR_AMD64_SEV_ES_GHCB,
457459
};
458460

459461
static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
@@ -5015,6 +5017,17 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
50155017
case KVM_CAP_READONLY_MEM:
50165018
r = kvm ? kvm_arch_has_readonly_mem(kvm) : 1;
50175019
break;
5020+
case KVM_CAP_SEV_ES_GHCB:
5021+
r = 0;
5022+
5023+
/* Both CSV2 and SEV-ES guests support MSR_AMD64_SEV_ES_GHCB,
5024+
* but only CSV2 guest support export to emulate
5025+
* MSR_AMD64_SEV_ES_GHCB.
5026+
*/
5027+
if (is_x86_vendor_hygon())
5028+
r = static_call(kvm_x86_has_emulated_msr)(kvm,
5029+
MSR_AMD64_SEV_ES_GHCB);
5030+
break;
50185031
default:
50195032
break;
50205033
}

include/uapi/linux/kvm.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -964,6 +964,8 @@ struct kvm_enable_cap {
964964
#define KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED 243
965965
#define KVM_CAP_GUEST_MEMFD_FLAGS 244
966966

967+
#define KVM_CAP_SEV_ES_GHCB 500
968+
967969
struct kvm_irq_routing_irqchip {
968970
__u32 irqchip;
969971
__u32 pin;

0 commit comments

Comments
 (0)