/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_TDX_H
#define __KVM_X86_TDX_H

#include <linux/list.h>
#include <linux/kvm_host.h>

#include "posted_intr.h"
#include "tdx_errno.h"
#include "tdx_arch.h"
#include "tdx_ops.h"

#ifdef CONFIG_INTEL_TDX_HOST

struct tdx_td_page {
	unsigned long va;
	hpa_t pa;
	bool added;
};
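
/*
 * Per-VM TDX state, wrapping struct kvm.  tdr and tdcs are the TD Root and
 * TD Control Structure pages owned by the TDX module, and hkid is the host
 * key ID (HKID) assigned to this TD's private memory.
 */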
struct kvm_tdx {
	struct kvm kvm;

	struct tdx_td_page tdr;
	struct tdx_td_page *tdcs;

	u64 attributes;
	u64 xfam;
	int hkid;

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	bool finalized;

	u64 tsc_offset;
};
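
/*
 * Per-vCPU TDX state, wrapping struct kvm_vcpu.  tdvpr is the TD Virtual
 * Processor Root page and tdvpx points to its extension (TDVPX) pages, all
 * managed by the TDX module.
 */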
struct vcpu_tdx {
	struct kvm_vcpu vcpu;

	struct tdx_td_page tdvpr;
	struct tdx_td_page *tdvpx;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	bool initialized;
};
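
/*
 * The maximum number of struct tdx_cpuid_config entries that fit between the
 * cpuid_configs member and the end of struct tdsysinfo_struct.
 */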
#define TDX_MAX_NR_CPUID_CONFIGS				\
	((sizeof(struct tdsysinfo_struct) -			\
	  offsetof(struct tdsysinfo_struct, cpuid_configs))	\
	 / sizeof(struct tdx_cpuid_config))

struct tdx_capabilities {
	u8 tdcs_nr_pages;
	u8 tdvpx_nr_pages;

	u64 attrs_fixed0;
	u64 attrs_fixed1;
	u64 xfam_fixed0;
	u64 xfam_fixed1;

	u32 nr_cpuid_configs;
	struct tdx_cpuid_config cpuid_configs[TDX_MAX_NR_CPUID_CONFIGS];
};
static inline bool is_td(struct kvm *kvm)
{
	return kvm->arch.vm_type == KVM_X86_TDX_VM;
}

static inline bool is_td_vcpu(struct kvm_vcpu *vcpu)
{
	return is_td(vcpu->kvm);
}

static inline bool is_debug_td(struct kvm_vcpu *vcpu)
{
	return !vcpu->arch.guest_state_protected;
}

static inline struct kvm_tdx *to_kvm_tdx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_tdx, kvm);
}

static inline struct vcpu_tdx *to_tdx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_tdx, vcpu);
}

static inline bool is_td_initialized(struct kvm *kvm)
{
	return !!kvm->max_vcpus;
}
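
/*
 * Sanity checks for accesses to the TD VMCS via TDH.VP.RD/WR.  VMCS field
 * encodings carry the access width in bits 14:13 (00 = 16-bit, 01 = 64-bit,
 * 10 = 32-bit, 11 = natural-width); the checks below reject constant field
 * encodings whose width does not match the accessor being used.
 */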
static __always_inline void tdvps_vmcs_check(u32 field, u8 bits)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && (field) & 0x1,
			 "Read/Write to TD VMCS *_HIGH fields not supported");

	BUILD_BUG_ON(bits != 16 && bits != 32 && bits != 64);

	BUILD_BUG_ON_MSG(bits != 64 && __builtin_constant_p(field) &&
			 (((field) & 0x6000) == 0x2000 ||
			  ((field) & 0x6000) == 0x6000),
			 "Invalid TD VMCS access for 64-bit field");
	BUILD_BUG_ON_MSG(bits != 32 && __builtin_constant_p(field) &&
			 ((field) & 0x6000) == 0x4000,
			 "Invalid TD VMCS access for 32-bit field");
	BUILD_BUG_ON_MSG(bits != 16 && __builtin_constant_p(field) &&
			 ((field) & 0x6000) == 0x0000,
			 "Invalid TD VMCS access for 16-bit field");
}

static __always_inline void tdvps_state_non_arch_check(u64 field, u8 bits) {}
static __always_inline void tdvps_management_check(u64 field, u8 bits) {}

#define TDX_BUILD_TDVPS_ACCESSORS(bits, uclass, lclass)				\
static __always_inline u##bits td_##lclass##_read##bits(struct vcpu_tdx *tdx,	\
							u32 field)		\
{										\
	struct tdx_ex_ret ex_ret;						\
	u64 err;								\
										\
	tdvps_##lclass##_check(field, bits);					\
	err = tdh_vp_rd(tdx->tdvpr.pa, TDVPS_##uclass(field), &ex_ret);	\
	if (unlikely(err)) {							\
		pr_err("TDH_VP_RD["#uclass".0x%x] failed: 0x%llx\n",		\
		       field, err);						\
		return 0;							\
	}									\
	return (u##bits)ex_ret.r8;						\
}										\
static __always_inline void td_##lclass##_write##bits(struct vcpu_tdx *tdx,	\
						      u32 field, u##bits val)	\
{										\
	struct tdx_ex_ret ex_ret;						\
	u64 err;								\
										\
	tdvps_##lclass##_check(field, bits);					\
	err = tdh_vp_wr(tdx->tdvpr.pa, TDVPS_##uclass(field), val,		\
			GENMASK_ULL(bits - 1, 0), &ex_ret);			\
	if (unlikely(err))							\
		pr_err("TDH_VP_WR["#uclass".0x%x] = 0x%llx failed: 0x%llx\n",	\
		       field, (u64)val, err);					\
}										\
static __always_inline void td_##lclass##_setbit##bits(struct vcpu_tdx *tdx,	\
						       u32 field, u64 bit)	\
{										\
	struct tdx_ex_ret ex_ret;						\
	u64 err;								\
										\
	tdvps_##lclass##_check(field, bits);					\
	err = tdh_vp_wr(tdx->tdvpr.pa, TDVPS_##uclass(field), bit, bit,		\
			&ex_ret);						\
	if (unlikely(err))							\
		pr_err("TDH_VP_WR["#uclass".0x%x] |= 0x%llx failed: 0x%llx\n",	\
		       field, bit, err);					\
}										\
static __always_inline void td_##lclass##_clearbit##bits(struct vcpu_tdx *tdx,	\
							 u32 field, u64 bit)	\
{										\
	struct tdx_ex_ret ex_ret;						\
	u64 err;								\
										\
	tdvps_##lclass##_check(field, bits);					\
	err = tdh_vp_wr(tdx->tdvpr.pa, TDVPS_##uclass(field), 0, bit,		\
			&ex_ret);						\
	if (unlikely(err))							\
		pr_err("TDH_VP_WR["#uclass".0x%x] &= ~0x%llx failed: 0x%llx\n",	\
		       field, bit, err);					\
}
TDX_BUILD_TDVPS_ACCESSORS(16, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(64, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(64, STATE_NON_ARCH, state_non_arch);
TDX_BUILD_TDVPS_ACCESSORS(8, MANAGEMENT, management);
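
/*
 * Illustrative usage sketch: each TDX_BUILD_TDVPS_ACCESSORS() invocation
 * above expands into read/write/setbit/clearbit helpers for one field class,
 * e.g. td_vmcs_read64() and td_vmcs_write64() for 64-bit TD VMCS fields.
 * Assuming GUEST_RIP from <asm/vmx.h> is visible at the call site, a caller
 * might then do:
 *
 *	u64 rip = td_vmcs_read64(to_tdx(vcpu), GUEST_RIP);
 *	td_vmcs_write64(to_tdx(vcpu), GUEST_RIP, rip + insn_len);
 */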

static __always_inline u64 td_tdcs_exec_read64(struct kvm_tdx *kvm_tdx, u32 field)
{
	struct tdx_ex_ret ex_ret;
	u64 err;

	err = tdh_mng_rd(kvm_tdx->tdr.pa, TDCS_EXEC(field), &ex_ret);
	if (unlikely(err)) {
		pr_err("TDH_MNG_RD[EXEC.0x%x] failed: 0x%llx\n", field, err);
		return 0;
	}
	return ex_ret.r8;
}
#else
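/* Stubs so callers compile when CONFIG_INTEL_TDX_HOST is not set. */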
struct kvm_tdx;
struct vcpu_tdx;
static inline bool is_td(struct kvm *kvm) { return false; }
static inline bool is_td_vcpu(struct kvm_vcpu *vcpu) { return false; }
static inline bool is_debug_td(struct kvm_vcpu *vcpu) { return false; }
static inline struct kvm_tdx *to_kvm_tdx(struct kvm *kvm) { return NULL; }
static inline struct vcpu_tdx *to_tdx(struct kvm_vcpu *vcpu) { return NULL; }
#endif /* CONFIG_INTEL_TDX_HOST */
#endif /* __KVM_X86_TDX_H */