Skip to content

Commit f039d75

Browse files
fyin1acrnsi
authored and committed
hv: pm: enhance platform S5 entering operation
Now, we have the assumption that the SOS controls whether the platform should enter S5 or not. So when the SOS tries to enter S5, we just forward the S5 request to the native port, which makes sure platform S5 is totally aligned with SOS S5. With higher-severity guests introduced, this assumption is no longer true. We need to extend the platform S5 process to handle higher-severity guests: - For a DM-launched RTVM, we need to make sure these guests are off before putting the whole platform into S5. - For a pre-launched VM, there are two cases: * if the OS running in it supports S5, we wait for the guest to power off. * if the OS running in it doesn't support S5, we expect it to invoke a hypercall to notify the HV to shut it down. NOTE: this case is not supported yet. It will be added in the future. Tracked-On: #3564 Signed-off-by: Yin Fengwei <fengwei.yin@intel.com> Reviewed-by: Li, Fei1 <fei1.li@intel.com>
1 parent ce93758 commit f039d75

File tree

3 files changed

+62
-9
lines changed

3 files changed

+62
-9
lines changed

hypervisor/arch/x86/guest/pm.c

Lines changed: 56 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -130,9 +130,9 @@ int32_t vm_load_pm_s_state(struct acrn_vm *vm)
130130
return ret;
131131
}
132132

133-
static inline uint32_t s3_enabled(uint32_t pm1_cnt)
133+
static inline bool is_s3_enabled(uint32_t pm1_cnt)
134134
{
135-
return pm1_cnt & (1U << BIT_SLP_EN);
135+
return ((pm1_cnt & (1U << BIT_SLP_EN)) != 0U);
136136
}
137137

138138
static inline uint8_t get_slp_typx(uint32_t pm1_cnt)
@@ -149,6 +149,47 @@ static bool pm1ab_io_read(struct acrn_vcpu *vcpu, uint16_t addr, size_t width)
149149
return true;
150150
}
151151

152+
#define POWEROFF_TIMEOUT (5 * 60U) /* default poweroff timeout is 5 minutes */
153+
/* wait for other vm shutdown done. If POWEROFF_TIMEOUT passed and there are
154+
* still some VMs active, we will force platform power off.
155+
*
156+
* TODO:
157+
* - Let user configure whether we wait for ever till all VMs powered off or
158+
* force shutdown once pre-defined timeout hit.
159+
*/
160+
static inline void wait_for_other_vm_shutdown(struct acrn_vm *self_vm)
161+
{
162+
uint16_t vm_id;
163+
bool ready_for_s5;
164+
uint32_t timeout = POWEROFF_TIMEOUT;
165+
struct acrn_vm *vm;
166+
167+
while (timeout != 0U) {
168+
ready_for_s5 = true;
169+
for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
170+
vm = get_vm_from_vmid(vm_id);
171+
172+
if ((vm != self_vm) && !is_poweroff_vm(vm)) {
173+
ready_for_s5 = false;
174+
}
175+
}
176+
177+
if (ready_for_s5) {
178+
break;
179+
} else {
180+
udelay(1000U * 1000U); /* delay 1s in each loop */
181+
}
182+
183+
timeout--;
184+
}
185+
}
186+
187+
/* Handle a guest-initiated S5 request: wait (bounded by POWEROFF_TIMEOUT)
 * for the other VMs to power off, then put the whole platform into S5 by
 * programming the native PM1A/PM1B control registers.
 */
static inline void enter_s5(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
{
	struct pm_s_state_data *sstate = vm->pm.sx_state_data;

	wait_for_other_vm_shutdown(vm);
	host_enter_s5(sstate, pm1a_cnt_val, pm1b_cnt_val);
}
192+
152193
static inline void enter_s3(struct acrn_vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
153194
{
154195
uint32_t guest_wakeup_vec32;
@@ -179,24 +220,30 @@ static bool pm1ab_io_write(struct acrn_vcpu *vcpu, uint16_t addr, size_t width,
179220
if (width == 2U) {
180221
uint8_t val = get_slp_typx(v);
181222

182-
if ((addr == vm->pm.sx_state_data->pm1a_cnt.address)
183-
&& (val == vm->pm.sx_state_data->s3_pkg.val_pm1a) && (s3_enabled(v) != 0U)) {
223+
if ((addr == vm->pm.sx_state_data->pm1a_cnt.address) && is_s3_enabled(v)) {
184224

185225
if (vm->pm.sx_state_data->pm1b_cnt.address != 0UL) {
186226
pm1a_cnt_ready = v;
187227
} else {
188-
enter_s3(vm, v, 0U);
228+
if (vm->pm.sx_state_data->s3_pkg.val_pm1a == val) {
229+
enter_s3(vm, v, 0U);
230+
} else if (vm->pm.sx_state_data->s5_pkg.val_pm1a == val) {
231+
enter_s5(vm, v, 0U);
232+
}
189233
}
190234

191235
to_write = false;
192-
193-
} else if ((addr == vm->pm.sx_state_data->pm1b_cnt.address)
194-
&& (val == vm->pm.sx_state_data->s3_pkg.val_pm1b) && (s3_enabled(v) != 0U)) {
236+
} else if ((addr == vm->pm.sx_state_data->pm1b_cnt.address) && is_s3_enabled(v)) {
195237

196238
if (pm1a_cnt_ready != 0U) {
197239
pm1a_cnt_val = pm1a_cnt_ready;
198240
pm1a_cnt_ready = 0U;
199-
enter_s3(vm, pm1a_cnt_val, v);
241+
242+
if (vm->pm.sx_state_data->s3_pkg.val_pm1b == val) {
243+
enter_s3(vm, pm1a_cnt_val, v);
244+
} else if (vm->pm.sx_state_data->s5_pkg.val_pm1b == val) {
245+
enter_s5(vm, pm1a_cnt_val, v);
246+
}
200247
} else {
201248
/* the case broke ACPI spec */
202249
pr_err("PM1B_CNT write error!");

hypervisor/arch/x86/pm.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -148,6 +148,11 @@ void do_acpi_sx(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, uint
148148
} while ((s1 & (1U << BIT_WAK_STS)) == 0U);
149149
}
150150

151+
/* Transition the platform into ACPI S5 (soft-off) by writing the supplied
 * sleep-type values to the native PM1A/PM1B control registers via do_acpi_sx.
 * Does not return if the write takes effect.
 */
void host_enter_s5(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
{
	do_acpi_sx(sstate_data, pm1a_cnt_val, pm1b_cnt_val);
}
155+
151156
void host_enter_s3(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
152157
{
153158
uint64_t pmain_entry_saved;

hypervisor/include/arch/x86/host_pm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ struct acpi_reset_reg {
3131

3232
struct pm_s_state_data *get_host_sstate_data(void);
3333
void host_enter_s3(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val);
34+
void host_enter_s5(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val);
3435
extern void asm_enter_s3(struct pm_s_state_data *sstate_data, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val);
3536
extern void restore_s3_context(void);
3637
struct cpu_state_info *get_cpu_pm_state_info(void);

0 commit comments

Comments
 (0)