
Commit a06a2f2

fyin1 authored and NanlinXie committed
hv: implement lowlevel S3 enter/wakeup
The S3 enter low-level routine saves the CPU context to memory and enters the S3 state. The S3 wakeup low-level routine restores the CPU context and returns.

Signed-off-by: Zheng Gen <gen.zheng@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent 4434910 commit a06a2f2
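The call/return flow across suspend and resume is easier to see from the caller's side. Below is a minimal C sketch, assuming a hypothetical host PM entry point do_acpi_s3() and hypothetical PM1 control values; only __enter_s3, restore_s3_context, and cpu_ctx are real names from this commit:

    #include <hypervisor.h>   /* struct vm, uint32_t, per this commit's pm.c */

    /* Hypothetical caller; not part of this commit. */
    void do_acpi_s3(struct vm *vm, uint32_t pm1a_cnt_val, uint32_t pm1b_cnt_val)
    {
        /*
         * __enter_s3 saves the GPRs, RFLAGS, IDTR/LDTR and
         * CR0/CR3/CR4 into the global cpu_ctx, then (once the
         * "enter Sx" placeholder in wakeup.S is filled in)
         * programs the PM1 control registers to enter S3.
         */
        __enter_s3(vm, pm1a_cnt_val, pm1b_cnt_val);

        /*
         * On wakeup, firmware hands control to the trampoline,
         * which jumps to restore_s3_context. That routine reloads
         * cpu_ctx, including the saved %rsp, so its final retq
         * lands back here as if __enter_s3 had simply returned.
         */
    }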

File tree

hypervisor/Makefile
hypervisor/arch/x86/pm.c
hypervisor/arch/x86/wakeup.S
hypervisor/include/arch/x86/host_pm.h
hypervisor/include/arch/x86/hv_arch.h

5 files changed: +112 −0 lines changed

hypervisor/Makefile

Lines changed: 1 addition & 0 deletions
@@ -119,6 +119,7 @@ C_SRCS += arch/x86/trusty.c
 C_SRCS += arch/x86/cpu_state_tbl.c
 C_SRCS += arch/x86/mtrr.c
 C_SRCS += arch/x86/pm.c
+S_SRCS += arch/x86/wakeup.S
 C_SRCS += arch/x86/guest/vcpu.c
 C_SRCS += arch/x86/guest/vm.c
 C_SRCS += arch/x86/guest/instr_emul_wrapper.c

hypervisor/arch/x86/pm.c

Lines changed: 2 additions & 0 deletions
@@ -4,6 +4,8 @@
  */
 #include <hypervisor.h>
 
+struct run_context cpu_ctx;
+
 void restore_msrs(void)
 {
 #ifdef STACK_PROTECTOR
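For readers following the assembly below: the CPU_CONTEXT_OFFSET_* constants used by wakeup.S are byte offsets into this cpu_ctx object. A minimal sketch of the pattern, with an illustrative layout only; the real struct run_context and offset macros are defined in vcpu.h and are not part of this commit:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative layout; field order must match the offset macros. */
    struct run_context_sketch {
        uint64_t rax;    /* byte offset 0  */
        uint64_t rbx;    /* byte offset 8  */
        uint64_t rcx;    /* byte offset 16 */
        /* ... remaining GPRs, rflags, cr0/cr3/cr4, idtr/ldtr ... */
    };

    /* Assembly cannot evaluate offsetof(), so the offsets are plain
     * #defines that must be kept in sync with the struct: */
    #define SKETCH_CPU_CONTEXT_OFFSET_RAX 0U

    /* A static assertion catches drift between the two: */
    _Static_assert(offsetof(struct run_context_sketch, rax) ==
                   SKETCH_CPU_CONTEXT_OFFSET_RAX, "offset out of sync");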

hypervisor/arch/x86/wakeup.S

Lines changed: 96 additions & 0 deletions
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) <2018> Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <vcpu.h>
+
+    .text
+    .align 8
+    .code64
+    .extern restore_msrs
+    .extern cpu_ctx
+    .extern load_gdtr_and_tr
+
+    .global __enter_s3
+__enter_s3:
+    movq %rax, CPU_CONTEXT_OFFSET_RAX + cpu_ctx(%rip)
+    movq %rbx, CPU_CONTEXT_OFFSET_RBX + cpu_ctx(%rip)
+    movq %rcx, CPU_CONTEXT_OFFSET_RCX + cpu_ctx(%rip)
+    movq %rdx, CPU_CONTEXT_OFFSET_RDX + cpu_ctx(%rip)
+    movq %rdi, CPU_CONTEXT_OFFSET_RDI + cpu_ctx(%rip)
+    movq %rsi, CPU_CONTEXT_OFFSET_RSI + cpu_ctx(%rip)
+    movq %rbp, CPU_CONTEXT_OFFSET_RBP + cpu_ctx(%rip)
+    movq %rsp, CPU_CONTEXT_OFFSET_RSP + cpu_ctx(%rip)
+    movq %r8, CPU_CONTEXT_OFFSET_R8 + cpu_ctx(%rip)
+    movq %r9, CPU_CONTEXT_OFFSET_R9 + cpu_ctx(%rip)
+    movq %r10, CPU_CONTEXT_OFFSET_R10 + cpu_ctx(%rip)
+    movq %r11, CPU_CONTEXT_OFFSET_R11 + cpu_ctx(%rip)
+    movq %r12, CPU_CONTEXT_OFFSET_R12 + cpu_ctx(%rip)
+    movq %r13, CPU_CONTEXT_OFFSET_R13 + cpu_ctx(%rip)
+    movq %r14, CPU_CONTEXT_OFFSET_R14 + cpu_ctx(%rip)
+    movq %r15, CPU_CONTEXT_OFFSET_R15 + cpu_ctx(%rip)
+
+    pushfq
+    popq CPU_CONTEXT_OFFSET_RFLAGS + cpu_ctx(%rip)
+
+    sidt CPU_CONTEXT_OFFSET_IDTR + cpu_ctx(%rip)
+    sldt CPU_CONTEXT_OFFSET_LDTR + cpu_ctx(%rip)
+
+    mov %cr0, %rax
+    mov %rax, CPU_CONTEXT_OFFSET_CR0 + cpu_ctx(%rip)
+
+    mov %cr3, %rax
+    mov %rax, CPU_CONTEXT_OFFSET_CR3 + cpu_ctx(%rip)
+
+    mov %cr4, %rax
+    mov %rax, CPU_CONTEXT_OFFSET_CR4 + cpu_ctx(%rip)
+
+    wbinvd
+
+    /* Will add the function call to enter Sx here */
+
+
+/*
+ * When the system resumes from S3, trampoline_start64 jumps to
+ * restore_s3_context after setting up a temporary stack.
+ */
+    .global restore_s3_context
+restore_s3_context:
+    mov CPU_CONTEXT_OFFSET_CR4 + cpu_ctx(%rip), %rax
+    mov %rax, %cr4
+
+    mov CPU_CONTEXT_OFFSET_CR3 + cpu_ctx(%rip), %rax
+    mov %rax, %cr3
+
+    mov CPU_CONTEXT_OFFSET_CR0 + cpu_ctx(%rip), %rax
+    mov %rax, %cr0
+
+    lidt CPU_CONTEXT_OFFSET_IDTR + cpu_ctx(%rip)
+    lldt CPU_CONTEXT_OFFSET_LDTR + cpu_ctx(%rip)
+
+    mov CPU_CONTEXT_OFFSET_SS + cpu_ctx(%rip), %ss
+    mov CPU_CONTEXT_OFFSET_RSP + cpu_ctx(%rip), %rsp
+
+    pushq CPU_CONTEXT_OFFSET_RFLAGS + cpu_ctx(%rip)
+    popfq
+
+    call load_gdtr_and_tr
+    call restore_msrs
+
+    movq CPU_CONTEXT_OFFSET_RAX + cpu_ctx(%rip), %rax
+    movq CPU_CONTEXT_OFFSET_RBX + cpu_ctx(%rip), %rbx
+    movq CPU_CONTEXT_OFFSET_RCX + cpu_ctx(%rip), %rcx
+    movq CPU_CONTEXT_OFFSET_RDX + cpu_ctx(%rip), %rdx
+    movq CPU_CONTEXT_OFFSET_RDI + cpu_ctx(%rip), %rdi
+    movq CPU_CONTEXT_OFFSET_RSI + cpu_ctx(%rip), %rsi
+    movq CPU_CONTEXT_OFFSET_RBP + cpu_ctx(%rip), %rbp
+    movq CPU_CONTEXT_OFFSET_R8 + cpu_ctx(%rip), %r8
+    movq CPU_CONTEXT_OFFSET_R9 + cpu_ctx(%rip), %r9
+    movq CPU_CONTEXT_OFFSET_R10 + cpu_ctx(%rip), %r10
+    movq CPU_CONTEXT_OFFSET_R11 + cpu_ctx(%rip), %r11
+    movq CPU_CONTEXT_OFFSET_R12 + cpu_ctx(%rip), %r12
+    movq CPU_CONTEXT_OFFSET_R13 + cpu_ctx(%rip), %r13
+    movq CPU_CONTEXT_OFFSET_R14 + cpu_ctx(%rip), %r14
+    movq CPU_CONTEXT_OFFSET_R15 + cpu_ctx(%rip), %r15
+
+    retq
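Two details are worth noting. First, restore_s3_context reloads %ss and %rsp from cpu_ctx before its final retq, so the return address pushed by __enter_s3's caller is still on the restored stack: the retq here effectively returns from __enter_s3. Second, the "enter Sx" placeholder is expected to program the ACPI PM1 control registers. A hedged C sketch of that step, assuming port-mapped PM1a/PM1b control registers; the helper names are hypothetical and not from this commit:

    #include <stdint.h>

    /* Port I/O write, AT&T syntax; "Nd" allows an imm8 or %dx port. */
    static inline void sketch_pio_write16(uint16_t val, uint16_t port)
    {
        asm volatile ("outw %0, %1" : : "a"(val), "Nd"(port));
    }

    /* Write SLP_TYPx|SLP_EN to the PM1a control register, per the
     * ACPI spec; the port address would come from the FADT. */
    static void sketch_enter_sx(uint16_t pm1a_cnt_port, uint32_t pm1a_cnt_val)
    {
        /* On a successful S3 entry this write does not return; the
         * CPU next runs the firmware resume path, which reaches
         * restore_s3_context via the trampoline. */
        sketch_pio_write16((uint16_t)pm1a_cnt_val, pm1a_cnt_port);
    }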

hypervisor/include/arch/x86/host_pm.h

Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) <2018> Intel Corporation
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef HOST_PM_H
+#define HOST_PM_H
+
+extern void __enter_s3(struct vm *vm, uint32_t pm1a_cnt_val,
+        uint32_t pm1b_cnt_val);
+extern void restore_s3_context(void);
+
+#endif /* HOST_PM_H */

hypervisor/include/arch/x86/hv_arch.h

Lines changed: 1 addition & 0 deletions
@@ -19,6 +19,7 @@
 #include <vcpu.h>
 #include <trusty.h>
 #include <guest_pm.h>
+#include <host_pm.h>
 #include <vm.h>
 #include <cpuid.h>
 #include <mmu.h>
