Skip to content

Commit 1f3da93

Browse files
lifeixlijinxia
authored and committed
hv: refine atomic_load/store_xxx name
Rename atomic_load/store_xxx32 to atomic_load/store; rename atomic_load/store_xxx64 to atomic_load64/store64. Signed-off-by: Li, Fei1 <fei1.li@intel.com> Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent 336a888 commit 1f3da93

File tree

10 files changed

+47
-78
lines changed

10 files changed

+47
-78
lines changed

hypervisor/arch/x86/assign.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ is_entry_invalid(struct ptdev_remapping_info *entry)
9898
static inline bool
9999
is_entry_active(struct ptdev_remapping_info *entry)
100100
{
101-
return atomic_load_acq_int(&entry->active) == ACTIVE_FLAG;
101+
return atomic_load((int *)&entry->active) == ACTIVE_FLAG;
102102
}
103103

104104
/* require ptdev_lock protect */

hypervisor/arch/x86/guest/vcpu.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -82,9 +82,9 @@ int create_vcpu(int cpu_id, struct vm *vm, struct vcpu **rtn_vcpu_handle)
8282
*/
8383
vcpu->vcpu_id = atomic_xadd(&vm->hw.created_vcpus, 1);
8484
/* vm->hw.vcpu_array[vcpu->vcpu_id] = vcpu; */
85-
atomic_store_rel_64(
86-
(unsigned long *)&vm->hw.vcpu_array[vcpu->vcpu_id],
87-
(unsigned long)vcpu);
85+
atomic_store64(
86+
(long *)&vm->hw.vcpu_array[vcpu->vcpu_id],
87+
(long)vcpu);
8888

8989
ASSERT(vcpu->vcpu_id < vm->hw.num_vcpus,
9090
"Allocated vcpu_id is out of range!");
@@ -221,9 +221,9 @@ int destroy_vcpu(struct vcpu *vcpu)
221221
ASSERT(vcpu != NULL, "Incorrect arguments");
222222

223223
/* vcpu->vm->hw.vcpu_array[vcpu->vcpu_id] = NULL; */
224-
atomic_store_rel_64(
225-
(unsigned long *)&vcpu->vm->hw.vcpu_array[vcpu->vcpu_id],
226-
(unsigned long)NULL);
224+
atomic_store64(
225+
(long *)&vcpu->vm->hw.vcpu_array[vcpu->vcpu_id],
226+
(long)NULL);
227227

228228
atomic_dec(&vcpu->vm->hw.created_vcpus);
229229

@@ -282,13 +282,13 @@ void pause_vcpu(struct vcpu *vcpu, enum vcpu_state new_state)
282282
vcpu->state = new_state;
283283

284284
get_schedule_lock(vcpu->pcpu_id);
285-
if (atomic_load_acq_32(&vcpu->running) == 1) {
285+
if (atomic_load(&vcpu->running) == 1) {
286286
remove_vcpu_from_runqueue(vcpu);
287287
make_reschedule_request(vcpu);
288288
release_schedule_lock(vcpu->pcpu_id);
289289

290290
if (vcpu->pcpu_id != pcpu_id) {
291-
while (atomic_load_acq_32(&vcpu->running) == 1)
291+
while (atomic_load(&vcpu->running) == 1)
292292
__asm__ __volatile("pause" ::: "memory");
293293
}
294294
} else {

hypervisor/arch/x86/guest/vlapic.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -484,7 +484,7 @@ vlapic_get_lvt(struct vlapic *vlapic, uint32_t offset)
484484
uint32_t val;
485485

486486
idx = lvt_off_to_idx(offset);
487-
val = atomic_load_acq_32(&vlapic->lvt_last[idx]);
487+
val = atomic_load((int *)&vlapic->lvt_last[idx]);
488488
return val;
489489
}
490490

@@ -547,7 +547,7 @@ vlapic_lvt_write_handler(struct vlapic *vlapic, uint32_t offset)
547547
vlapic_update_lvtt(vlapic, val);
548548

549549
*lvtptr = val;
550-
atomic_store_rel_32(&vlapic->lvt_last[idx], val);
550+
atomic_store((int *)&vlapic->lvt_last[idx], val);
551551
}
552552

553553
static void
@@ -1097,7 +1097,7 @@ vlapic_pending_intr(struct vlapic *vlapic, int *vecptr)
10971097
irrptr = &lapic->irr[0];
10981098

10991099
for (i = 7; i >= 0; i--) {
1100-
val = atomic_load_acq_int(&irrptr[i].val);
1100+
val = atomic_load((int *)&irrptr[i].val);
11011101
bitpos = fls(val);
11021102
if (bitpos >= 0) {
11031103
vector = i * 32 + bitpos;
@@ -2007,7 +2007,7 @@ apicv_pending_intr(struct vlapic *vlapic, __unused int *vecptr)
20072007

20082008
pir_desc = vlapic->pir_desc;
20092009

2010-
pending = atomic_load_acq_long(&pir_desc->pending);
2010+
pending = atomic_load64((long *)&pir_desc->pending);
20112011
if (!pending)
20122012
return 0;
20132013

hypervisor/arch/x86/guest/vm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ int create_vm(struct vm_description *vm_desc, struct vm **rtn_vm)
127127
snprintf(&vm->attr.name[0], MAX_VM_NAME_LEN, "vm_%d",
128128
vm->attr.id);
129129

130-
atomic_store_rel_int(&vm->hw.created_vcpus, 0);
130+
atomic_store(&vm->hw.created_vcpus, 0);
131131

132132
/* gpa_lowtop are used for system start up */
133133
vm->hw.gpa_lowtop = 0;

hypervisor/common/hypercall.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -363,7 +363,7 @@ static void complete_request(struct vcpu *vcpu)
363363
req_buf = (struct vhm_request_buffer *)
364364
vcpu->vm->sw.io_shared_page;
365365
req_buf->req_queue[vcpu->vcpu_id].valid = false;
366-
atomic_store_rel_32(&vcpu->ioreq_pending, 0);
366+
atomic_store(&vcpu->ioreq_pending, 0);
367367

368368
return;
369369
}
@@ -900,7 +900,7 @@ int acrn_insert_request_wait(struct vcpu *vcpu, struct vhm_request *req)
900900
fire_vhm_interrupt();
901901

902902
/* pause vcpu, wait for VHM to handle the MMIO request */
903-
atomic_store_rel_32(&vcpu->ioreq_pending, 1);
903+
atomic_store(&vcpu->ioreq_pending, 1);
904904
pause_vcpu(vcpu, VCPU_PAUSED);
905905

906906
return 0;

hypervisor/common/schedule.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,7 @@ static void context_switch_out(struct vcpu *vcpu)
146146
/* cancel event(int, gp, nmi and exception) injection */
147147
cancel_event_injection(vcpu);
148148

149-
atomic_store_rel_32(&vcpu->running, 0);
149+
atomic_store(&vcpu->running, 0);
150150
/* do prev vcpu context switch out */
151151
/* For now, we don't need to invalid ept.
152152
* But if we have more than one vcpu on one pcpu,
@@ -163,7 +163,7 @@ static void context_switch_in(struct vcpu *vcpu)
163163
if (vcpu == NULL)
164164
return;
165165

166-
atomic_store_rel_32(&vcpu->running, 1);
166+
atomic_store(&vcpu->running, 1);
167167
/* FIXME:
168168
* Now, we don't need to load new vcpu VMCS because
169169
* we only do switch between vcpu loop and idle loop.

hypervisor/debug/serial_internal.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -156,7 +156,7 @@ struct tgt_uart {
156156
uint64_t base_address;
157157
uint32_t clock_frequency;
158158
uint32_t buffer_size;
159-
unsigned int open_count;
159+
int open_count;
160160

161161
/* Target specific function pointers. */
162162
int (*init)(struct tgt_uart *tgt_uart);

hypervisor/debug/uart16550.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -161,10 +161,10 @@ static int uart16550_init(struct tgt_uart *tgt_uart)
161161
status = -ENODEV;
162162
} else {
163163
if (strcmp(tgt_uart->uart_id, "STDIO") == 0) {
164-
atomic_store_rel_int(&tgt_uart->open_count, 0);
164+
atomic_store(&tgt_uart->open_count, 0);
165165
} else {
166166
/* set open count to 1 to prevent open */
167-
atomic_store_rel_int(&tgt_uart->open_count, 1);
167+
atomic_store(&tgt_uart->open_count, 1);
168168
status = -EINVAL;
169169
}
170170
}

hypervisor/include/arch/x86/guest/vcpu.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -254,8 +254,8 @@ struct vcpu {
254254
unsigned long pending_pre_work; /* any pre work pending? */
255255
bool launched; /* Whether the vcpu is launched on target pcpu */
256256
unsigned int paused_cnt; /* how many times vcpu is paused */
257-
unsigned int running; /* vcpu is picked up and run? */
258-
unsigned int ioreq_pending; /* ioreq is ongoing or not? */
257+
int running; /* vcpu is picked up and run? */
258+
int ioreq_pending; /* ioreq is ongoing or not? */
259259

260260
struct vhm_request req; /* used by io/ept emulation */
261261
struct mem_io mmio; /* used by io/ept emulation */

hypervisor/include/lib/atomic.h

Lines changed: 24 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,30 @@
3232

3333
#define BUS_LOCK "lock ; "
3434

35+
#define build_atomic_load(name, size, type, ptr) \
36+
static inline type name(const volatile type *ptr) \
37+
{ \
38+
type ret; \
39+
asm volatile("mov" size " %1,%0" \
40+
: "=r" (ret) \
41+
: "m" (*ptr) \
42+
: "cc", "memory"); \
43+
return ret; \
44+
}
45+
build_atomic_load(atomic_load, "l", int, p)
46+
build_atomic_load(atomic_load64, "q", long, p)
47+
48+
#define build_atomic_store(name, size, type, ptr, v) \
49+
static inline void name(volatile type *ptr, type v) \
50+
{ \
51+
asm volatile("mov" size " %1,%0" \
52+
: "=m" (*ptr) \
53+
: "r" (v) \
54+
: "cc", "memory"); \
55+
}
56+
build_atomic_store(atomic_store, "l", int, p, v)
57+
build_atomic_store(atomic_store64, "q", long, p, v)
58+
3559
/*
3660
* #define atomic_set_int(P, V) (*(unsigned int *)(P) |= (V))
3761
*/
@@ -135,56 +159,6 @@ static inline long atomic_swap_long(unsigned long *p, unsigned long v)
135159
*/
136160
#define atomic_readandclear_long(p) atomic_swap_long(p, 0)
137161

138-
/*
139-
* #define atomic_load_acq_int(P) (*(unsigned int*)(P))
140-
*/
141-
static inline int atomic_load_acq_int(unsigned int *p)
142-
{
143-
int ret;
144-
145-
__asm __volatile("movl %1,%0"
146-
: "=r"(ret)
147-
: "m" (*p)
148-
: "cc", "memory");
149-
return ret;
150-
}
151-
152-
/*
153-
* #define atomic_store_rel_int(P, V) (*(unsigned int *)(P) = (V))
154-
*/
155-
static inline void atomic_store_rel_int(unsigned int *p, unsigned int v)
156-
{
157-
__asm __volatile("movl %1,%0"
158-
: "=m" (*p)
159-
: "r" (v)
160-
: "cc", "memory");
161-
}
162-
163-
/*
164-
* #define atomic_load_acq_long(P) (*(unsigned long*)(P))
165-
*/
166-
static inline long atomic_load_acq_long(unsigned long *p)
167-
{
168-
long ret;
169-
170-
__asm __volatile("movq %1,%0"
171-
: "=r"(ret)
172-
: "m" (*p)
173-
: "cc", "memory");
174-
return ret;
175-
}
176-
177-
/*
178-
* #define atomic_store_rel_long(P, V) (*(unsigned long *)(P) = (V))
179-
*/
180-
static inline void atomic_store_rel_long(unsigned long *p, unsigned long v)
181-
{
182-
__asm __volatile("movq %1,%0"
183-
: "=m" (*p)
184-
: "r" (v)
185-
: "cc", "memory");
186-
}
187-
188162
static inline int atomic_cmpxchg_int(unsigned int *p,
189163
int old, int new)
190164
{
@@ -198,11 +172,6 @@ static inline int atomic_cmpxchg_int(unsigned int *p,
198172
return ret;
199173
}
200174

201-
#define atomic_load_acq_32 atomic_load_acq_int
202-
#define atomic_store_rel_32 atomic_store_rel_int
203-
#define atomic_load_acq_64 atomic_load_acq_long
204-
#define atomic_store_rel_64 atomic_store_rel_long
205-
206175
#define build_atomic_xadd(name, size, type, ptr, v) \
207176
static inline type name(type *ptr, type v) \
208177
{ \

0 commit comments

Comments
 (0)