
Commit 8202ba0

gzhai7 authored and jren1 committed
HV: move common stuff from assign.c
Move common stuff, like ptdev entry and softirq, to new ptdev.c

Signed-off-by: Edwin Zhai <edwin.zhai@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
1 parent 46f64b5 commit 8202ba0
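
Note on the hand-off being consolidated here: ptdev_interrupt_handler() runs in interrupt context and calls ptdev_enqueue_softirq(), which queues the entry and raises SOFTIRQ_DEV_ASSIGN; a softirq handler later drains the queue with ptdev_dequeue_softirq(). Below is a minimal consumer sketch, assuming only the helpers this commit moves into common/ptdev.c; the handler name and the injection step are hypothetical, and the real dispatch code stays in arch/x86/assign.c, unchanged by this diff.

#include <hypervisor.h>
#include <ptdev.h>

/* Sketch only: drain the queue filled by ptdev_enqueue_softirq(). */
static void example_softirq_dev_assign_handler(void)
{
	struct ptdev_remapping_info *entry;

	while ((entry = ptdev_dequeue_softirq()) != NULL) {
		/*
		 * Skip entries deactivated after they were queued, then
		 * inject the remapped vINTx/vMSI into entry->vm (the
		 * arch-specific part, not shown here).
		 */
	}
}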

5 files changed: +268 -229 lines

hypervisor/Makefile

Lines changed: 1 addition & 0 deletions
@@ -138,6 +138,7 @@ C_SRCS += common/trusty_hypercall.c
 C_SRCS += common/schedule.c
 C_SRCS += common/vm_load.c
 C_SRCS += common/io_request.c
+C_SRCS += common/ptdev.c

 ifdef STACK_PROTECTOR
 C_SRCS += common/stack_protector.c

hypervisor/arch/x86/assign.c

Lines changed: 0 additions & 174 deletions
@@ -6,28 +6,6 @@

 #include <hypervisor.h>

-#define ACTIVE_FLAG 0x1 /* any non zero should be okay */
-
-/* SOFTIRQ_DEV_ASSIGN list for all CPUs */
-static struct list_head softirq_dev_entry_list;
-/* passthrough device link */
-static struct list_head ptdev_list;
-static spinlock_t ptdev_lock;
-
-/* invalid_entry for error return */
-static struct ptdev_remapping_info invalid_entry = {
-	.type = PTDEV_INTR_INV,
-};
-
-/*
- * entry could both be in ptdev_list and softirq_dev_entry_list.
- * When release entry, we need make sure entry deleted from both
- * lists. We have to require two locks and the lock sequence is:
- *	ptdev_lock
- *		softirq_dev_lock
- */
-static spinlock_t softirq_dev_lock;
-
 static inline uint32_t
 entry_id_from_msix(uint16_t bdf, int8_t index)
 {
@@ -166,105 +144,6 @@ lookup_entry_by_vintx(struct vm *vm, uint8_t vpin,
 	return entry;
 }

-static void ptdev_enqueue_softirq(struct ptdev_remapping_info *entry)
-{
-	spinlock_rflags;
-	/* enqueue request in order, SOFTIRQ_DEV_ASSIGN will pickup */
-	spinlock_irqsave_obtain(&softirq_dev_lock);
-
-	/* avoid adding recursively */
-	list_del(&entry->softirq_node);
-	/* TODO: assert if entry already in list */
-	list_add_tail(&entry->softirq_node,
-			&softirq_dev_entry_list);
-	spinlock_irqrestore_release(&softirq_dev_lock);
-	raise_softirq(SOFTIRQ_DEV_ASSIGN);
-}
-
-static struct ptdev_remapping_info*
-ptdev_dequeue_softirq(void)
-{
-	struct ptdev_remapping_info *entry = NULL;
-
-	spinlock_rflags;
-	spinlock_irqsave_obtain(&softirq_dev_lock);
-
-	if (!list_empty(&softirq_dev_entry_list)) {
-		entry = get_first_item(&softirq_dev_entry_list,
-			struct ptdev_remapping_info, softirq_node);
-		list_del_init(&entry->softirq_node);
-	}
-
-	spinlock_irqrestore_release(&softirq_dev_lock);
-	return entry;
-}
-
-/* require ptdev_lock protect */
-static struct ptdev_remapping_info *
-alloc_entry(struct vm *vm, enum ptdev_intr_type type)
-{
-	struct ptdev_remapping_info *entry;
-
-	/* allocate */
-	entry = calloc(1, sizeof(*entry));
-	ASSERT(entry, "alloc memory failed");
-	entry->type = type;
-	entry->vm = vm;
-
-	INIT_LIST_HEAD(&entry->softirq_node);
-	INIT_LIST_HEAD(&entry->entry_node);
-
-	atomic_clear_int(&entry->active, ACTIVE_FLAG);
-	list_add(&entry->entry_node, &ptdev_list);
-
-	return entry;
-}
-
-/* require ptdev_lock protect */
-static void
-release_entry(struct ptdev_remapping_info *entry)
-{
-	spinlock_rflags;
-
-	/* remove entry from ptdev_list */
-	list_del_init(&entry->entry_node);
-
-	/*
-	 * remove entry from softirq list.the ptdev_lock
-	 * is required before calling release_entry.
-	 */
-	spinlock_irqsave_obtain(&softirq_dev_lock);
-	list_del_init(&entry->softirq_node);
-	spinlock_irqrestore_release(&softirq_dev_lock);
-
-	free(entry);
-}
-
-/* require ptdev_lock protect */
-static void
-release_all_entries(struct vm *vm)
-{
-	struct ptdev_remapping_info *entry;
-	struct list_head *pos, *tmp;
-
-	list_for_each_safe(pos, tmp, &ptdev_list) {
-		entry = list_entry(pos, struct ptdev_remapping_info,
-				entry_node);
-		if (entry->vm == vm)
-			release_entry(entry);
-	}
-}
-
-/* interrupt context */
-static int ptdev_interrupt_handler(__unused int irq, void *data)
-{
-	struct ptdev_remapping_info *entry =
-		(struct ptdev_remapping_info *) data;
-
-	ptdev_enqueue_softirq(entry);
-	return 0;
-}
-
 static void
 ptdev_update_irq_handler(struct vm *vm, struct ptdev_remapping_info *entry)
 {
@@ -306,40 +185,6 @@ ptdev_update_irq_handler(struct vm *vm, struct ptdev_remapping_info *entry)
 	}
 }

-/* active intr with irq registering */
-static struct ptdev_remapping_info *
-ptdev_activate_entry(struct ptdev_remapping_info *entry, uint32_t phys_irq,
-		bool lowpri)
-{
-	struct dev_handler_node *node;
-
-	/* register and allocate host vector/irq */
-	node = normal_register_handler(phys_irq, ptdev_interrupt_handler,
-		(void *)entry, true, lowpri, "dev assign");
-
-	ASSERT(node != NULL, "dev register failed");
-	entry->node = node;
-
-	atomic_set_int(&entry->active, ACTIVE_FLAG);
-	return entry;
-}
-
-static void
-ptdev_deactivate_entry(struct ptdev_remapping_info *entry)
-{
-	spinlock_rflags;
-
-	atomic_clear_int(&entry->active, ACTIVE_FLAG);
-
-	unregister_handler_common(entry->node);
-	entry->node = NULL;
-
-	/* remove from softirq list if added */
-	spinlock_irqsave_obtain(&softirq_dev_lock);
-	list_del_init(&entry->softirq_node);
-	spinlock_irqrestore_release(&softirq_dev_lock);
-}
-
 static bool ptdev_hv_owned_intx(struct vm *vm, struct ptdev_intx_info *info)
 {
 	/* vm0 pin 4 (uart) is owned by hypervisor under debug version */
@@ -1068,25 +913,6 @@ void ptdev_remove_msix_remapping(struct vm *vm, uint16_t virt_bdf,
 		remove_msix_remapping(vm, virt_bdf, i);
 }

-void ptdev_init(void)
-{
-	if (get_cpu_id() > 0)
-		return;
-
-	INIT_LIST_HEAD(&ptdev_list);
-	spinlock_init(&ptdev_lock);
-	INIT_LIST_HEAD(&softirq_dev_entry_list);
-	spinlock_init(&softirq_dev_lock);
-}
-
-void ptdev_release_all_entries(struct vm *vm)
-{
-	/* VM already down */
-	spinlock_obtain(&ptdev_lock);
-	release_all_entries(vm);
-	spinlock_release(&ptdev_lock);
-}
-
 static void get_entry_info(struct ptdev_remapping_info *entry, char *type,
 	uint32_t *irq, uint32_t *vector, uint64_t *dest, bool *lvl_tm,
 	int *pin, int *vpin, int *bdf, int *vbdf)

hypervisor/common/ptdev.c

Lines changed: 182 additions & 0 deletions
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2018 Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <hypervisor.h>
+#include <ptdev.h>
+
+/* SOFTIRQ_DEV_ASSIGN list for all CPUs */
+struct list_head softirq_dev_entry_list;
+/* passthrough device link */
+struct list_head ptdev_list;
+spinlock_t ptdev_lock;
+
+/* invalid_entry for error return */
+struct ptdev_remapping_info invalid_entry = {
+	.type = PTDEV_INTR_INV,
+};
+
+/*
+ * entry could both be in ptdev_list and softirq_dev_entry_list.
+ * When release entry, we need make sure entry deleted from both
+ * lists. We have to require two locks and the lock sequence is:
+ *	ptdev_lock
+ *		softirq_dev_lock
+ */
+spinlock_t softirq_dev_lock;
+
+static void ptdev_enqueue_softirq(struct ptdev_remapping_info *entry)
+{
+	spinlock_rflags;
+	/* enqueue request in order, SOFTIRQ_DEV_ASSIGN will pickup */
+	spinlock_irqsave_obtain(&softirq_dev_lock);
+
+	/* avoid adding recursively */
+	list_del(&entry->softirq_node);
+	/* TODO: assert if entry already in list */
+	list_add_tail(&entry->softirq_node,
+			&softirq_dev_entry_list);
+	spinlock_irqrestore_release(&softirq_dev_lock);
+	raise_softirq(SOFTIRQ_DEV_ASSIGN);
+}
+
+struct ptdev_remapping_info*
+ptdev_dequeue_softirq(void)
+{
+	struct ptdev_remapping_info *entry = NULL;
+
+	spinlock_rflags;
+	spinlock_irqsave_obtain(&softirq_dev_lock);
+
+	if (!list_empty(&softirq_dev_entry_list)) {
+		entry = get_first_item(&softirq_dev_entry_list,
+			struct ptdev_remapping_info, softirq_node);
+		list_del_init(&entry->softirq_node);
+	}
+
+	spinlock_irqrestore_release(&softirq_dev_lock);
+	return entry;
+}
+
+/* require ptdev_lock protect */
+struct ptdev_remapping_info *
+alloc_entry(struct vm *vm, enum ptdev_intr_type type)
+{
+	struct ptdev_remapping_info *entry;
+
+	/* allocate */
+	entry = calloc(1, sizeof(*entry));
+	ASSERT(entry, "alloc memory failed");
+	entry->type = type;
+	entry->vm = vm;
+
+	INIT_LIST_HEAD(&entry->softirq_node);
+	INIT_LIST_HEAD(&entry->entry_node);
+
+	atomic_clear_int(&entry->active, ACTIVE_FLAG);
+	list_add(&entry->entry_node, &ptdev_list);
+
+	return entry;
+}
+
+/* require ptdev_lock protect */
+void
+release_entry(struct ptdev_remapping_info *entry)
+{
+	spinlock_rflags;
+
+	/* remove entry from ptdev_list */
+	list_del_init(&entry->entry_node);
+
+	/*
+	 * remove entry from softirq list.the ptdev_lock
+	 * is required before calling release_entry.
+	 */
+	spinlock_irqsave_obtain(&softirq_dev_lock);
+	list_del_init(&entry->softirq_node);
+	spinlock_irqrestore_release(&softirq_dev_lock);
+
+	free(entry);
+}
+
+/* require ptdev_lock protect */
+static void
+release_all_entries(struct vm *vm)
+{
+	struct ptdev_remapping_info *entry;
+	struct list_head *pos, *tmp;
+
+	list_for_each_safe(pos, tmp, &ptdev_list) {
+		entry = list_entry(pos, struct ptdev_remapping_info,
+				entry_node);
+		if (entry->vm == vm)
+			release_entry(entry);
+	}
+}
+
+/* interrupt context */
+static int ptdev_interrupt_handler(__unused int irq, void *data)
+{
+	struct ptdev_remapping_info *entry =
+		(struct ptdev_remapping_info *) data;
+
+	ptdev_enqueue_softirq(entry);
+	return 0;
+}
+
+/* active intr with irq registering */
+struct ptdev_remapping_info *
+ptdev_activate_entry(struct ptdev_remapping_info *entry, int phys_irq,
+		bool lowpri)
+{
+	struct dev_handler_node *node;
+
+	/* register and allocate host vector/irq */
+	node = normal_register_handler(phys_irq, ptdev_interrupt_handler,
+		(void *)entry, true, lowpri, "dev assign");
+
+	ASSERT(node != NULL, "dev register failed");
+	entry->node = node;
+
+	atomic_set_int(&entry->active, ACTIVE_FLAG);
+	return entry;
+}
+
+void
+ptdev_deactivate_entry(struct ptdev_remapping_info *entry)
+{
+	spinlock_rflags;
+
+	atomic_clear_int(&entry->active, ACTIVE_FLAG);
+
+	unregister_handler_common(entry->node);
+	entry->node = NULL;
+
+	/* remove from softirq list if added */
+	spinlock_irqsave_obtain(&softirq_dev_lock);
+	list_del_init(&entry->softirq_node);
+	spinlock_irqrestore_release(&softirq_dev_lock);
+}
+
+void ptdev_init(void)
+{
+	if (get_cpu_id() > 0)
+		return;
+
+	INIT_LIST_HEAD(&ptdev_list);
+	spinlock_init(&ptdev_lock);
+	INIT_LIST_HEAD(&softirq_dev_entry_list);
+	spinlock_init(&softirq_dev_lock);
+}
+
+void ptdev_release_all_entries(struct vm *vm)
+{
+	/* VM already down */
+	spinlock_obtain(&ptdev_lock);
+	release_all_entries(vm);
+	spinlock_release(&ptdev_lock);
+}
+
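
The lock-ordering comment carried into ptdev.c (take ptdev_lock first, then softirq_dev_lock) defines the contract for the helpers that are now global: alloc_entry() and release_entry() must be called with ptdev_lock held, and release_entry() acquires softirq_dev_lock internally, as ptdev_release_all_entries() above demonstrates. Below is a hedged sketch of an arch-side caller tearing down a single remapping under that contract; find_entry_for() is a hypothetical stand-in for the arch-specific lookup and is not part of this commit.

#include <hypervisor.h>
#include <ptdev.h>

/* Hypothetical arch-side lookup, declared here only for the sketch. */
struct ptdev_remapping_info *find_entry_for(struct vm *vm, uint32_t key);

/*
 * Sketch only: remove one passthrough remapping while respecting the
 * documented lock order (ptdev_lock, then softirq_dev_lock inside the
 * common helpers).
 */
static void example_remove_remapping(struct vm *vm, uint32_t key)
{
	struct ptdev_remapping_info *entry;

	spinlock_obtain(&ptdev_lock);
	entry = find_entry_for(vm, key);	/* hypothetical lookup */
	if (entry != NULL) {
		ptdev_deactivate_entry(entry);	/* unregister IRQ, drop softirq node */
		release_entry(entry);		/* takes softirq_dev_lock internally */
	}
	spinlock_release(&ptdev_lock);
}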
