gdev: implemented the Credit scheduling algorithm for virtual devices

commit 010bd64fae785e0d4ffab352dbd5a8d6fd8b2d3c (parent 657f7a2)
Authored by Shinpei Kato
common/gdev_device.h (2 changed lines)
@@ -64,6 +64,8 @@ struct gdev_device {
struct gdev_sched_entity *se_com_current; /* compute scheduling entity */
struct gdev_sched_entity *se_mem_current; /* memory scheduling entity */
struct gdev_device *parent; /* only for virtual devices */
+ struct gdev_list list_entry_com; /* entry to active compute list */
+ struct gdev_list list_entry_mem; /* entry to active memory list */
struct gdev_list sched_com_list; /* wait list for compute scheduling */
struct gdev_list sched_mem_list; /* wait list for memory scheduling */
struct gdev_list vas_list; /* list of VASes allocated to this device */
common/gdev_list.h (24 changed lines)
@@ -43,21 +43,10 @@ struct gdev_list {
static inline void gdev_list_init(struct gdev_list *entry, void *container)
{
- entry->next = entry->prev = NULL;
+ entry->next = entry->prev = entry; /* used to be "= NULL" */
entry->container = container;
}
-static inline void gdev_list_add(struct gdev_list *entry, struct gdev_list *head)
-{
- struct gdev_list *next = head->next;
-
- entry->next = next;
- if (next)
- next->prev = entry;
- entry->prev = head; /* link to the head. */
- head->next = entry;
-}
-
static inline void gdev_list_add_next(struct gdev_list *entry, struct gdev_list *pos)
{
struct gdev_list *next = pos->next;
@@ -80,6 +69,16 @@ static inline void gdev_list_add_prev(struct gdev_list *entry, struct gdev_list
pos->prev = entry;
}
+static inline void gdev_list_add(struct gdev_list *entry, struct gdev_list *head)
+{
+ return gdev_list_add_next(entry, head);
+}
+
+static inline void gdev_list_add_tail(struct gdev_list *entry, struct gdev_list *head)
+{
+ return gdev_list_add_prev(entry, head);
+}
+
static inline void gdev_list_del(struct gdev_list *entry)
{
struct gdev_list *next = entry->next;
@@ -101,6 +100,7 @@ static inline int gdev_list_empty(struct gdev_list *entry)
static inline struct gdev_list *gdev_list_head(struct gdev_list *head)
{
+ /* head->next is the actual head of the list. */
return head ? head->next : NULL;
}
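
The list rework above is the enabling change for this commit: entries now initialize to point at themselves, so an unlinked entry can be detected without NULL checks, and gdev_list_add()/gdev_list_add_tail() become head and tail insertion on one circular structure. Below is a standalone sketch of that pattern (my own minimal reimplementation with invented names, not Gdev's header), including the container back-pointer that lets an embedded entry recover its owning object:

#include <stdio.h>

struct node {
    struct node *next, *prev;
    void *container;             /* back-pointer to the owning object */
};

static void node_init(struct node *e, void *container)
{
    e->next = e->prev = e;       /* self-pointing means "not on any list" */
    e->container = container;
}

static void node_add_next(struct node *e, struct node *pos)  /* head insert */
{
    e->next = pos->next;
    e->prev = pos;
    pos->next->prev = e;
    pos->next = e;
}

static void node_add_prev(struct node *e, struct node *pos)  /* tail insert */
{
    node_add_next(e, pos->prev);
}

static int node_unlinked(struct node *e)
{
    return e->next == e;         /* true after init or after del below */
}

static void node_del(struct node *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
    e->next = e->prev = e;       /* safe to test or delete again */
}

struct vgpu { int id; struct node entry; };

int main(void)
{
    struct node active;                 /* plays the role of phys->sched_com_list */
    struct vgpu v0 = { 0 }, v1 = { 1 };

    node_init(&active, NULL);
    node_init(&v0.entry, &v0);
    node_init(&v1.entry, &v1);

    node_add_next(&v0.entry, &active);  /* like gdev_list_add(): at the head */
    node_add_prev(&v1.entry, &active);  /* like gdev_list_add_tail(): at the tail */

    for (struct node *p = active.next; p != &active; p = p->next)
        printf("vgpu%d\n", ((struct vgpu *)p->container)->id);

    node_del(&v0.entry);
    printf("v0 unlinked: %d, list empty: %d\n",
           node_unlinked(&v0.entry), node_unlinked(&active));
    return 0;
}

The same shape is what lets a virtual device hang off its parent's sched_com_list through the new list_entry_com field while still owning a wait list of its own.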
common/gdev_sched.c (166 changed lines)
@@ -26,27 +26,57 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include "gdev_device.h"
#include "gdev_sched.h"
#include "gdev_system.h"
struct gdev_sched_entity *sched_entity_ptr[GDEV_CONTEXT_MAX_COUNT];
-gdev_lock_t global_sched_lock;
/**
- * initialize the scheduler for the device.
+ * initialize the local scheduler for the device.
*/
int gdev_init_scheduler(struct gdev_device *gdev)
{
+ struct gdev_device *phys = gdev->parent;
+
gdev_sched_create_scheduler(gdev);
+ if (phys) {
+ gdev_lock(&phys->sched_com_lock);
+ gdev_list_init(&gdev->list_entry_com, (void*)gdev);
+ gdev_list_add(&gdev->list_entry_com, &phys->sched_com_list);
+ gdev_unlock(&phys->sched_com_lock);
+ gdev_replenish_credit_compute(gdev);
+
+ gdev_lock(&phys->sched_mem_lock);
+ gdev_list_init(&gdev->list_entry_mem, (void*)gdev);
+ gdev_list_add(&gdev->list_entry_mem, &phys->sched_mem_list);
+ gdev_unlock(&phys->sched_mem_lock);
+ gdev_replenish_credit_memory(gdev);
+ }
+
return 0;
}
/**
- * finalized the scheduler for the device.
+ * finalized the local scheduler for the device.
*/
void gdev_exit_scheduler(struct gdev_device *gdev)
{
+ struct gdev_device *phys = gdev->parent;
+
+ if (phys) {
+ gdev_lock(&phys->sched_com_lock);
+ if (!gdev_list_empty(&gdev->list_entry_com))
+ gdev_list_del(&gdev->list_entry_com);
+ gdev_unlock(&phys->sched_com_lock);
+
+ gdev_lock(&phys->sched_mem_lock);
+ if (!gdev_list_empty(&gdev->list_entry_mem))
+ gdev_list_del(&gdev->list_entry_mem);
+ gdev_unlock(&phys->sched_mem_lock);
+ }
+
gdev_sched_destroy_scheduler(gdev);
}
@@ -70,6 +100,8 @@ struct gdev_sched_entity *gdev_sched_entity_create(struct gdev_device *gdev, gde
se->memcpy_instances = 0;
gdev_list_init(&se->list_entry_com, (void*)se);
gdev_list_init(&se->list_entry_mem, (void*)se);
+ gdev_time_us(&se->last_tick_com, 0);
+ gdev_time_us(&se->last_tick_mem, 0);
sched_entity_ptr[gdev_ctx_get_cid(ctx)] = se;
return se;
@@ -83,6 +115,36 @@ void gdev_sched_entity_destroy(struct gdev_sched_entity *se)
FREE(se);
}
+/**
+ * insert the scheduling entity to the priority-ordered compute list.
+ * gdev->sched_com_lock must be locked.
+ */
+static void __gdev_enqueue_compute(struct gdev_device *gdev, struct gdev_sched_entity *se)
+{
+ struct gdev_sched_entity *p;
+
+ gdev_list_for_each (p, &gdev->sched_com_list, list_entry_com) {
+ if (se->prio > p->prio) {
+ gdev_list_add_prev(&se->list_entry_com, &p->list_entry_com);
+ break;
+ }
+ }
+ if (gdev_list_empty(&se->list_entry_com))
+ gdev_list_add_tail(&se->list_entry_com, &gdev->sched_com_list);
+}
+
+/**
+ * delete the scheduling entity from the priority-ordered compute list.
+ * gdev->sched_com_lock must be locked.
+ */
+static void __gdev_dequeue_compute(struct gdev_sched_entity *se)
+{
+ gdev_list_del(&se->list_entry_com);
+}
+
+/**
+ * scheduling policy files.
+ */
#include "gdev_vsched_credit.c"
#define GDEV_VSCHED_POLICY_CREDIT
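
__gdev_enqueue_compute() above keeps the wait list sorted by descending priority and falls through to a tail insert, so entities of equal priority keep FIFO order. A throwaway sketch of the same insertion rule over a plain singly linked list (simplified types, not gdev_list):

#include <stdio.h>

struct ent { int prio; struct ent *next; };

static void enqueue_compute(struct ent **head, struct ent *se)
{
    struct ent **pp = head;

    /* walk past everything with an equal or higher priority, then insert;
       reaching the empty tail slot is the gdev_list_add_tail() case. */
    while (*pp && se->prio <= (*pp)->prio)
        pp = &(*pp)->next;
    se->next = *pp;
    *pp = se;
}

int main(void)
{
    struct ent a = { 2 }, b = { 5 }, c = { 5 }, d = { 1 };
    struct ent *head = NULL;

    enqueue_compute(&head, &a);
    enqueue_compute(&head, &b);
    enqueue_compute(&head, &c);
    enqueue_compute(&head, &d);

    for (struct ent *p = head; p; p = p->next)
        printf("%d ", p->prio);   /* prints "5 5 2 1"; b stays ahead of c */
    printf("\n");
    return 0;
}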
@@ -96,29 +158,16 @@ struct gdev_vsched_policy *gdev_vsched = &gdev_vsched_credit;
void gdev_schedule_compute(struct gdev_sched_entity *se)
{
struct gdev_device *gdev = se->gdev;
- struct gdev_sched_entity *p, *tail = NULL;
resched:
/* algorithm-specific virtual device scheduler. */
- gdev_vsched->schedule(gdev);
+ gdev_vsched->schedule_compute(se);
/* local compute scheduler. */
gdev_lock(&gdev->sched_com_lock);
if (gdev->se_com_current && gdev->se_com_current != se) {
- /* insert the scheduling entity to the priority-ordered list. */
- gdev_list_for_each (p, &gdev->sched_com_list, list_entry_com) {
- if (se->prio > p->prio) {
- gdev_list_add_prev(&se->list_entry_com, &p->list_entry_com);
- break;
- }
- tail = p;
- }
- if (gdev_list_empty(&se->list_entry_com)) {
- if (tail)
- gdev_list_add_next(&se->list_entry_com, &tail->list_entry_com);
- else
- gdev_list_add(&se->list_entry_com, &gdev->sched_com_list);
- }
+ /* enqueue the scheduling entity to the compute queue. */
+ __gdev_enqueue_compute(gdev, se);
gdev_unlock(&gdev->sched_com_lock);
/* now the corresponding task will be suspended until some other tasks
@@ -131,7 +180,7 @@ void gdev_schedule_compute(struct gdev_sched_entity *se)
/* now, let's get offloaded to the device! */
if (se->launch_instances == 0) {
/* record the start time. */
- gdev_time_stamp(&se->start);
+ gdev_time_stamp(&se->last_tick_com);
}
se->launch_instances++;
gdev->se_com_current = se;
@@ -143,34 +192,40 @@ void gdev_schedule_compute(struct gdev_sched_entity *se)
* schedule the next context of compute.
* invoked upon the completion of preceding contexts.
*/
-void gdev_schedule_compute_post(struct gdev_device *gdev)
+void gdev_select_next_compute(struct gdev_device *gdev)
{
struct gdev_sched_entity *se, *next;
+ struct gdev_time now;
gdev_lock(&gdev->sched_com_lock);
se = gdev->se_com_current;
- if (se) {
- se->launch_instances--;
- if (se->launch_instances == 0) {
- /* record the end time. */
- gdev_time_stamp(&se->end);
- gdev_time_sub(&gdev->credit_com, &se->end, &se->start);
- /* select the next context. */
- next = gdev_list_container(gdev_list_head(&gdev->sched_com_list));
- /* remove it from the waiting list. */
- if (next)
- gdev_list_del(&next->list_entry_com);
- /* now this is going to be the current context. */
- gdev->se_com_current = next;
- gdev_unlock(&gdev->sched_com_lock);
- /* wake up the next context! */
- if (next) {
- /* could be enforced when awakened. */
- gdev_sched_wakeup(next->task);
- }
+ if (!se) {
+ gdev_unlock(&gdev->sched_com_lock);
+ GDEV_PRINT("Invalid scheduling entity on Gdev#%d\n", gdev->id);
+ return;
+ }
+
+ /* record the end time (update on multiple launches too). */
+ gdev_time_stamp(&now);
+
+ se->launch_instances--;
+ if (se->launch_instances == 0) {
+ /* select the next device to be scheduled. */
+ gdev = gdev_vsched->select_next_compute(gdev);
+
+ /* select the next context to be scheduled. */
+ next = gdev_list_container(gdev_list_head(&gdev->sched_com_list));
+ /* remove it from the waiting list. */
+ if (next)
+ __gdev_dequeue_compute(next);
+ gdev->se_com_current = NULL; /* null clear once. */
+ gdev_unlock(&gdev->sched_com_lock);
+
+ /* wake up the next context! */
+ if (next) {
+ /* could be enforced when awakened. */
+ gdev_sched_wakeup(next->task);
}
- else
- gdev_unlock(&gdev->sched_com_lock);
}
else
gdev_unlock(&gdev->sched_com_lock);
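
In gdev_select_next_compute() above, launch_instances acts as a reference count: only when the last outstanding launch of the current entity completes does the scheduler dequeue the head of the wait list and wake it. A toy illustration of that hand-off with locking, credit accounting and virtual-device selection stripped out (every name here is invented):

#include <stdio.h>

#define QMAX 8
static const char *waitq[QMAX];      /* FIFO of waiting context names */
static int q_head, q_len;
static const char *current_ctx = "ctxA";
static int launch_instances = 2;     /* ctxA has two launches in flight */

static void select_next_compute(void)
{
    if (--launch_instances > 0)
        return;                      /* ctxA still owns the device */
    current_ctx = NULL;
    if (q_len > 0) {
        current_ctx = waitq[q_head]; /* head of the priority-ordered queue */
        q_head = (q_head + 1) % QMAX;
        q_len--;
        printf("wake up %s\n", current_ctx);
    }
}

int main(void)
{
    waitq[0] = "ctxB";
    q_len = 1;
    select_next_compute();           /* one launch still pending: no switch */
    select_next_compute();           /* last launch done: wakes ctxB */
    return 0;
}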
@@ -181,38 +236,27 @@ void gdev_schedule_compute_post(struct gdev_device *gdev)
*/
void gdev_schedule_memory(struct gdev_sched_entity *se)
{
- //gdev_schedule_compute(se);
}
/**
* schedule the next context of memory copy.
* invoked upon the completion of preceding contexts.
*/
-void gdev_schedule_memory_post(struct gdev_device *gdev)
+void gdev_select_next_memory(struct gdev_device *gdev)
{
- //gdev_schedule_compute_post(gdev);
}
+/**
+ * automatically replenish the credit of compute launches.
+ */
void gdev_replenish_credit_compute(struct gdev_device *gdev)
{
- struct gdev_time credit, threshold;
-
- gdev_time_us(&credit, gdev->period * gdev->com_bw / 100);
- gdev_time_add(&gdev->credit_com, &gdev->credit_com, &credit);
- /* when the credit exceeds the threshold, all credits taken away. */
- gdev_time_us(&threshold, GDEV_CREDIT_INACTIVE_THRESHOLD);
- if (gdev_time_g(&gdev->credit_com, &threshold))
- gdev_time_us(&gdev->credit_com, 0);
+ gdev_vsched->replenish_compute(gdev);
}
+/**
+ * automatically replenish the credit of memory copies.
+ */
void gdev_replenish_credit_memory(struct gdev_device *gdev)
{
- struct gdev_time credit, threshold;
-
- gdev_time_us(&credit, gdev->period * gdev->mem_bw / 100);
- gdev_time_add(&gdev->credit_mem, &gdev->credit_mem, &credit);
- /* when the credit exceeds the threshold, all credits taken away. */
- gdev_time_us(&threshold, GDEV_CREDIT_INACTIVE_THRESHOLD);
- if (gdev_time_g(&gdev->credit_mem, &threshold))
- gdev_time_us(&gdev->credit_mem, 0);
}
common/gdev_sched.h (14 changed lines)
@@ -43,7 +43,7 @@
* virtual device period/threshold.
*/
#define GDEV_PERIOD_DEFAULT 30000 /* microseconds */
-#define GDEV_CREDIT_INACTIVE_THRESHOLD 300000 /* microseconds */
+#define GDEV_CREDIT_INACTIVE_THRESHOLD GDEV_PERIOD_DEFAULT
struct gdev_sched_entity {
struct gdev_device *gdev; /* associated Gdev (virtual) device */
@@ -53,14 +53,16 @@ struct gdev_sched_entity {
int rt_prio; /* real-time priority */
struct gdev_list list_entry_com; /* entry to compute scheduler list */
struct gdev_list list_entry_mem; /* entry to memory scheduler list */
- struct gdev_time start; /* start time of kernel execution */
- struct gdev_time end; /* end time of kernel execution */
+ struct gdev_time last_tick_com; /* last tick of compute execution */
+ struct gdev_time last_tick_mem; /* last tick of memory transfer */
int launch_instances;
int memcpy_instances;
};
struct gdev_vsched_policy {
- void (*schedule)(struct gdev_device *gdev);
+ void (*schedule_compute)(struct gdev_sched_entity *se);
+ struct gdev_device *(*select_next_compute)(struct gdev_device *gdev);
+ void (*replenish_compute)(struct gdev_device *gdev);
};
int gdev_init_scheduler(struct gdev_device *gdev);
@@ -70,9 +72,9 @@ struct gdev_sched_entity *gdev_sched_entity_create(struct gdev_device *gdev, gde
void gdev_sched_entity_destroy(struct gdev_sched_entity *se);
void gdev_schedule_compute(struct gdev_sched_entity *se);
-void gdev_schedule_compute_post(struct gdev_device *gdev);
+void gdev_select_next_compute(struct gdev_device *gdev);
void gdev_schedule_memory(struct gdev_sched_entity *se);
-void gdev_schedule_memory_post(struct gdev_device *gdev);
+void gdev_select_next_memory(struct gdev_device *gdev);
void gdev_replenish_credit_compute(struct gdev_device *gdev);
void gdev_replenish_credit_memory(struct gdev_device *gdev);
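
struct gdev_vsched_policy is now a small vtable: the core scheduler only calls schedule_compute/select_next_compute/replenish_compute through function pointers, and the Credit algorithm is one instance of the table. A compilable sketch of that dispatch pattern with simplified stand-in types (not the real gdev structures):

#include <stdio.h>

struct vdev { int id; long credit_us; };

struct vsched_policy {
    void (*schedule_compute)(struct vdev *vd);
    struct vdev *(*select_next_compute)(struct vdev *vd);
    void (*replenish_compute)(struct vdev *vd);
};

static void credit_schedule_compute(struct vdev *vd)
{
    printf("vgpu%d: wait until the physical GPU is free\n", vd->id);
}

static struct vdev *credit_select_next_compute(struct vdev *vd)
{
    printf("vgpu%d: pick the next virtual device with pending work\n", vd->id);
    return vd;
}

static void credit_replenish_compute(struct vdev *vd)
{
    vd->credit_us += 15000;  /* e.g. a 30000 us period at a 50% share */
    printf("vgpu%d: credit is now %ld us\n", vd->id, vd->credit_us);
}

static struct vsched_policy credit_policy = {
    .schedule_compute    = credit_schedule_compute,
    .select_next_compute = credit_select_next_compute,
    .replenish_compute   = credit_replenish_compute,
};

/* the core only ever dispatches through the table, mirroring
   "struct gdev_vsched_policy *gdev_vsched = &gdev_vsched_credit" */
static struct vsched_policy *vsched = &credit_policy;

int main(void)
{
    struct vdev vd = { .id = 0, .credit_us = 0 };

    vsched->replenish_compute(&vd);
    vsched->schedule_compute(&vd);
    vsched->select_next_compute(&vd);
    return 0;
}

Swapping in another policy is then a one-line change of the gdev_vsched pointer, which appears to be the point of splitting the old schedule() hook into these three.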
common/gdev_time.h (10 changed lines)
@@ -107,7 +107,7 @@ static inline int gdev_time_eq(struct gdev_time *x, struct gdev_time *y)
}
/* x > y */
-static inline int gdev_time_g(struct gdev_time *x, struct gdev_time *y)
+static inline int gdev_time_gt(struct gdev_time *x, struct gdev_time *y)
{
if (!x->neg && y->neg)
return true;
@@ -125,11 +125,11 @@ static inline int gdev_time_ge(struct gdev_time *x, struct gdev_time *y)
if (gdev_time_eq(x, y))
return true;
else
- return gdev_time_g(x, y);
+ return gdev_time_gt(x, y);
}
/* x < y */
-static inline int gdev_time_l(struct gdev_time *x, struct gdev_time *y)
+static inline int gdev_time_lt(struct gdev_time *x, struct gdev_time *y)
{
if (!x->neg && y->neg)
return true;
@@ -147,7 +147,7 @@ static inline int gdev_time_le(struct gdev_time *x, struct gdev_time *y)
if (gdev_time_eq(x, y))
return true;
else
- return gdev_time_l(x, y);
+ return gdev_time_lt(x, y);
}
/* ret = x + y (x and y must be positive) */
@@ -164,7 +164,7 @@ static inline void __gdev_time_add_pos(struct gdev_time *ret, struct gdev_time *
/* ret = x - y (x and y must be positive) */
static inline void __gdev_time_sub_pos(struct gdev_time *ret, struct gdev_time *x, struct gdev_time *y)
{
- if (gdev_time_l(x, y)) {
+ if (gdev_time_lt(x, y)) {
struct gdev_time *tmp = x;
x = y;
y = tmp;
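
The renames above (gdev_time_g/gdev_time_l to gdev_time_gt/gdev_time_lt) are cosmetic, but these comparisons plus gdev_time_sub() are what the credit accounting leans on: the scheduler subtracts last_tick_com from a fresh timestamp to charge elapsed GPU time against a device's credit. A simplified sec/usec sketch of that arithmetic (the real gdev_time also carries a sign flag, which this omits):

#include <stdio.h>
#include <stdbool.h>

struct tstamp { long sec; long usec; };

static bool tstamp_gt(const struct tstamp *x, const struct tstamp *y)
{
    return x->sec != y->sec ? x->sec > y->sec : x->usec > y->usec;
}

/* ret = x - y, assuming x >= y; gdev's version also handles negative results */
static void tstamp_sub(struct tstamp *ret, const struct tstamp *x,
                       const struct tstamp *y)
{
    ret->sec = x->sec - y->sec;
    ret->usec = x->usec - y->usec;
    if (ret->usec < 0) {
        ret->sec--;
        ret->usec += 1000000;
    }
}

int main(void)
{
    struct tstamp last_tick = { 10, 900000 };  /* like se->last_tick_com */
    struct tstamp now = { 11, 200000 };
    struct tstamp used;

    tstamp_sub(&used, &now, &last_tick);       /* 0.300000 s of GPU time */
    printf("charged %ld.%06ld s against the credit\n", used.sec, used.usec);
    printf("now > last_tick: %d\n", tstamp_gt(&now, &last_tick));
    return 0;
}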
common/gdev_vsched_credit.c (94 changed lines)
@@ -26,10 +26,100 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-static void gdev_vsched_credit_schedule(struct gdev_device *gdev)
+static void gdev_vsched_credit_schedule_compute(struct gdev_sched_entity *se)
{
+ struct gdev_device *gdev = se->gdev;
+ struct gdev_device *phys = gdev->parent;
+
+ if (!phys)
+ return;
+
+resched:
+ gdev_lock(&phys->sched_com_lock);
+ if (phys->se_com_current && phys->se_com_current != se) {
+ /* insert the scheduling entity to its local priority-ordered list. */
+ gdev_lock_nested(&gdev->sched_com_lock);
+ __gdev_enqueue_compute(gdev, se);
+ gdev_unlock_nested(&gdev->sched_com_lock);
+ gdev_unlock(&phys->sched_com_lock);
+
+ /* now the corresponding task will be suspended until some other tasks
+ will awaken it upon completions of their compute launches. */
+ gdev_sched_sleep();
+
+ goto resched;
+ }
+ else {
+ phys->se_com_current = se;
+ gdev_unlock(&phys->sched_com_lock);
+ }
+}
+
+static struct gdev_device *gdev_vsched_credit_select_next_compute(struct gdev_device *gdev)
+{
+ struct gdev_device *phys = gdev->parent;
+ struct gdev_device *next;
+ struct gdev_sched_entity *se;
+ struct gdev_time now, zero;
+
+ if (!phys)
+ return gdev;
+
+ /* account for the computation time. */
+ se = gdev->se_com_current;
+ gdev_time_stamp(&now);
+ gdev_time_sub(&gdev->credit_com, &now, &se->last_tick_com);
+
+ /* select the next device. */
+ gdev_lock(&phys->sched_com_lock);
+
+ /* if the credit is exhausted, reinsert the device. */
+ gdev_time_us(&zero, 0);
+ if (gdev_time_le(&gdev->credit_com, &zero)) {
+ gdev_list_del(&gdev->list_entry_com);
+ gdev_list_add(&gdev->list_entry_com, &phys->sched_com_list);
+ }
+
+ gdev_list_for_each(next, &phys->sched_com_list, list_entry_com) {
+ /* if the current device is found first as an available device, break
+ the search loop. note that gdev->sched_com_lock is already locked. */
+ if (next == gdev)
+ goto device_not_switched;
+ else {
+ gdev_lock_nested(&next->sched_com_lock);
+ if (!gdev_list_empty(&next->sched_com_list)) {
+ /* unlock the current device. the next device will be
+ unlocked after this function returns. */
+ gdev_unlock_nested(&gdev->sched_com_lock);
+ goto device_switched;
+ }
+ gdev_unlock_nested(&next->sched_com_lock);
+ }
+ }
+device_not_switched:
+ next = gdev;
+device_switched:
+ phys->se_com_current = NULL; /* null clear */
+ gdev_unlock(&phys->sched_com_lock);
+
+ return next;
+}
+
+static void gdev_vsched_credit_replenish_compute(struct gdev_device *gdev)
+{
+ struct gdev_time credit, threshold;
+
+ gdev_time_us(&credit, gdev->period * gdev->com_bw / 100);
+ gdev_time_add(&gdev->credit_com, &gdev->credit_com, &credit);
+ /* when the credit exceeds the threshold, all credits taken away. */
+ gdev_time_us(&threshold, GDEV_CREDIT_INACTIVE_THRESHOLD);
+ if (gdev_time_gt(&gdev->credit_com, &threshold)) {
+ gdev_time_us(&gdev->credit_com, 0);
+ }
}
static struct gdev_vsched_policy gdev_vsched_credit = {
- .schedule = gdev_vsched_credit_schedule,
+ .schedule_compute = gdev_vsched_credit_schedule_compute,
+ .select_next_compute = gdev_vsched_credit_select_next_compute,
+ .replenish_compute = gdev_vsched_credit_replenish_compute,
};
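
With the defaults in this commit, GDEV_PERIOD_DEFAULT is 30000 us and the inactivity threshold now equals one period, so a device with an assumed 50% compute share gains 15000 us of credit per replenishment and has its balance reset to zero, not clamped, once it exceeds a full period of unused credit. A standalone sketch of the arithmetic in gdev_vsched_credit_replenish_compute():

#include <stdio.h>

int main(void)
{
    long period_us    = 30000;  /* GDEV_PERIOD_DEFAULT */
    long threshold_us = 30000;  /* GDEV_CREDIT_INACTIVE_THRESHOLD (== period) */
    long com_bw       = 50;     /* assumed compute bandwidth share, percent */
    long credit_us    = 0;

    for (int tick = 1; tick <= 4; tick++) {
        credit_us += period_us * com_bw / 100;   /* +15000 us per period */
        if (credit_us > threshold_us)
            credit_us = 0;                       /* idle device: credit taken away */
        printf("after tick %d: credit = %ld us\n", tick, credit_us);
    }
    /* prints 15000, 30000, 0, 15000: the balance never grows past one period */
    return 0;
}

Zeroing rather than clamping means a long-idle virtual device cannot bank credit and then monopolize the GPU when it becomes active again.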
driver/gdev/gdev_drv.c (17 changed lines)
@@ -91,7 +91,7 @@ static int __gdev_sched_com_thread(void *__data)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
#ifndef GDEV_SCHEDULER_DISABLED
- gdev_schedule_compute_post(gdev);
+ gdev_select_next_compute(gdev);
#endif
}
@@ -109,7 +109,7 @@ static int __gdev_sched_mem_thread(void *__data)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
#ifndef GDEV_SCHEDULER_DISABLED
- gdev_schedule_memory_post(gdev);
+ gdev_select_next_memory(gdev);
#endif
}
@@ -132,12 +132,12 @@ static int __gdev_credit_com_thread(void *__data)
setup_timer_on_stack(&timer, __gdev_credit_handler, (unsigned long)current);
while (!kthread_should_stop()) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
#ifndef GDEV_SCHEDULER_DISABLED
gdev_replenish_credit_compute(gdev);
mod_timer(&timer, jiffies + usecs_to_jiffies(gdev->period));
#endif
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule();
}
local_irq_enable();
@@ -160,12 +160,12 @@ static int __gdev_credit_mem_thread(void *__data)
setup_timer_on_stack(&timer, __gdev_credit_handler, (unsigned long)current);
while (!kthread_should_stop()) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
#ifndef GDEV_SCHEDULER_DISABLED
gdev_replenish_credit_memory(gdev);
mod_timer(&timer, jiffies + usecs_to_jiffies(gdev->period));
#endif
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule();
}
local_irq_enable();
@@ -318,7 +318,7 @@ int gdev_minor_init(struct drm_device *drm)
when Gdev first loaded, one-to-one map physical and virtual device. */
gdev_init_virtual_device(&gdev_vds[id], id, &gdevs[id]);
- /* initialize the scheduler for the virtual device. */
+ /* initialize the local scheduler for the virtual device. */
gdev_init_scheduler(&gdev_vds[id]);
return 0;
@@ -417,9 +417,6 @@ int gdev_major_init(struct pci_driver *pdriver)
/* set interrupt handler. */
gdev_callback_notify = __gdev_notify_handler;
- /* init global scheduler lock. */
- gdev_lock_init(&global_sched_lock);
-
return 0;
fail_proc_create:
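
The reordering in __gdev_credit_com_thread/__gdev_credit_mem_thread makes the credit threads park first and replenish only when the timer wakes them, instead of replenishing immediately on startup. Below is a minimal sketch of that timer-driven kthread pattern, written against the old (pre-4.15) timer API this driver uses; the initial arming and the IRQ handling in the real driver differ, and all names here are invented:

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *worker;

static void tick_handler(unsigned long data)
{
    /* the timer callback only wakes the thread; the work runs in thread context */
    wake_up_process((struct task_struct *)data);
}

static int worker_fn(void *unused)
{
    struct timer_list timer;
    unsigned long period_us = 30000;  /* GDEV_PERIOD_DEFAULT */

    setup_timer_on_stack(&timer, tick_handler, (unsigned long)current);
    mod_timer(&timer, jiffies + usecs_to_jiffies(period_us));

    while (!kthread_should_stop()) {
        /* park first, then do the periodic work on wakeup, mirroring the
           new ordering of set_current_state()/schedule() in the diff above */
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule();
        if (kthread_should_stop())
            break;
        pr_info("replenish credit here\n");
        mod_timer(&timer, jiffies + usecs_to_jiffies(period_us));
    }
    del_timer_sync(&timer);
    destroy_timer_on_stack(&timer);
    return 0;
}

static int __init credit_sketch_init(void)
{
    worker = kthread_run(worker_fn, NULL, "credit_sketch");
    return IS_ERR(worker) ? PTR_ERR(worker) : 0;
}

static void __exit credit_sketch_exit(void)
{
    kthread_stop(worker);
}

module_init(credit_sketch_init);
module_exit(credit_sketch_exit);
MODULE_LICENSE("GPL");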
test/cuda/common/loop_repeated.c (2 changed lines)
@@ -124,7 +124,7 @@ int cuda_test_loop(unsigned int n, int count, char *path)
}
repeat:
- usleep(50);
+ usleep(100);
res = cuLaunchGrid(function, grid_x, grid_y);
if (res != CUDA_SUCCESS) {
printf("cuLaunchGrid failed: res = %lu\n", (unsigned long)res);