gdev: added gdev_global_{lock,unlock}()

gdev: added a GDEV_INSTANCES_LIMIT cap to avoid some bugs
commit a2b597145772636d5a455353e8eac71fff656ccc (1 parent: 162ee89)
Authored by Shinpei Kato
common/gdev_api.c (7 changes)
@@ -583,6 +583,8 @@ struct gdev_handle *gopen(int minor)
goto fail_open;
}
+ gdev_global_lock(gdev);
+
/* create a new virtual address space (VAS) object. */
vas = gdev_vas_new(gdev, GDEV_VAS_SIZE, h);
if (!vas) {
@@ -610,6 +612,8 @@ struct gdev_handle *gopen(int minor)
GDEV_PRINT("Failed to allocate scheduling entity\n");
goto fail_se;
}
+
+ gdev_global_unlock(gdev);
/* save the objects to the handle. */
h->se = se;
@@ -630,6 +634,7 @@ struct gdev_handle *gopen(int minor)
fail_ctx:
gdev_vas_free(vas);
fail_vas:
+ gdev_global_unlock(gdev);
gdev_dev_close(gdev);
fail_open:
return NULL;
@@ -657,8 +662,10 @@ int gclose(struct gdev_handle *h)
gdev_mem_gc(h->vas);
/* free the objects. */
+ gdev_global_lock(h->gdev);
gdev_ctx_free(h->ctx);
gdev_vas_free(h->vas);
+ gdev_global_unlock(h->gdev);
gdev_dev_close(h->gdev);
GDEV_PRINT("Closed gdev%d\n", h->dev_id);
common/gdev_arch.h (10 changes)
@@ -64,15 +64,17 @@ void gdev_vas_free(gdev_vas_t *vas);
gdev_ctx_t *gdev_ctx_new(struct gdev_device *gdev, gdev_vas_t *vas);
void gdev_ctx_free(gdev_ctx_t *ctx);
int gdev_ctx_get_cid(gdev_ctx_t *ctx);
+void gdev_global_lock(struct gdev_device *gdev);
+void gdev_global_unlock(struct gdev_device *gdev);
+void gdev_mem_lock(gdev_mem_t *mem);
+void gdev_mem_unlock(gdev_mem_t *mem);
+void gdev_mem_lock_all(gdev_vas_t *vas);
+void gdev_mem_unlock_all(gdev_vas_t *vas);
gdev_mem_t *gdev_mem_alloc(gdev_vas_t *vas, uint64_t size, int type);
gdev_mem_t *gdev_mem_share(gdev_vas_t *vas, uint64_t size);
void gdev_mem_free(gdev_mem_t *mem);
void gdev_mem_gc(gdev_vas_t *vas);
gdev_mem_t *gdev_mem_lookup(gdev_vas_t *vas, uint64_t addr, int type);
-void gdev_mem_lock(gdev_mem_t *mem);
-void gdev_mem_unlock(gdev_mem_t *mem);
-void gdev_mem_lock_all(gdev_vas_t *vas);
-void gdev_mem_unlock_all(gdev_vas_t *vas);
void *gdev_mem_get_buf(gdev_mem_t *mem);
uint64_t gdev_mem_get_addr(gdev_mem_t *mem);
uint64_t gdev_mem_get_size(gdev_mem_t *mem);
common/gdev_device.c (11 changes)
@@ -36,6 +36,17 @@ int gdev_vcount = 0; /* # of virtual devices. */
struct gdev_device *gdevs = NULL; /* physical devices */
struct gdev_device *gdev_vds = NULL; /* virtual devices */
+int VCOUNT_LIST[GDEV_PHYSICAL_DEVICE_MAX_COUNT] = {
+ GDEV0_VIRTUAL_DEVICE_COUNT,
+ GDEV1_VIRTUAL_DEVICE_COUNT,
+ GDEV2_VIRTUAL_DEVICE_COUNT,
+ GDEV3_VIRTUAL_DEVICE_COUNT,
+ GDEV4_VIRTUAL_DEVICE_COUNT,
+ GDEV5_VIRTUAL_DEVICE_COUNT,
+ GDEV6_VIRTUAL_DEVICE_COUNT,
+ GDEV7_VIRTUAL_DEVICE_COUNT,
+};
+
void __gdev_init_device(struct gdev_device *gdev, int id)
{
gdev->id = id;
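VCOUNT_LIST moves from the Linux driver (see the gdev_drv.c hunk below) into common code because gdev_global_lock() needs it: virtual devices are laid out contiguously in gdev_vds[], so the first virtual device belonging to physical device p sits at the prefix sum of the counts of devices 0..p-1. A standalone sketch of that index arithmetic, with illustrative counts:

    #include <stdio.h>

    #define PHYS_MAX 8

    /* Illustrative values; the real ones come from GDEVn_VIRTUAL_DEVICE_COUNT. */
    static const int vcount[PHYS_MAX] = { 4, 2, 0, 0, 0, 0, 0, 0 };

    /* First index into the virtual-device array owned by @physid:
       the sum of the counts of all preceding physical devices. */
    static int first_vd_index(int physid)
    {
        int i, j = 0;
        for (i = 0; i < physid; i++)
            j += vcount[i];
        return j;
    }

    int main(void)
    {
        int j = first_vd_index(1);
        /* With the counts above, physical device 1 owns vds[4] and vds[5]. */
        printf("phys 1: vds[%d..%d]\n", j, j + vcount[1] - 1);
        return 0;
    }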
common/gdev_device.h (6 changes)
@@ -34,6 +34,11 @@
#include "gdev_system.h"
/**
+ * maximum number of physical devices that Gdev supports
+ */
+#define GDEV_PHYSICAL_DEVICE_MAX_COUNT 8
+
+/**
* generic subchannel definitions
*/
#define GDEV_SUBCH_LAUNCH 1
@@ -91,5 +96,6 @@ extern int gdev_count;
extern int gdev_vcount;
extern struct gdev_device *gdevs;
extern struct gdev_device *gdev_vds;
+extern int VCOUNT_LIST[GDEV_PHYSICAL_DEVICE_MAX_COUNT];
#endif
common/gdev_nvidia.c (39 changes)
@@ -124,3 +124,42 @@ int gdev_ctx_get_cid(struct gdev_ctx *ctx)
{
return ctx->cid;
}
+
+/* lock the device globally. */
+void gdev_global_lock(struct gdev_device *gdev)
+{
+ struct gdev_device *phys = gdev->parent;
+
+ if (phys) {
+ int physid = phys->id;
+ int i, j = 0;
+ for (i = 0; i < physid; i++)
+ j += VCOUNT_LIST[i];
+ for (i = j; i < j + VCOUNT_LIST[physid]; i++) {
+ gdev_mutex_lock(&gdev_vds[i].shm_mutex);
+ }
+ }
+ else {
+ gdev_mutex_lock(&gdev->shm_mutex);
+ }
+}
+
+/* unlock the device globally. */
+void gdev_global_unlock(struct gdev_device *gdev)
+{
+ struct gdev_device *phys = gdev->parent;
+
+ if (phys) {
+ int physid = phys->id;
+ int i, j = 0;
+ for (i = 0; i < physid; i++)
+ j += VCOUNT_LIST[i];
+ for (i = j + VCOUNT_LIST[physid] - 1; i >= j; i--) {
+ gdev_mutex_unlock(&gdev_vds[i].shm_mutex);
+ }
+ }
+ else {
+ gdev_mutex_unlock(&gdev->shm_mutex);
+ }
+}
+
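For a virtual device (gdev->parent != NULL), the global lock is thus the set of shm_mutex locks of every virtual device sharing the same physical GPU, acquired in ascending array order and released in descending order. The fixed acquisition order is what keeps two concurrent gopen() calls on sibling devices deadlock-free; strictly, only the acquisition order matters for that, and the reverse release order simply mirrors it. A reduced pthreads sketch of the ordering discipline:

    #include <stdio.h>
    #include <pthread.h>

    #define NVD 4  /* sibling virtual devices on one physical GPU */

    static pthread_mutex_t shm_mutex[NVD] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    /* Acquire siblings [lo, hi) in ascending index order... */
    static void lock_range(int lo, int hi)
    {
        int i;
        for (i = lo; i < hi; i++)
            pthread_mutex_lock(&shm_mutex[i]);
    }

    /* ...and release them in descending order, as gdev_global_unlock() does. */
    static void unlock_range(int lo, int hi)
    {
        int i;
        for (i = hi - 1; i >= lo; i--)
            pthread_mutex_unlock(&shm_mutex[i]);
    }

    int main(void)
    {
        lock_range(0, NVD);
        puts("all sibling shm mutexes held");
        unlock_range(0, NVD);
        return 0;
    }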
common/gdev_nvidia_mem.c (88 changes)
@@ -90,6 +90,50 @@ void gdev_nvidia_mem_list_del(struct gdev_mem *mem)
}
}
+/* lock the memory object so that nobody can change its data while transferring.
+ if there are no shared memory users, no need to lock. */
+void gdev_mem_lock(struct gdev_mem *mem)
+{
+ if (mem->shm) {
+ gdev_mutex_lock(&mem->shm->mutex);
+ }
+}
+
+/* unlock the memory object so that nobody can change its data while transferring.
+ if there are no shared memory users, no need to lock. */
+void gdev_mem_unlock(struct gdev_mem *mem)
+{
+ if (mem->shm) {
+ gdev_mutex_unlock(&mem->shm->mutex);
+ }
+}
+
+/* lock all the memory objects associated with @vas. */
+void gdev_mem_lock_all(struct gdev_vas *vas)
+{
+ struct gdev_device *gdev = vas->gdev;
+ struct gdev_mem *mem;
+
+ gdev_mutex_lock(&gdev->shm_mutex);
+ gdev_list_for_each (mem, &vas->mem_list, list_entry_heap) {
+ gdev_mem_lock(mem);
+ }
+ gdev_mutex_unlock(&gdev->shm_mutex);
+}
+
+/* unlock all the memory objects associated with @vas. */
+void gdev_mem_unlock_all(struct gdev_vas *vas)
+{
+ struct gdev_device *gdev = vas->gdev;
+ struct gdev_mem *mem;
+
+ gdev_mutex_lock(&gdev->shm_mutex);
+ gdev_list_for_each (mem, &vas->mem_list, list_entry_heap) {
+ gdev_mem_unlock(mem);
+ }
+ gdev_mutex_unlock(&gdev->shm_mutex);
+}
+
/* allocate a new memory object. */
struct gdev_mem *gdev_mem_alloc(struct gdev_vas *vas, uint64_t size, int type)
{
@@ -206,50 +250,6 @@ struct gdev_mem *gdev_mem_lookup(struct gdev_vas *vas, uint64_t addr, int type)
return mem;
}
-/* lock the memory object so that nobody can change its data while transferring.
- if there are no shared memory users, no need to lock. */
-void gdev_mem_lock(struct gdev_mem *mem)
-{
- if (mem->shm) {
- gdev_mutex_lock(&mem->shm->mutex);
- }
-}
-
-/* unlock the memory object so that nobody can change its data while transferring.
- if there are no shared memory users, no need to lock. */
-void gdev_mem_unlock(struct gdev_mem *mem)
-{
- if (mem->shm) {
- gdev_mutex_unlock(&mem->shm->mutex);
- }
-}
-
-/* lock all the memory objects associated with @vas. */
-void gdev_mem_lock_all(struct gdev_vas *vas)
-{
- struct gdev_device *gdev = vas->gdev;
- struct gdev_mem *mem;
-
- gdev_mutex_lock(&gdev->shm_mutex);
- gdev_list_for_each (mem, &vas->mem_list, list_entry_heap) {
- gdev_mem_lock(mem);
- }
- gdev_mutex_unlock(&gdev->shm_mutex);
-}
-
-/* unlock all the memory objects associated with @vas. */
-void gdev_mem_unlock_all(struct gdev_vas *vas)
-{
- struct gdev_device *gdev = vas->gdev;
- struct gdev_mem *mem;
-
- gdev_mutex_lock(&gdev->shm_mutex);
- gdev_list_for_each (mem, &vas->mem_list, list_entry_heap) {
- gdev_mem_unlock(mem);
- }
- gdev_mutex_unlock(&gdev->shm_mutex);
-}
-
/* get host DMA buffer. */
void *gdev_mem_get_buf(struct gdev_mem *mem)
{
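The large hunk pair above is pure code motion: the gdev_mem_lock/unlock{,_all} bodies removed near the bottom of the file are re-added unchanged next to the list helpers at the top, matching the regrouped declarations in gdev_arch.h. The per-object lock is conditional: only memory backed by a shared-memory object (mem->shm != NULL) carries a mutex, so locking private memory is a no-op. A hedged sketch of the intended usage pattern around a transfer (not Gdev code; memcpy stands in for the real DMA):

    #include <stdio.h>
    #include <string.h>
    #include <pthread.h>

    struct shm { pthread_mutex_t mutex; };
    struct mem { struct shm *shm; char buf[64]; };

    static void mem_lock(struct mem *m)   { if (m->shm) pthread_mutex_lock(&m->shm->mutex); }
    static void mem_unlock(struct mem *m) { if (m->shm) pthread_mutex_unlock(&m->shm->mutex); }

    /* Bracket the data movement so shared-memory users cannot race it. */
    static void copy_in(struct mem *m, const char *src, size_t len)
    {
        mem_lock(m);              /* no-op for private (non-shared) memory */
        memcpy(m->buf, src, len); /* stand-in for the real transfer */
        mem_unlock(m);
    }

    int main(void)
    {
        struct shm s;
        struct mem shared = { .shm = &s }, priv = { .shm = NULL };
        pthread_mutex_init(&s.mutex, NULL);
        copy_in(&shared, "hello", 6); /* takes the shm mutex */
        copy_in(&priv, "world", 6);   /* lock/unlock fall through */
        printf("%s %s\n", shared.buf, priv.buf);
        return 0;
    }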
common/gdev_sched.c (11 changes)
@@ -148,8 +148,8 @@ static void __gdev_dequeue_compute(struct gdev_sched_entity *se)
#include "gdev_vsched_credit.c"
#include "gdev_vsched_crod.c"
-//#define GDEV_VSCHED_POLICY_CREDIT
-#define GDEV_VSCHED_POLICY_CROD
+#define GDEV_VSCHED_POLICY_CREDIT
+//#define GDEV_VSCHED_POLICY_CROD
#if defined(GDEV_VSCHED_POLICY_CREDIT)
struct gdev_vsched_policy *gdev_vsched = &gdev_vsched_credit;
@@ -170,7 +170,7 @@ void gdev_schedule_compute(struct gdev_sched_entity *se)
/* local compute scheduler. */
gdev_lock(&gdev->sched_com_lock);
- if (gdev->current_com && gdev->current_com != se) {
+ if ((gdev->current_com && gdev->current_com != se) || se->launch_instances >= GDEV_INSTANCES_LIMIT) {
/* enqueue the scheduling entity to the compute queue. */
__gdev_enqueue_compute(gdev, se);
gdev_unlock(&gdev->sched_com_lock);
@@ -217,6 +217,7 @@ void gdev_select_next_compute(struct gdev_device *gdev)
gdev_time_sub(&exec, &now, &se->last_tick_com);
se->launch_instances--;
+ printk("Gdev#%d instances %d\n", gdev->id, se->launch_instances);
if (se->launch_instances == 0) {
/* account for the credit. */
gdev_time_sub(&gdev->credit_com, &gdev->credit_com, &exec);
@@ -253,7 +254,9 @@ void gdev_select_next_compute(struct gdev_device *gdev)
__gdev_dequeue_compute(se);
gdev_unlock(&next->sched_com_lock);
- gdev_sched_wakeup(se->task);
+ while (gdev_sched_wakeup(se->task) < 0) {
+ GDEV_PRINT("Failed to wake up context %d\n", se->ctx->cid);
+ }
}
else
gdev_unlock(&next->sched_com_lock);
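With GDEV_INSTANCES_LIMIT defined as 1 (see gdev_sched.h below), the widened condition queues a scheduling entity that already has a launch in flight even when it currently owns the device, so launches from a single context are serialized through the compute queue. A standalone sketch of the assumed admission test:

    #include <stdio.h>
    #include <stdbool.h>

    #define INSTANCES_LIMIT 1  /* mirrors GDEV_INSTANCES_LIMIT */

    struct entity { int launch_instances; };

    /* Queue the entity if another entity owns the device, or if it has
       reached its own in-flight launch limit. */
    static bool must_enqueue(const struct entity *current, const struct entity *se)
    {
        return (current && current != se) ||
               se->launch_instances >= INSTANCES_LIMIT;
    }

    int main(void)
    {
        struct entity se = { .launch_instances = 1 };
        /* Even as the current owner, a second launch must wait: */
        printf("must enqueue: %d\n", must_enqueue(&se, &se)); /* prints 1 */
        return 0;
    }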
common/gdev_sched.h (7 changes)
@@ -42,10 +42,15 @@
/**
* virtual device period/threshold.
*/
-#define GDEV_PERIOD_DEFAULT 30000 /* microseconds */
+#define GDEV_PERIOD_DEFAULT 100000 /*30000*/ /* microseconds */
#define GDEV_CREDIT_INACTIVE_THRESHOLD GDEV_PERIOD_DEFAULT
#define GDEV_UPDATE_INTERVAL (GDEV_PERIOD_DEFAULT * 30)
+/**
+ * scheduling properties.
+ */
+#define GDEV_INSTANCES_LIMIT 1
+
struct gdev_sched_entity {
struct gdev_device *gdev; /* associated Gdev (virtual) device */
void *task; /* private task structure */
common/gdev_system.h (2 changes)
@@ -51,7 +51,7 @@ void gdev_sched_destroy_scheduler(struct gdev_device *gdev);
void *gdev_sched_get_current_task(void);
int gdev_sched_get_static_prio(void *task);
void gdev_sched_sleep(void);
-void gdev_sched_wakeup(void *task);
+int gdev_sched_wakeup(void *task);
void gdev_lock_init(gdev_lock_t *p);
void gdev_lock(gdev_lock_t *p);
void gdev_unlock(gdev_lock_t *p);
common/gdev_vsched_credit.c (4 changes)
@@ -43,6 +43,8 @@ static void gdev_vsched_credit_schedule_compute(struct gdev_sched_entity *se)
gdev_unlock_nested(&gdev->sched_com_lock);
gdev_unlock(&phys->sched_com_lock);
+ GDEV_PRINT("Gdev#%d Sleep\n", gdev->id);
+
/* now the corresponding task will be suspended until other tasks
awaken it upon completion of their compute launches. */
gdev_sched_sleep();
@@ -52,6 +54,8 @@ static void gdev_vsched_credit_schedule_compute(struct gdev_sched_entity *se)
else {
phys->current_com = (void *)gdev;
gdev_unlock(&phys->sched_com_lock);
+
+ GDEV_PRINT("Gdev#%d Run\n", gdev->id);
}
}
common/gdev_vsched_crod.c (3 changes)
@@ -43,6 +43,8 @@ static void gdev_vsched_crod_schedule_compute(struct gdev_sched_entity *se)
gdev_unlock_nested(&gdev->sched_com_lock);
gdev_unlock(&phys->sched_com_lock);
+ GDEV_PRINT("Gdev#%d Sleep\n", gdev->id);
+
/* now the corresponding task will be suspended until other tasks
awaken it upon completion of their compute launches. */
gdev_sched_sleep();
@@ -52,6 +54,7 @@ static void gdev_vsched_crod_schedule_compute(struct gdev_sched_entity *se)
else {
phys->current_com = (void *)gdev;
gdev_unlock(&phys->sched_com_lock);
+ GDEV_PRINT("Gdev#%d Run\n", gdev->id);
}
}
driver/gdev/gdev_conf.h (1 change)
@@ -42,7 +42,6 @@
#define GDEV_MEMCPY_IORW_LIMIT 0x400 /* bytes */
-#define GDEV_PHYSICAL_DEVICE_MAX_COUNT 8 /* # of physical devices */
#define GDEV0_VIRTUAL_DEVICE_COUNT 4 /* # of virtual devices */
#define GDEV1_VIRTUAL_DEVICE_COUNT 0 /* # of virtual devices */
#define GDEV2_VIRTUAL_DEVICE_COUNT 0 /* # of virtual devices */
driver/gdev/gdev_drv.c (19 changes)
@@ -49,16 +49,6 @@
*/
static dev_t dev;
static struct cdev *cdevs; /* character devices for virtual devices */
-static int VCOUNT_LIST[GDEV_PHYSICAL_DEVICE_MAX_COUNT] = {
- GDEV0_VIRTUAL_DEVICE_COUNT,
- GDEV1_VIRTUAL_DEVICE_COUNT,
- GDEV2_VIRTUAL_DEVICE_COUNT,
- GDEV3_VIRTUAL_DEVICE_COUNT,
- GDEV4_VIRTUAL_DEVICE_COUNT,
- GDEV5_VIRTUAL_DEVICE_COUNT,
- GDEV6_VIRTUAL_DEVICE_COUNT,
- GDEV7_VIRTUAL_DEVICE_COUNT,
-};
/**
* pointers to callback functions.
@@ -258,13 +248,14 @@ void gdev_sched_sleep(void)
schedule();
}
-void gdev_sched_wakeup(void *task)
+int gdev_sched_wakeup(void *task)
{
-retry:
if (!wake_up_process(task)) {
- GDEV_PRINT("Failed to wake up process, try again...\n");
- goto retry;
+ schedule_timeout_interruptible(1);
+ if (!wake_up_process(task))
+ return -EINVAL;
}
+ return 0;
}
void gdev_lock_init(struct gdev_lock *p)
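The rewritten gdev_sched_wakeup() replaces an unbounded retry loop: wake_up_process() returns nonzero only when it actually moves the target task to the runnable state, so spinning on it could livelock if the task had not yet gone to sleep. The new version backs off for one jiffy via schedule_timeout_interruptible(1), retries once, and returns -EINVAL so the caller in gdev_select_next_compute() can decide to retry with the failure visible. A user-space analogue of that bounded-retry-then-report shape (not kernel code; usleep stands in for the jiffy sleep):

    #include <stdio.h>
    #include <errno.h>
    #include <unistd.h>

    /* Stand-in for wake_up_process(): returns nonzero only if it actually
       transitioned the task to runnable. Hard-wired to fail for the demo. */
    static int try_wakeup(void *task)
    {
        (void)task;
        return 0;
    }

    static int sched_wakeup(void *task)
    {
        if (!try_wakeup(task)) {
            usleep(1000);            /* stand-in for schedule_timeout_interruptible(1) */
            if (!try_wakeup(task))
                return -EINVAL;      /* report instead of spinning forever */
        }
        return 0;
    }

    int main(void)
    {
        if (sched_wakeup(NULL) < 0)
            fprintf(stderr, "wakeup failed; caller may retry\n");
        return 0;
    }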
runtime/user/gdev/gdev_lib.c (3 changes)
@@ -48,8 +48,9 @@ void gdev_sched_sleep(void)
{
}
-void gdev_sched_wakeup(void *task)
+int gdev_sched_wakeup(void *task)
{
+ return 0;
}
void gdev_lock_init(struct gdev_lock *p)