mali: Convert some workqueues to use kthreads
Inspired by this commit:
android-linux-stable/wahoo@06be35a

Another commit picked for this:
kerneltoast/android_kernel_google_wahoo@ba31334

Result: Phone booted to UI

Signed-off-by: Diep Quynh <remilia.1505@gmail.com>
Signed-off-by: Eamon Powell <eamonpowell@outlook.com>
diepquynh authored and THEBOSS619 committed Sep 2, 2019
1 parent 24ae33e commit c4628ef
Showing 13 changed files with 66 additions and 47 deletions.
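In outline, every change below applies the same substitution: a work item that used to go onto a per-context workqueue becomes a kthread_work queued on the context's single kthread_worker. A minimal sketch of that pattern, with illustrative names (my_item, my_item_handler, my_item_queue are not driver identifiers):

#include <linux/kthread.h>

struct my_item {
	struct kthread_work work;	/* was: struct work_struct work */
	int payload;
};

static void my_item_handler(struct kthread_work *work)
{
	struct my_item *item = container_of(work, struct my_item, work);

	/* runs on the dedicated worker thread */
	(void)item->payload;
}

/* was: INIT_WORK(&item->work, fn); queue_work(wq, &item->work); */
static void my_item_queue(struct kthread_worker *worker, struct my_item *item)
{
	kthread_init_work(&item->work, my_item_handler);
	kthread_queue_work(worker, &item->work);
}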
2 changes: 1 addition & 1 deletion drivers/gpu/arm/tHEx/b_r15p0/mali_kbase_sync.h
@@ -199,7 +199,7 @@ const char *kbase_sync_status_string(int status);
/*
* Internal worker used to continue processing of atom.
*/
-void kbase_sync_fence_wait_worker(struct work_struct *data);
+void kbase_sync_fence_wait_worker(struct kthread_work *data);

#ifdef CONFIG_MALI_FENCE_DEBUG
/**
8 changes: 4 additions & 4 deletions drivers/gpu/arm/tHEx/b_r15p0/mali_kbase_sync_android.c
@@ -441,8 +441,8 @@ static void kbase_fence_wait_callback(struct sync_fence *fence,
* sync_timeline_signal. So we simply defer the work.
*/

-INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
-queue_work(kctx->jctx.job_done_wq, &katom->work);
+kthread_init_work(&katom->job_done_work, kbase_sync_fence_wait_worker);
+kthread_queue_work(&kctx->worker, &katom->job_done_work);
}

int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom)
@@ -462,8 +462,8 @@ int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom)
katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
/* We should cause the dependent jobs in the bag to be failed,
* to do this we schedule the work queue to complete this job */
-INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
-queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
+kthread_init_work(&katom->job_done_work, kbase_sync_fence_wait_worker);
+kthread_queue_work(&katom->kctx->worker, &katom->job_done_work);
}

return 1;
4 changes: 2 additions & 2 deletions drivers/gpu/arm/tHEx/b_r15p0/mali_kbase_sync_common.c
@@ -30,11 +30,11 @@
#include "mali_kbase.h"
#include "mali_kbase_sync.h"

-void kbase_sync_fence_wait_worker(struct work_struct *data)
+void kbase_sync_fence_wait_worker(struct kthread_work *data)
{
struct kbase_jd_atom *katom;

-katom = container_of(data, struct kbase_jd_atom, work);
+katom = container_of(data, struct kbase_jd_atom, job_done_work);
kbase_soft_event_wait_callback(katom);
}

8 changes: 4 additions & 4 deletions drivers/gpu/arm/tHEx/b_r15p0/mali_kbase_sync_file.c
@@ -204,8 +204,8 @@ static void kbase_fence_wait_callback(struct dma_fence *fence,
* kctx->jctx.lock and the callbacks are run synchronously from
* sync_timeline_signal. So we simply defer the work.
*/
-INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
-queue_work(kctx->jctx.job_done_wq, &katom->work);
+kthread_init_work(&katom->job_done_work, kbase_sync_fence_wait_worker);
+kthread_queue_work(&kctx->worker, &katom->job_done_work);
}
}

@@ -247,8 +247,8 @@ int kbase_sync_fence_in_wait(struct kbase_jd_atom *katom)
/* We should cause the dependent jobs in the bag to be failed,
* to do this we schedule the work queue to complete this job */

-INIT_WORK(&katom->work, kbase_sync_fence_wait_worker);
-queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
+kthread_init_work(&katom->job_done_work, kbase_sync_fence_wait_worker);
+kthread_queue_work(&katom->kctx->worker, &katom->job_done_work);
}

return 1; /* completion to be done later by callback/worker */
2 changes: 1 addition & 1 deletion drivers/gpu/arm/tHEx/b_r16p0/mali_kbase.h
@@ -220,7 +220,7 @@ int kbase_jd_submit(struct kbase_context *kctx,
* Handles retrying submission outside of IRQ context if it failed from within
* IRQ context.
*/
-void kbase_jd_done_worker(struct work_struct *data);
+void kbase_jd_done_worker(struct kthread_work *data);

void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
kbasep_js_atom_done_code done_code);
13 changes: 13 additions & 0 deletions drivers/gpu/arm/tHEx/b_r16p0/mali_kbase_core_linux.c
@@ -401,6 +401,7 @@ static int kbase_open(struct inode *inode, struct file *filp)
{
struct kbase_device *kbdev = NULL;
struct kbase_context *kctx;
+struct sched_param param = { .sched_priority = 16 };
int ret = 0;
#ifdef CONFIG_DEBUG_FS
char kctx_name[64];
@@ -476,6 +477,18 @@ static int kbase_open(struct inode *inode, struct file *filp)
/* we don't treat this as a fail - just warn about it */
dev_warn(kbdev->dev, "couldn't add kctx to kctx_list\n");
}
+kthread_init_worker(&kctx->worker);
+
+kctx->worker_thread = kthread_run(kthread_worker_fn,
+&kctx->worker, "mali_kctx_worker");
+
+if (IS_ERR(kctx->worker_thread)) {
+pr_err("unable to start mali worker thread\n");
+goto out;
+}
+
+sched_setscheduler(kctx->worker_thread, SCHED_FIFO, &param);
+
}
return 0;

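The hunk above starts one worker thread per context and raises it to SCHED_FIFO priority 16 (newer kernels expose sched_set_fifo() for the same purpose). The matching teardown is not part of the lines shown here, so the following is only a sketch of what it would typically look like, under the assumption that it runs from the context-destroy path:

/* assumed cleanup path; not taken from this commit */
static void mali_kctx_worker_stop(struct kbase_context *kctx)
{
	if (!IS_ERR_OR_NULL(kctx->worker_thread)) {
		kthread_flush_worker(&kctx->worker);	/* drain pending works */
		kthread_stop(kctx->worker_thread);	/* kthread_worker_fn returns */
	}
}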
4 changes: 2 additions & 2 deletions drivers/gpu/arm/tHEx/b_r16p0/mali_kbase_debug_job_fault.c
@@ -141,7 +141,7 @@ static void kbase_job_fault_resume_event_cleanup(struct kbase_context *kctx)

event = kbase_job_fault_event_dequeue(kctx->kbdev,
&kctx->job_fault_resume_event_list);
-kbase_jd_done_worker(&event->katom->work);
+kbase_jd_done_worker(&event->katom->job_done_work);
}

}
@@ -186,7 +186,7 @@ static void kbase_job_fault_resume_worker(struct work_struct *data)
kbase_ctx_has_no_event_pending(kctx));

atomic_set(&kctx->job_fault_count, 0);
-kbase_jd_done_worker(&katom->work);
+kbase_jd_done_worker(&katom->job_done_work);

/* In case the following atoms were scheduled during failed job dump
* the job_done_worker was held. We need to rerun it after the dump
7 changes: 7 additions & 0 deletions drivers/gpu/arm/tHEx/b_r16p0/mali_kbase_defs.h
@@ -49,6 +49,7 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sizes.h>
+#include <linux/kthread.h>

#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
#include <linux/bus_logger.h>
@@ -603,7 +604,11 @@ struct kbase_atom_dependency_systrace {
* snapshot of the age_count counter in kbase context.
*/
struct kbase_jd_atom {
+/* kthread work list */
struct work_struct work;
+struct kthread_work event_work;
+struct kthread_work job_done_work;
+struct kthread_work js_work;
ktime_t start_timestamp;

struct base_jd_udata udata;
@@ -2026,6 +2031,8 @@ struct kbase_context {
struct list_head event_coalesce_list;
struct mutex event_mutex;
atomic_t event_closed;
+struct kthread_worker worker;
+struct task_struct *worker_thread;
struct workqueue_struct *event_workq;
atomic_t event_count;
int event_coalesce_count;
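Several distinct kthread_work members are added because every deferral path now shares the one per-context worker: re-initialising a kthread_work that is still queued would corrupt the worker's list, so each path gets its own embedded item instead of reusing a single work field. Roughly, collating the hunks below (illustrative summary, not a single block in the driver):

kthread_init_work(&katom->event_work,    kbase_event_process_noreport_worker);
kthread_init_work(&katom->job_done_work, kbase_jd_done_worker); /* also fence wait, cancel, soft-event completion */
kthread_init_work(&katom->js_work,       js_return_worker);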
10 changes: 5 additions & 5 deletions drivers/gpu/arm/tHEx/b_r16p0/mali_kbase_dma_fence.c
@@ -39,7 +39,7 @@
#include <mali_kbase.h>

static void
-kbase_dma_fence_work(struct work_struct *pwork);
+kbase_dma_fence_work(struct kthread_work *pwork);

static void
kbase_dma_fence_waiters_add(struct kbase_jd_atom *katom)
@@ -127,8 +127,8 @@ kbase_dma_fence_queue_work(struct kbase_jd_atom *katom)
struct kbase_context *kctx = katom->kctx;
bool ret;

-INIT_WORK(&katom->work, kbase_dma_fence_work);
-ret = queue_work(kctx->dma_fence.wq, &katom->work);
+kthread_init_work(&katom->fence_work, kbase_dma_fence_work);
+ret = kthread_queue_work(&kctx->worker, &katom->fence_work);
/* Warn if work was already queued, that should not happen. */
WARN_ON(!ret);
}
@@ -171,12 +171,12 @@ kbase_dma_fence_cancel_atom(struct kbase_jd_atom *katom)
* This function will clean and mark all dependencies as satisfied
*/
static void
-kbase_dma_fence_work(struct work_struct *pwork)
+kbase_dma_fence_work(struct kthread_work *pwork)
{
struct kbase_jd_atom *katom;
struct kbase_jd_context *ctx;

-katom = container_of(pwork, struct kbase_jd_atom, work);
+katom = container_of(pwork, struct kbase_jd_atom, fence_work);
ctx = &katom->kctx->jctx;

mutex_lock(&ctx->lock);
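The WARN_ON(!ret) above keeps its meaning across the conversion: like queue_work(), kthread_queue_work() returns true when the item was queued and false when it was already pending. A one-line illustration, using the fence_work member as this file expects it:

bool queued = kthread_queue_work(&kctx->worker, &katom->fence_work);

WARN_ON(!queued);	/* false would mean the work item was already queued */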
9 changes: 4 additions & 5 deletions drivers/gpu/arm/tHEx/b_r16p0/mali_kbase_event.c
@@ -170,10 +170,10 @@ KBASE_EXPORT_TEST_API(kbase_event_dequeue);
* resources
* @data: Work structure
*/
-static void kbase_event_process_noreport_worker(struct work_struct *data)
+static void kbase_event_process_noreport_worker(struct kthread_work *data)
{
struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
-work);
+event_work);
struct kbase_context *kctx = katom->kctx;

if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
@@ -197,8 +197,8 @@ static void kbase_event_process_noreport(struct kbase_context *kctx,
struct kbase_jd_atom *katom)
{
if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
-INIT_WORK(&katom->work, kbase_event_process_noreport_worker);
-queue_work(kctx->event_workq, &katom->work);
+kthread_init_work(&katom->event_work, kbase_event_process_noreport_worker);
+kthread_queue_work(&kctx->worker, &katom->event_work);
} else {
kbase_event_process(kctx, katom);
}
@@ -297,7 +297,6 @@ void kbase_event_cleanup(struct kbase_context *kctx)
int event_count;

KBASE_DEBUG_ASSERT(kctx);
-KBASE_DEBUG_ASSERT(kctx->event_workq);

flush_workqueue(kctx->event_workq);
destroy_workqueue(kctx->event_workq);
16 changes: 8 additions & 8 deletions drivers/gpu/arm/tHEx/b_r16p0/mali_kbase_jd.c
@@ -1242,9 +1242,9 @@ KBASE_EXPORT_TEST_API(kbase_jd_submit);
#include <linux/sti/abc_common.h>
#endif

-void kbase_jd_done_worker(struct work_struct *data)
+void kbase_jd_done_worker(struct kthread_work *data)
{
-struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
+struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, job_done_work);
struct kbase_jd_context *jctx;
struct kbase_context *kctx;
struct kbasep_js_kctx_info *js_kctx_info;
@@ -1425,9 +1425,9 @@ void kbase_jd_done_worker(struct work_struct *data)
* running (by virtue of only being called on contexts that aren't
* scheduled).
*/
-static void jd_cancel_worker(struct work_struct *data)
+static void jd_cancel_worker(struct kthread_work *data)
{
-struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, work);
+struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom, job_done_work);
struct kbase_jd_context *jctx;
struct kbase_context *kctx;
struct kbasep_js_kctx_info *js_kctx_info;
@@ -1521,8 +1521,8 @@ void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr,
#endif

WARN_ON(work_pending(&katom->work));
-INIT_WORK(&katom->work, kbase_jd_done_worker);
-queue_work(kctx->jctx.job_done_wq, &katom->work);
+kthread_init_work(&katom->job_done_work, kbase_jd_done_worker);
+kthread_queue_work(&kctx->worker, &katom->job_done_work);
}

KBASE_EXPORT_TEST_API(kbase_jd_done);
@@ -1545,8 +1545,8 @@ void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom)

katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;

-INIT_WORK(&katom->work, jd_cancel_worker);
-queue_work(kctx->jctx.job_done_wq, &katom->work);
+kthread_init_work(&katom->job_done_work, jd_cancel_worker);
+kthread_queue_work(&kctx->worker, &katom->job_done_work);
}


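Because a kthread_worker executes its queued items one at a time on its single thread, routing the done, cancel and soft-event completions through kctx->worker keeps them serialised per context, on a thread whose scheduling class the driver now controls. Illustration only (katom_a and katom_b are hypothetical atoms):

/* both items run on the same thread, strictly in queue order */
kthread_queue_work(&kctx->worker, &katom_a->job_done_work);
kthread_queue_work(&kctx->worker, &katom_b->job_done_work);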
8 changes: 4 additions & 4 deletions drivers/gpu/arm/tHEx/b_r16p0/mali_kbase_js.c
@@ -2244,10 +2244,10 @@ struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js)
}


-static void js_return_worker(struct work_struct *data)
+static void js_return_worker(struct kthread_work *data)
{
struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
-work);
+js_work);
struct kbase_context *kctx = katom->kctx;
struct kbase_device *kbdev = kctx->kbdev;
struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
@@ -2364,8 +2364,8 @@ void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)

kbase_job_check_leave_disjoint(kctx->kbdev, katom);

-INIT_WORK(&katom->work, js_return_worker);
-queue_work(kctx->jctx.job_done_wq, &katom->work);
+kthread_init_work(&katom->js_work, js_return_worker);
+kthread_queue_work(&kctx->worker, &katom->js_work);
}

bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
22 changes: 11 additions & 11 deletions drivers/gpu/arm/tHEx/b_r16p0/mali_kbase_softjobs.c
@@ -212,10 +212,10 @@ void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom)
}
#endif

-static void kbasep_soft_event_complete_job(struct work_struct *work)
+static void kbasep_soft_event_complete_job(struct kthread_work *work)
{
struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
-work);
+job_done_work);
struct kbase_context *kctx = katom->kctx;
int resched;

@@ -244,10 +244,10 @@ void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt)
list_del(&katom->queue);

katom->event_code = BASE_JD_EVENT_DONE;
-INIT_WORK(&katom->work,
+kthread_init_work(&katom->job_done_work,
kbasep_soft_event_complete_job);
-queue_work(kctx->jctx.job_done_wq,
-&katom->work);
+kthread_queue_work(&kctx->worker,
+&katom->job_done_work);
} else {
/* There are still other waiting jobs, we cannot
* cancel the timer yet.
@@ -340,10 +340,10 @@ static void kbase_fence_debug_wait_timeout(struct kbase_jd_atom *katom)

struct kbase_fence_debug_work {
struct kbase_jd_atom *katom;
-struct work_struct work;
+struct kthread_work work;
};

-static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)
+static void kbase_fence_debug_wait_timeout_worker(struct kthread_work *work)
{
struct kbase_fence_debug_work *w = container_of(work,
struct kbase_fence_debug_work, work);
@@ -369,8 +369,8 @@ static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
/* Ignore allocation failure. */
if (work) {
work->katom = katom;
-INIT_WORK(&work->work, kbase_fence_debug_wait_timeout_worker);
-queue_work(kctx->jctx.job_done_wq, &work->work);
+kthread_init_work(&work->work, kbase_fence_debug_wait_timeout_worker);
+kthread_queue_work(&kctx->worker, &work->work);
}
}
#endif /* CONFIG_MALI_FENCE_DEBUG */
@@ -406,8 +406,8 @@ void kbasep_soft_job_timeout_worker(struct timer_list *timer)
list_del(&katom->queue);

katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
-INIT_WORK(&katom->work, kbasep_soft_event_complete_job);
-queue_work(kctx->jctx.job_done_wq, &katom->work);
+kthread_init_work(&katom->job_done_work, kbasep_soft_event_complete_job);
+kthread_queue_work(&kctx->worker, &katom->job_done_work);
break;
#ifdef CONFIG_MALI_FENCE_DEBUG
case BASE_JD_REQ_SOFT_FENCE_WAIT:
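The fence-debug path shows the other shape of the conversion: a small, separately allocated wrapper embeds the kthread_work instead of reusing a field in the atom. A hedged sketch of the whole lifecycle; the allocation flags and the kfree() in the handler are assumptions, since those lines are outside the hunks shown:

static void kbase_fence_debug_wait_timeout_worker(struct kthread_work *work)
{
	struct kbase_fence_debug_work *w =
			container_of(work, struct kbase_fence_debug_work, work);

	/* ... dump fence state for w->katom ... */

	kfree(w);	/* assumed: the handler frees the wrapper it was handed */
}

static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	struct kbase_fence_debug_work *work = kzalloc(sizeof(*work), GFP_ATOMIC);

	/* Ignore allocation failure. */
	if (work) {
		work->katom = katom;
		kthread_init_work(&work->work, kbase_fence_debug_wait_timeout_worker);
		kthread_queue_work(&kctx->worker, &work->work);
	}
}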
