-
Notifications
You must be signed in to change notification settings - Fork 0
/
0001-dma-Convert-from-tasklet-to-BH-workqueue.patch
3381 lines (2948 loc) · 114 KB
/
0001-dma-Convert-from-tasklet-to-BH-workqueue.patch
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
From 44bd588278dddee78a6d128f7becf1ba1717d6a5 Mon Sep 17 00:00:00 2001
From: Allen Pais <apais@linux.microsoft.com>
Date: Wed, 7 Feb 2024 18:27:39 +0000
Subject: [PATCH 1/8] dma: Convert from tasklet to BH workqueue
The only generic interface to execute asynchronously in the BH context is
tasklet; however, it's marked deprecated and has some design flaws. To
replace tasklets, BH workqueue support was recently added. A BH workqueue
behaves similarly to regular workqueues except that the queued work items
are executed in the BH context.
This patch converts drivers/dma/* from tasklet to BH workqueue.
Based on the work done by Tejun Heo <tj@kernel.org>
Branch: https://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git disable_work-v1
Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
drivers/dma/altera-msgdma.c | 14 ++++----
drivers/dma/apple-admac.c | 16 ++++-----
drivers/dma/at_hdmac.c | 2 +-
drivers/dma/at_xdmac.c | 14 ++++----
drivers/dma/bcm2835-dma.c | 2 +-
drivers/dma/dma-axi-dmac.c | 2 +-
drivers/dma/dma-jz4780.c | 2 +-
.../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 2 +-
drivers/dma/dw-edma/dw-edma-core.c | 2 +-
drivers/dma/dw/core.c | 12 +++----
drivers/dma/dw/regs.h | 2 +-
drivers/dma/ep93xx_dma.c | 14 ++++----
drivers/dma/fsl-edma-common.c | 2 +-
drivers/dma/fsl-qdma.c | 2 +-
drivers/dma/fsl_raid.c | 10 +++---
drivers/dma/fsl_raid.h | 2 +-
drivers/dma/fsldma.c | 14 ++++----
drivers/dma/fsldma.h | 2 +-
drivers/dma/hisi_dma.c | 2 +-
drivers/dma/hsu/hsu.c | 2 +-
drivers/dma/idma64.c | 4 +--
drivers/dma/img-mdc-dma.c | 2 +-
drivers/dma/imx-dma.c | 26 +++++++-------
drivers/dma/imx-sdma.c | 6 ++--
drivers/dma/ioat/dma.c | 16 ++++-----
drivers/dma/ioat/dma.h | 4 +--
drivers/dma/ioat/init.c | 2 +-
drivers/dma/k3dma.c | 18 +++++-----
drivers/dma/mediatek/mtk-cqdma.c | 32 ++++++++---------
drivers/dma/mediatek/mtk-hsdma.c | 2 +-
drivers/dma/mediatek/mtk-uart-apdma.c | 4 +--
drivers/dma/mmp_pdma.c | 12 +++----
drivers/dma/mmp_tdma.c | 10 +++---
drivers/dma/mpc512x_dma.c | 16 ++++-----
drivers/dma/mv_xor.c | 12 +++----
drivers/dma/mv_xor.h | 4 +--
drivers/dma/mv_xor_v2.c | 23 +++++++------
drivers/dma/mxs-dma.c | 12 +++----
drivers/dma/nbpfaxi.c | 14 ++++----
drivers/dma/owl-dma.c | 2 +-
drivers/dma/pch_dma.c | 16 ++++-----
drivers/dma/pl330.c | 30 ++++++++--------
drivers/dma/plx_dma.c | 12 +++----
drivers/dma/ppc4xx/adma.c | 18 +++++-----
drivers/dma/ppc4xx/adma.h | 4 +--
drivers/dma/ptdma/ptdma-dev.c | 2 +-
drivers/dma/ptdma/ptdma.h | 4 +--
drivers/dma/pxa_dma.c | 2 +-
drivers/dma/qcom/bam_dma.c | 34 +++++++++----------
drivers/dma/qcom/gpi.c | 17 +++++-----
drivers/dma/qcom/hidma.c | 10 +++---
drivers/dma/qcom/hidma.h | 4 +--
drivers/dma/qcom/hidma_ll.c | 10 +++---
drivers/dma/qcom/qcom_adm.c | 2 +-
drivers/dma/sa11x0-dma.c | 26 +++++++-------
drivers/dma/sf-pdma/sf-pdma.c | 26 ++++++++------
drivers/dma/sf-pdma/sf-pdma.h | 4 +--
drivers/dma/sprd-dma.c | 2 +-
drivers/dma/st_fdma.c | 2 +-
drivers/dma/ste_dma40.c | 16 ++++-----
drivers/dma/sun6i-dma.c | 32 ++++++++---------
drivers/dma/tegra186-gpc-dma.c | 2 +-
drivers/dma/tegra20-apb-dma.c | 20 ++++++-----
drivers/dma/tegra210-adma.c | 2 +-
drivers/dma/ti/edma.c | 2 +-
drivers/dma/ti/k3-udma.c | 12 ++++---
drivers/dma/ti/omap-dma.c | 2 +-
drivers/dma/timb_dma.c | 22 ++++++------
drivers/dma/txx9dmac.c | 32 ++++++++---------
drivers/dma/txx9dmac.h | 4 +--
drivers/dma/virt-dma.c | 8 ++---
drivers/dma/virt-dma.h | 8 ++---
drivers/dma/xgene-dma.c | 20 +++++------
drivers/dma/xilinx/xilinx_dma.c | 26 +++++++-------
drivers/dma/xilinx/xilinx_dpdma.c | 22 ++++++------
drivers/dma/xilinx/zynqmp_dma.c | 24 +++++++------
76 files changed, 418 insertions(+), 402 deletions(-)
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index a8e3615235b8..a008a48042bb 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -170,7 +170,7 @@ struct msgdma_sw_desc {
struct msgdma_device {
spinlock_t lock;
struct device *dev;
- struct tasklet_struct irq_tasklet;
+ struct work_struct irq_bh;
struct list_head pending_list;
struct list_head free_list;
struct list_head active_list;
@@ -676,12 +676,12 @@ static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
}
/**
- * msgdma_tasklet - Schedule completion tasklet
+ * msgdma_work - Queue completion work
* @t: Pointer to the Altera sSGDMA channel structure
*/
-static void msgdma_tasklet(struct tasklet_struct *t)
+static void msgdma_work(struct work_struct *work)
{
- struct msgdma_device *mdev = from_tasklet(mdev, t, irq_tasklet);
+ struct msgdma_device *mdev = container_of(work, struct msgdma_device, irq_bh);
u32 count;
u32 __maybe_unused size;
u32 __maybe_unused status;
@@ -740,7 +740,7 @@ static irqreturn_t msgdma_irq_handler(int irq, void *data)
spin_unlock(&mdev->lock);
}
- tasklet_schedule(&mdev->irq_tasklet);
+ queue_work(system_bh_wq, &mdev->irq_bh);
/* Clear interrupt in mSGDMA controller */
iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);
@@ -758,7 +758,7 @@ static void msgdma_dev_remove(struct msgdma_device *mdev)
return;
devm_free_irq(mdev->dev, mdev->irq, mdev);
- tasklet_kill(&mdev->irq_tasklet);
+ cancel_work_sync(&mdev->irq_bh);
list_del(&mdev->dmachan.device_node);
}
@@ -844,7 +844,7 @@ static int msgdma_probe(struct platform_device *pdev)
if (ret)
return ret;
- tasklet_setup(&mdev->irq_tasklet, msgdma_tasklet);
+ INIT_WORK(&mdev->irq_bh, msgdma_work);
dma_cookie_init(&mdev->dmachan);
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 5b63996640d9..e5597ab86487 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -87,7 +87,7 @@ struct admac_chan {
unsigned int no;
struct admac_data *host;
struct dma_chan chan;
- struct tasklet_struct tasklet;
+ struct work_struct bh;
u32 carveout;
@@ -518,8 +518,8 @@ static int admac_terminate_all(struct dma_chan *chan)
adchan->current_tx = NULL;
}
/*
- * Descriptors can only be freed after the tasklet
- * has been killed (in admac_synchronize).
+ * Descriptors can only be freed after the wq
+ * has been flushed (in admac_synchronize).
*/
list_splice_tail_init(&adchan->submitted, &adchan->to_free);
list_splice_tail_init(&adchan->issued, &adchan->to_free);
@@ -539,7 +539,7 @@ static void admac_synchronize(struct dma_chan *chan)
list_splice_tail_init(&adchan->to_free, &head);
spin_unlock_irqrestore(&adchan->lock, flags);
- tasklet_kill(&adchan->tasklet);
+ cancel_work_sync(&adchan->bh);
list_for_each_entry_safe(adtx, _adtx, &head, node) {
list_del(&adtx->node);
@@ -658,7 +658,7 @@ static void admac_handle_status_desc_done(struct admac_data *ad, int channo)
tx->reclaimed_pos %= 2 * tx->buf_len;
admac_cyclic_write_desc(ad, channo, tx);
- tasklet_schedule(&adchan->tasklet);
+ queue_work(system_bh_wq, &adchan->bh);
}
spin_unlock_irqrestore(&adchan->lock, flags);
}
@@ -708,9 +708,9 @@ static irqreturn_t admac_interrupt(int irq, void *devid)
return IRQ_HANDLED;
}
-static void admac_chan_tasklet(struct tasklet_struct *t)
+static void admac_chan_work(struct work_struct *work)
{
- struct admac_chan *adchan = from_tasklet(adchan, t, tasklet);
+ struct admac_chan *adchan = container_of(work, struct admac_chan, bh);
struct admac_tx *adtx;
struct dmaengine_desc_callback cb;
struct dmaengine_result tx_result;
@@ -881,7 +881,7 @@ static int admac_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&adchan->issued);
INIT_LIST_HEAD(&adchan->to_free);
list_add_tail(&adchan->chan.device_node, &dma->channels);
- tasklet_setup(&adchan->tasklet, admac_chan_tasklet);
+ INIT_WORK(&adchan->bh, admac_chan_work);
}
err = reset_control_reset(ad->rstc);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fb89ecbf0cc5..435cbe0e42e5 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -263,7 +263,7 @@ enum atc_status {
* @per_if: peripheral interface
* @mem_if: memory interface
* @status: transmit status information from irq/prep* functions
- * to tasklet (use atomic operations)
+ * to wq (use atomic operations)
* @save_cfg: configuration register that is saved on suspend/resume cycle
* @save_dscr: for cyclic operations, preserve next descriptor address in
* the cyclic list on suspend/resume cycle
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 299396121e6d..47161c8cfc8f 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -228,7 +228,7 @@ struct at_xdmac_chan {
u32 save_cndc;
u32 irq_status;
unsigned long status;
- struct tasklet_struct tasklet;
+ struct work_struct bh;
struct dma_slave_config sconfig;
spinlock_t lock;
@@ -1762,9 +1762,9 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
/* Then continue with usual descriptor management */
}
-static void at_xdmac_tasklet(struct tasklet_struct *t)
+static void at_xdmac_work(struct work_struct *work)
{
- struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
+ struct at_xdmac_chan *atchan = container_of(work, struct at_xdmac_chan, bh);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
struct at_xdmac_desc *desc;
struct dma_async_tx_descriptor *txd;
@@ -1869,7 +1869,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
- tasklet_schedule(&atchan->tasklet);
+ queue_work(system_bh_wq, &atchan->bh);
ret = IRQ_HANDLED;
}
@@ -2307,7 +2307,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
return PTR_ERR(atxdmac->clk);
}
- /* Do not use dev res to prevent races with tasklet */
+ /* Do not use dev res to prevent races with work */
ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
if (ret) {
dev_err(&pdev->dev, "can't request irq\n");
@@ -2387,7 +2387,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
spin_lock_init(&atchan->lock);
INIT_LIST_HEAD(&atchan->xfers_list);
INIT_LIST_HEAD(&atchan->free_descs_list);
- tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);
+ INIT_WORK(&atchan->bh, at_xdmac_work);
/* Clear pending interrupts. */
while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
@@ -2449,7 +2449,7 @@ static void at_xdmac_remove(struct platform_device *pdev)
for (i = 0; i < atxdmac->dma.chancnt; i++) {
struct at_xdmac_chan *atchan = &atxdmac->chan[i];
- tasklet_kill(&atchan->tasklet);
+ cancel_work_sync(&atchan->bh);
at_xdmac_free_chan_resources(&atchan->chan);
}
}
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 9d74fe97452e..1bfa6ff78bba 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -846,7 +846,7 @@ static void bcm2835_dma_free(struct bcm2835_dmadev *od)
list_for_each_entry_safe(c, next, &od->ddev.channels,
vc.chan.device_node) {
list_del(&c->vc.chan.device_node);
- tasklet_kill(&c->vc.task);
+ cancel_work_sync(&c->vc.bh);
}
dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 2457a420c13d..ccb19ea3ad3b 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -1034,7 +1034,7 @@ static void axi_dmac_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
free_irq(dmac->irq, dmac);
- tasklet_kill(&dmac->chan.vchan.task);
+ cancel_work_sync(&dmac->chan.vchan.bh);
dma_async_device_unregister(&dmac->dma_dev);
clk_disable_unprepare(dmac->clk);
}
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index c9cfa341db51..c58dbb8003ef 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -1019,7 +1019,7 @@ static void jz4780_dma_remove(struct platform_device *pdev)
free_irq(jzdma->irq, jzdma);
for (i = 0; i < jzdma->soc_data->nb_channels; i++)
- tasklet_kill(&jzdma->chan[i].vchan.task);
+ cancel_work_sync(&jzdma->chan[i].vchan.bh);
}
static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index a86a81ff0caa..787c55280637 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -1636,7 +1636,7 @@ static void dw_remove(struct platform_device *pdev)
list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
vc.chan.device_node) {
list_del(&chan->vc.chan.device_node);
- tasklet_kill(&chan->vc.task);
+ cancel_work_sync(&chan->vc.bh);
}
}
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 68236247059d..60fdf8db4ad0 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -1003,7 +1003,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
dma_async_device_unregister(&dw->dma);
list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
vc.chan.device_node) {
- tasklet_kill(&chan->vc.task);
+ cancel_work_sync(&chan->vc.bh);
list_del(&chan->vc.chan.device_node);
}
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5f7d690e3dba..e60f7468c991 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -181,7 +181,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
__func__);
dwc_dump_chan_regs(dwc);
- /* The tasklet will hopefully advance the queue... */
+ /* The work will hopefully advance the queue... */
return;
}
@@ -460,9 +460,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_descriptor_complete(dwc, bad_desc, true);
}
-static void dw_dma_tasklet(struct tasklet_struct *t)
+static void dw_dma_work(struct work_struct *work)
{
- struct dw_dma *dw = from_tasklet(dw, t, tasklet);
+ struct dw_dma *dw = container_of(work, struct dw_dma, bh);
struct dw_dma_chan *dwc;
u32 status_xfer;
u32 status_err;
@@ -526,7 +526,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
}
- tasklet_schedule(&dw->tasklet);
+ queue_work(system_bh_wq, &dw->bh);
return IRQ_HANDLED;
}
@@ -1138,7 +1138,7 @@ int do_dma_probe(struct dw_dma_chip *chip)
goto err_pdata;
}
- tasklet_setup(&dw->tasklet, dw_dma_tasklet);
+ INIT_WORK(&dw->bh, dw_dma_work);
err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
dw->name, dw);
@@ -1283,7 +1283,7 @@ int do_dma_remove(struct dw_dma_chip *chip)
dma_async_device_unregister(&dw->dma);
free_irq(chip->irq, dw);
- tasklet_kill(&dw->tasklet);
+ cancel_work_sync(&dw->bh);
list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
chan.device_node) {
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 76654bd13c1a..801d6a0a357a 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -315,7 +315,7 @@ struct dw_dma {
char name[20];
void __iomem *regs;
struct dma_pool *desc_pool;
- struct tasklet_struct tasklet;
+ struct work_struct bh;
/* channels */
struct dw_dma_chan *chan;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index d6c60635e90d..0817ddc5be8a 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -136,7 +136,7 @@ struct ep93xx_dma_desc {
* @regs: memory mapped registers
* @irq: interrupt number of the channel
* @clk: clock used by this channel
- * @tasklet: channel specific tasklet used for callbacks
+ * @bh: channel specific work used for callbacks
* @lock: lock protecting the fields following
* @flags: flags for the channel
* @buffer: which buffer to use next (0/1)
@@ -167,7 +167,7 @@ struct ep93xx_dma_chan {
void __iomem *regs;
int irq;
struct clk *clk;
- struct tasklet_struct tasklet;
+ struct work_struct bh;
/* protects the fields following */
spinlock_t lock;
unsigned long flags;
@@ -745,9 +745,9 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
spin_unlock_irqrestore(&edmac->lock, flags);
}
-static void ep93xx_dma_tasklet(struct tasklet_struct *t)
+static void ep93xx_dma_work(struct work_struct *work)
{
- struct ep93xx_dma_chan *edmac = from_tasklet(edmac, t, tasklet);
+ struct ep93xx_dma_chan *edmac = container_of(work, struct ep93xx_dma_chan, bh);
struct ep93xx_dma_desc *desc, *d;
struct dmaengine_desc_callback cb;
LIST_HEAD(list);
@@ -802,12 +802,12 @@ static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
switch (edmac->edma->hw_interrupt(edmac)) {
case INTERRUPT_DONE:
desc->complete = true;
- tasklet_schedule(&edmac->tasklet);
+ queue_work(system_bh_wq, &edmac->bh);
break;
case INTERRUPT_NEXT_BUFFER:
if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
- tasklet_schedule(&edmac->tasklet);
+ queue_work(system_bh_wq, &edmac->bh);
break;
default:
@@ -1351,7 +1351,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&edmac->active);
INIT_LIST_HEAD(&edmac->queue);
INIT_LIST_HEAD(&edmac->free_list);
- tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet);
+ INIT_WORK(&edmac->bh, ep93xx_dma_work);
list_add_tail(&edmac->chan.device_node,
&dma_dev->channels);
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b53f46245c37..4c66b8754004 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -838,7 +838,7 @@ void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
list_for_each_entry_safe(chan, _chan,
&dmadev->channels, vchan.chan.device_node) {
list_del(&chan->vchan.chan.device_node);
- tasklet_kill(&chan->vchan.task);
+ cancel_work_sync(&chan->vchan.bh);
}
}
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 47cb28468049..c8e47c127e00 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -1262,7 +1262,7 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
list_for_each_entry_safe(chan, _chan,
&dmadev->channels, vchan.chan.device_node) {
list_del(&chan->vchan.chan.device_node);
- tasklet_kill(&chan->vchan.task);
+ cancel_work_sync(&chan->vchan.bh);
}
}
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 014ff523d5ec..1a50039ad62a 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -155,9 +155,9 @@ static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)
fsl_re_issue_pending(&re_chan->chan);
}
-static void fsl_re_dequeue(struct tasklet_struct *t)
+static void fsl_re_dequeue(struct work_struct *work)
{
- struct fsl_re_chan *re_chan = from_tasklet(re_chan, t, irqtask);
+ struct fsl_re_chan *re_chan = container_of(work, struct fsl_re_chan, bh);
struct fsl_re_desc *desc, *_desc;
struct fsl_re_hw_desc *hwdesc;
unsigned long flags;
@@ -224,7 +224,7 @@ static irqreturn_t fsl_re_isr(int irq, void *data)
/* Clear interrupt */
out_be32(&re_chan->jrregs->jr_interrupt_status, FSL_RE_CLR_INTR);
- tasklet_schedule(&re_chan->irqtask);
+ queue_work(system_bh_wq, &re_chan->bh);
return IRQ_HANDLED;
}
@@ -670,7 +670,7 @@ static int fsl_re_chan_probe(struct platform_device *ofdev,
snprintf(chan->name, sizeof(chan->name), "re_jr%02d", q);
chandev = &chan_ofdev->dev;
- tasklet_setup(&chan->irqtask, fsl_re_dequeue);
+ INIT_WORK(&chan->bh, fsl_re_dequeue);
ret = request_irq(chan->irq, fsl_re_isr, 0, chan->name, chandev);
if (ret) {
@@ -848,7 +848,7 @@ static int fsl_re_probe(struct platform_device *ofdev)
static void fsl_re_remove_chan(struct fsl_re_chan *chan)
{
- tasklet_kill(&chan->irqtask);
+ cancel_work_sync(&chan->bh);
dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
chan->inb_phys_addr);
diff --git a/drivers/dma/fsl_raid.h b/drivers/dma/fsl_raid.h
index 69d743c04973..e6c4e3a16beb 100644
--- a/drivers/dma/fsl_raid.h
+++ b/drivers/dma/fsl_raid.h
@@ -275,7 +275,7 @@ struct fsl_re_chan {
struct dma_chan chan;
struct fsl_re_chan_cfg *jrregs;
int irq;
- struct tasklet_struct irqtask;
+ struct work_struct bh;
u32 alloc_count;
/* hw descriptor ring for inbound queue*/
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 18a6c4bf6275..6e5a791e06cd 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -968,20 +968,20 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);
/*
- * Schedule the tasklet to handle all cleanup of the current
+ * Queue the work to handle all cleanup of the current
* transaction. It will start a new transaction if there is
* one pending.
*/
- tasklet_schedule(&chan->tasklet);
+ queue_work(system_bh_wq, &chan->bh);
chan_dbg(chan, "irq: Exit\n");
return IRQ_HANDLED;
}
-static void dma_do_tasklet(struct tasklet_struct *t)
+static void dma_do_work(struct work_struct *work)
{
- struct fsldma_chan *chan = from_tasklet(chan, t, tasklet);
+ struct fsldma_chan *chan = container_of(work, struct fsldma_chan, bh);
- chan_dbg(chan, "tasklet entry\n");
+ chan_dbg(chan, "work entry\n");
spin_lock(&chan->desc_lock);
@@ -993,7 +993,7 @@ static void dma_do_tasklet(struct tasklet_struct *t)
spin_unlock(&chan->desc_lock);
- chan_dbg(chan, "tasklet exit\n");
+ chan_dbg(chan, "work exit\n");
}
static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
@@ -1152,7 +1152,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
}
fdev->chan[chan->id] = chan;
- tasklet_setup(&chan->tasklet, dma_do_tasklet);
+ INIT_WORK(&chan->bh, dma_do_work);
snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
/* Initialize the channel */
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 308bed0a560a..0a7ef3aa62d5 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -172,7 +172,7 @@ struct fsldma_chan {
struct device *dev; /* Channel device */
int irq; /* Channel IRQ */
int id; /* Raw id of this channel */
- struct tasklet_struct tasklet;
+ struct work_struct bh;
u32 feature;
bool idle; /* DMA controller is idle */
#ifdef CONFIG_PM
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index 4c47bff81064..083179614be7 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -720,7 +720,7 @@ static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev)
for (i = 0; i < hdma_dev->chan_num; i++) {
hisi_dma_disable_qp(hdma_dev, i);
- tasklet_kill(&hdma_dev->chan[i].vc.task);
+ cancel_work_sync(&hdma_dev->chan[i].vc.bh);
}
}
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index af5a2e252c25..5145d0604a14 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -500,7 +500,7 @@ int hsu_dma_remove(struct hsu_dma_chip *chip)
for (i = 0; i < hsu->nr_channels; i++) {
struct hsu_dma_chan *hsuc = &hsu->chan[i];
- tasklet_kill(&hsuc->vchan.task);
+ cancel_work_sync(&hsuc->vchan.bh);
}
return 0;
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 78a938969d7d..b260bb972a5d 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -613,14 +613,14 @@ static void idma64_remove(struct idma64_chip *chip)
/*
* Explicitly call devm_request_irq() to avoid the side effects with
- * the scheduled tasklets.
+ * the queued work.
*/
devm_free_irq(chip->dev, chip->irq, idma64);
for (i = 0; i < idma64->dma.chancnt; i++) {
struct idma64_chan *idma64c = &idma64->chan[i];
- tasklet_kill(&idma64c->vchan.task);
+ cancel_work_sync(&idma64c->vchan.bh);
}
}
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 0532dd2640dc..e53d185daebb 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -1031,7 +1031,7 @@ static void mdc_dma_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, mchan->irq, mchan);
- tasklet_kill(&mchan->vc.task);
+ cancel_work_sync(&mchan->vc.bh);
}
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index ebf7c115d553..785641e72a1c 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -144,7 +144,7 @@ struct imxdma_channel {
struct imxdma_engine *imxdma;
unsigned int channel;
- struct tasklet_struct dma_tasklet;
+ struct work_struct bh;
struct list_head ld_free;
struct list_head ld_queue;
struct list_head ld_active;
@@ -345,8 +345,8 @@ static void imxdma_watchdog(struct timer_list *t)
imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));
- /* Tasklet watchdog error handler */
- tasklet_schedule(&imxdmac->dma_tasklet);
+ /* WQ watchdog error handler */
+ queue_work(system_bh_wq, &imxdmac->bh);
dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
imxdmac->channel);
}
@@ -391,8 +391,8 @@ static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
errcode |= IMX_DMA_ERR_BUFFER;
}
- /* Tasklet error handler */
- tasklet_schedule(&imxdma->channel[i].dma_tasklet);
+ /* WQ error handler */
+ queue_work(system_bh_wq, &imxdma->channel[i].bh);
dev_warn(imxdma->dev,
"DMA timeout on channel %d -%s%s%s%s\n", i,
@@ -449,8 +449,8 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
if (imxdma_chan_is_doing_cyclic(imxdmac))
- /* Tasklet progression */
- tasklet_schedule(&imxdmac->dma_tasklet);
+ /* WQ progression */
+ queue_work(system_bh_wq, &imxdmac->bh);
return;
}
@@ -463,8 +463,8 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
out:
imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
- /* Tasklet irq */
- tasklet_schedule(&imxdmac->dma_tasklet);
+ /* WQ irq */
+ queue_work(system_bh_wq, &imxdmac->bh);
}
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
@@ -593,9 +593,9 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
return 0;
}
-static void imxdma_tasklet(struct tasklet_struct *t)
+static void imxdma_work(struct work_struct *work)
{
- struct imxdma_channel *imxdmac = from_tasklet(imxdmac, t, dma_tasklet);
+ struct imxdma_channel *imxdmac = container_of(work, struct imxdma_channel, bh);
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct imxdma_desc *desc, *next_desc;
unsigned long flags;
@@ -1143,7 +1143,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&imxdmac->ld_free);
INIT_LIST_HEAD(&imxdmac->ld_active);
- tasklet_setup(&imxdmac->dma_tasklet, imxdma_tasklet);
+ INIT_WORK(&imxdmac->bh, imxdma_work);
imxdmac->chan.device = &imxdma->dma_device;
dma_cookie_init(&imxdmac->chan);
imxdmac->channel = i;
@@ -1212,7 +1212,7 @@ static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *
if (!is_imx1_dma(imxdma))
disable_irq(imxdmac->irq);
- tasklet_kill(&imxdmac->dma_tasklet);
+ cancel_work_sync(&imxdmac->bh);
}
}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f81ecf5863e8..f1a898d09a6a 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -885,7 +885,7 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
/*
* The callback is called from the interrupt context in order
* to reduce latency and to avoid the risk of altering the
- * SDMA transaction status by the time the client tasklet is
+ * SDMA transaction status by the time the client wq is
* executed.
*/
spin_unlock(&sdmac->vc.lock);
@@ -2368,11 +2368,11 @@ static void sdma_remove(struct platform_device *pdev)
kfree(sdma->script_addrs);
clk_unprepare(sdma->clk_ahb);
clk_unprepare(sdma->clk_ipg);
- /* Kill the tasklet */
+ /* Flush the work */
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct sdma_channel *sdmac = &sdma->channel[i];
- tasklet_kill(&sdmac->vc.task);
+ cancel_work_sync(&sdmac->vc.bh);
sdma_free_chan_resources(&sdmac->vc.chan);
}
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 79d8957f9e60..6690f542376c 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -110,7 +110,7 @@ irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
ioat_chan = ioat_chan_by_index(instance, bit);
if (test_bit(IOAT_RUN, &ioat_chan->state))
- tasklet_schedule(&ioat_chan->cleanup_task);
+ queue_work(system_bh_wq, &ioat_chan->bh);
}
writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -127,7 +127,7 @@ irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
struct ioatdma_chan *ioat_chan = data;
if (test_bit(IOAT_RUN, &ioat_chan->state))
- tasklet_schedule(&ioat_chan->cleanup_task);
+ queue_work(system_bh_wq, &ioat_chan->bh);
return IRQ_HANDLED;
}
@@ -139,8 +139,8 @@ void ioat_stop(struct ioatdma_chan *ioat_chan)
int chan_id = chan_num(ioat_chan);
struct msix_entry *msix;
- /* 1/ stop irq from firing tasklets
- * 2/ stop the tasklet from re-arming irqs
+ /* 1/ stop irq from queuing work
+ * 2/ stop the wq from re-arming irqs
*/
clear_bit(IOAT_RUN, &ioat_chan->state);
@@ -161,8 +161,8 @@ void ioat_stop(struct ioatdma_chan *ioat_chan)
/* flush inflight timers */
del_timer_sync(&ioat_chan->timer);
- /* flush inflight tasklet runs */
- tasklet_kill(&ioat_chan->cleanup_task);
+ /* flush inflight work runs */
+ cancel_work_sync(&ioat_chan->bh);
/* final cleanup now that everything is quiesced and can't re-arm */
- ioat_cleanup_event(&ioat_chan->cleanup_task);
+ ioat_cleanup_event(&ioat_chan->bh);
@@ -690,9 +690,9 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
spin_unlock_bh(&ioat_chan->cleanup_lock);
}
-void ioat_cleanup_event(struct tasklet_struct *t)
+void ioat_cleanup_event(struct work_struct *work)
{
- struct ioatdma_chan *ioat_chan = from_tasklet(ioat_chan, t, cleanup_task);
+ struct ioatdma_chan *ioat_chan = container_of(work, struct ioatdma_chan, bh);
ioat_cleanup(ioat_chan);
if (!test_bit(IOAT_RUN, &ioat_chan->state))
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index a180171087a8..e348edeba409 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -109,7 +109,7 @@ struct ioatdma_chan {
struct ioatdma_device *ioat_dma;
dma_addr_t completion_dma;
u64 *completion;
- struct tasklet_struct cleanup_task;
+ struct work_struct bh;
struct kobject kobj;
/* ioat v2 / v3 channel attributes
@@ -392,7 +392,7 @@ int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate);
-void ioat_cleanup_event(struct tasklet_struct *t);
+void ioat_cleanup_event(struct work_struct *work);
void ioat_timer_event(struct timer_list *t);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 9c364e92cb82..40a5b4bf2c26 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -776,7 +776,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
ioat_dma->idx[idx] = ioat_chan;
timer_setup(&ioat_chan->timer, ioat_timer_event, 0);
- tasklet_setup(&ioat_chan->cleanup_task, ioat_cleanup_event);
+ INIT_WORK(&ioat_chan->bh, ioat_cleanup_event);
}
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 5de8c21d41e7..cc908f16900f 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -98,7 +98,7 @@ struct k3_dma_phy {
struct k3_dma_dev {
struct dma_device slave;
void __iomem *base;
- struct tasklet_struct task;
+ struct work_struct bh;
spinlock_t lock;
struct list_head chan_pending;
struct k3_dma_phy *phy;
@@ -252,7 +252,7 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
writel_relaxed(err2, d->base + INT_ERR2_RAW);
if (irq_chan)
- tasklet_schedule(&d->task);
+ queue_work(system_bh_wq, &d->bh);
if (irq_chan || err1 || err2)
return IRQ_HANDLED;
@@ -295,9 +295,9 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
return -EAGAIN;
}
-static void k3_dma_tasklet(struct tasklet_struct *t)
+static void k3_dma_work(struct work_struct *work)
{
- struct k3_dma_dev *d = from_tasklet(d, t, task);
+ struct k3_dma_dev *d = container_of(work, struct k3_dma_dev, bh);
struct k3_dma_phy *p;
struct k3_dma_chan *c, *cn;
unsigned pch, pch_alloc = 0;
@@ -432,8 +432,8 @@ static void k3_dma_issue_pending(struct dma_chan *chan)
if (list_empty(&c->node)) {
/* if new channel, add chan_pending */
list_add_tail(&c->node, &d->chan_pending);
- /* check in tasklet */
- tasklet_schedule(&d->task);
+ /* check in work */
+ queue_work(system_bh_wq, &d->bh);
dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
}
}
@@ -956,7 +956,7 @@ static int k3_dma_probe(struct platform_device *op)
spin_lock_init(&d->lock);
INIT_LIST_HEAD(&d->chan_pending);
- tasklet_setup(&d->task, k3_dma_tasklet);
+ INIT_WORK(&d->bh, k3_dma_work);
platform_set_drvdata(op, d);
dev_info(&op->dev, "initialized\n");
@@ -981,9 +981,9 @@ static void k3_dma_remove(struct platform_device *op)
list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
list_del(&c->vc.chan.device_node);
- tasklet_kill(&c->vc.task);
+ cancel_work_sync(&c->vc.bh);
}
- tasklet_kill(&d->task);
+ cancel_work_sync(&d->bh);
clk_disable_unprepare(d->clk);
}
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 529100c5b9f5..c460805710c6 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -94,7 +94,7 @@ struct mtk_cqdma_vdesc {
* @base: The mapped register I/O base of this PC
* @irq: The IRQ that this PC are using
* @refcnt: Track how many VCs are using this PC
- * @tasklet: Tasklet for this PC
+ * @bh: Work for this PC
* @lock: Lock protect agaisting multiple VCs access PC
*/
struct mtk_cqdma_pchan {
@@ -104,7 +104,7 @@ struct mtk_cqdma_pchan {
refcount_t refcnt;
- struct tasklet_struct tasklet;
+ struct work_struct bh;
/* lock to protect PC */
spinlock_t lock;
@@ -355,9 +355,9 @@ static struct mtk_cqdma_vdesc
return ret;
}
-static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
+static void mtk_cqdma_work(struct work_struct *work)
{
- struct mtk_cqdma_pchan *pc = from_tasklet(pc, t, tasklet);
+ struct mtk_cqdma_pchan *pc = container_of(work, struct mtk_cqdma_pchan, bh);
struct mtk_cqdma_vdesc *cvd = NULL;
unsigned long flags;
@@ -378,7 +378,7 @@ static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
kfree(cvd);
}
- /* re-enable interrupt before leaving tasklet */
+ /* re-enable interrupt before leaving work */
enable_irq(pc->irq);
}
@@ -386,11 +386,11 @@ static irqreturn_t mtk_cqdma_irq(int irq, void *devid)
{
struct mtk_cqdma_device *cqdma = devid;
irqreturn_t ret = IRQ_NONE;
- bool schedule_tasklet = false;
+ bool queue_work = false;