/*
* BFQ, or Budget Fair Queueing, disk scheduler.
*
* Based on ideas and code from CFQ:
* Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
*
* Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
* Paolo Valente <paolo.valente@unimore.it>
*
* Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file.
*
* BFQ is a proportional share disk scheduling algorithm based on the
* slice-by-slice service scheme of CFQ. But BFQ assigns budgets,
* measured in number of sectors, to tasks instead of time slices.
* The disk is not granted to the active task for a given time slice,
 * but until it has exhausted its assigned budget. This change from
* the time to the service domain allows BFQ to distribute the disk
* bandwidth among tasks as desired, without any distortion due to
* ZBR, workload fluctuations or other factors. BFQ uses an ad hoc
* internal scheduler, called B-WF2Q+, to schedule tasks according to
* their budgets. Thanks to this accurate scheduler, BFQ can afford
* to assign high budgets to disk-bound non-seeky tasks (to boost the
* throughput), and yet guarantee low latencies to interactive and
* soft real-time applications.
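 *
 * As an illustration: on a disk with zoned bit recording, a task
 * reading from the fast outer zones transfers more sectors per time
 * slice than a task reading from the slow inner zones, so equal time
 * slices yield unequal bandwidth. With equal budgets expressed in
 * sectors, both tasks transfer the same amount of data per scheduling
 * round, whatever zone they read from.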
*
* BFQ has been introduced in [1], where the interested reader can
* find an accurate description of the algorithm, the bandwidth
* distribution and latency guarantees it provides, plus formal proofs
* of all the properties. With respect to the algorithm presented in
* the paper, this implementation adds several little heuristics, and
* a hierarchical extension, based on H-WF2Q+.
*
 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
* H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
* complexity derives from the one introduced with EEVDF in [3].
*
* [1] P. Valente and F. Checconi, ``High Throughput Disk Scheduling
 * with Deterministic Guarantees on Bandwidth Distribution,''
 * IEEE Transactions on Computers, May 2010.
*
* http://algo.ing.unimo.it/people/paolo/disk_sched/bfq-techreport.pdf
*
* [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
* Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
* Oct 1997.
*
* http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
*
* [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
* First: A Flexible and Accurate Mechanism for Proportional Share
* Resource Allocation,'' technical report.
*
* http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include "bfq.h"
/* Max number of dispatches in one round of service. */
static const int bfq_quantum = 4;
/* Expiration time of sync (0) and async (1) requests, in jiffies. */
static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* Maximum backwards seek, in KiB. */
static const int bfq_back_max = 16 * 1024;
/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;
/* Idling period duration, in jiffies. */
static int bfq_slice_idle = HZ / 125;
/* Default maximum budget values, in sectors and number of requests. */
static const int bfq_default_max_budget = 16 * 1024;
static const int bfq_max_budget_async_rq = 4;
/*
* Async to sync throughput distribution is controlled as follows:
* when an async request is served, the entity is charged the number
 * of sectors of the request, multiplied by the factor below.
*/
static const int bfq_async_charge_factor = 10;
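/*
 * For example, a 16-sector request from an async, non-weight-raised
 * queue is charged 16 * (1 + 10) = 176 sectors of budget in
 * bfq_serv_to_charge(), while sync (and weight-raised) queues are
 * charged the actual request size.
 */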
/* Default timeout values, in jiffies, approximating CFQ defaults. */
static const int bfq_timeout_sync = HZ / 8;
static int bfq_timeout_async = HZ / 25;
struct kmem_cache *bfq_pool;
struct kmem_cache *bfq_ioc_pool;
static DEFINE_PER_CPU(unsigned long, bfq_ioc_count);
static struct completion *bfq_ioc_gone;
static DEFINE_SPINLOCK(bfq_ioc_gone_lock);
static DEFINE_SPINLOCK(cic_index_lock);
static DEFINE_IDA(cic_index_ida);
/* Below this threshold (in ms), we consider thinktime immediate. */
#define BFQ_MIN_TT 2
/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD 4
#define BFQ_HW_QUEUE_SAMPLES 32
#define BFQQ_SEEK_THR (sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR)
/* Min samples used for peak rate estimation (for autotuning). */
#define BFQ_PEAK_RATE_SAMPLES 32
/* Shift used for peak rate fixed precision calculations. */
#define BFQ_RATE_SHIFT 16
#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
{ RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
#define RQ_CIC(rq) \
((struct cfq_io_context *) (rq)->elevator_private[0])
#define RQ_BFQQ(rq) ((rq)->elevator_private[1])
#include "bfq-ioc.c"
#include "bfq-sched.c"
#include "bfq-cgroup.c"
#define bfq_class_idle(bfqq) ((bfqq)->entity.ioprio_class ==\
IOPRIO_CLASS_IDLE)
#define bfq_class_rt(bfqq) ((bfqq)->entity.ioprio_class ==\
IOPRIO_CLASS_RT)
#define bfq_sample_valid(samples) ((samples) > 80)
/*
 * We regard a request as SYNC if it is either a read or has the SYNC
 * bit set (in which case it could also be a direct WRITE).
*/
static inline int bfq_bio_sync(struct bio *bio)
{
if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC))
return 1;
return 0;
}
/*
 * Schedule a run of the queue if there are pending requests and no
 * request already in the driver that will restart queueing.
*/
static inline void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
if (bfqd->queued != 0) {
bfq_log(bfqd, "schedule dispatch");
kblockd_schedule_work(bfqd->queue, &bfqd->unplug_work);
}
}
/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
*/
static struct request *bfq_choose_req(struct bfq_data *bfqd,
struct request *rq1,
struct request *rq2,
sector_t last)
{
sector_t s1, s2, d1 = 0, d2 = 0;
unsigned long back_max;
#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
unsigned wrap = 0; /* bit mask: requests behind the disk head? */
if (rq1 == NULL || rq1 == rq2)
return rq2;
if (rq2 == NULL)
return rq1;
if (rq_is_sync(rq1) && !rq_is_sync(rq2))
return rq1;
else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
return rq2;
if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
return rq1;
else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
return rq2;
s1 = blk_rq_pos(rq1);
s2 = blk_rq_pos(rq2);
/*
 * By definition, 1 KiB is 2 sectors.
*/
back_max = bfqd->bfq_back_max * 2;
/*
* Strict one way elevator _except_ in the case where we allow
 * short backward seeks, which are charged bfq_back_penalty times
 * the cost of a similar forward seek.
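 *
 * For example, with the default bfq_back_penalty of 2, a request
 * 1024 sectors behind the head is weighted like one 2048 sectors
 * ahead of it; anything farther back than back_max is flagged as
 * wrapped and chosen only as a last resort.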
*/
if (s1 >= last)
d1 = s1 - last;
else if (s1 + back_max >= last)
d1 = (last - s1) * bfqd->bfq_back_penalty;
else
wrap |= BFQ_RQ1_WRAP;
if (s2 >= last)
d2 = s2 - last;
else if (s2 + back_max >= last)
d2 = (last - s2) * bfqd->bfq_back_penalty;
else
wrap |= BFQ_RQ2_WRAP;
/* Found required data */
/*
 * By switching on the bit mask "wrap" we avoid having to check two
 * variables for all permutations, which is faster.
*/
switch (wrap) {
case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
if (d1 < d2)
return rq1;
else if (d2 < d1)
return rq2;
else {
if (s1 >= s2)
return rq1;
else
return rq2;
}
case BFQ_RQ2_WRAP:
return rq1;
case BFQ_RQ1_WRAP:
return rq2;
case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
default:
/*
* Since both rqs are wrapped,
* start with the one that's further behind head
* (--> only *one* back seek required),
* since back seek takes more time than forward.
*/
if (s1 <= s2)
return rq1;
else
return rq2;
}
}
static struct bfq_queue *
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
sector_t sector, struct rb_node **ret_parent,
struct rb_node ***rb_link)
{
struct rb_node **p, *parent;
struct bfq_queue *bfqq = NULL;
parent = NULL;
p = &root->rb_node;
while (*p) {
struct rb_node **n;
parent = *p;
bfqq = rb_entry(parent, struct bfq_queue, pos_node);
/*
* Sort strictly based on sector. Smallest to the left,
* largest to the right.
*/
if (sector > blk_rq_pos(bfqq->next_rq))
n = &(*p)->rb_right;
else if (sector < blk_rq_pos(bfqq->next_rq))
n = &(*p)->rb_left;
else
break;
p = n;
bfqq = NULL;
}
*ret_parent = parent;
if (rb_link)
*rb_link = p;
bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
(long long unsigned)sector,
bfqq != NULL ? bfqq->pid : 0);
return bfqq;
}
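/*
 * Reposition @bfqq in the rq_pos_tree: the queue is keyed by the
 * position of its next_rq. Idle-class queues and queues with no
 * pending request are kept out of the tree; so is a queue whose
 * position collides with one already in the tree.
 */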
static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
struct rb_node **p, *parent;
struct bfq_queue *__bfqq;
if (bfqq->pos_root != NULL) {
rb_erase(&bfqq->pos_node, bfqq->pos_root);
bfqq->pos_root = NULL;
}
if (bfq_class_idle(bfqq))
return;
if (!bfqq->next_rq)
return;
bfqq->pos_root = &bfqd->rq_pos_tree;
__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
blk_rq_pos(bfqq->next_rq), &parent, &p);
if (__bfqq == NULL) {
rb_link_node(&bfqq->pos_node, parent, p);
rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
} else
bfqq->pos_root = NULL;
}
static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
struct request *last)
{
struct rb_node *rbnext = rb_next(&last->rb_node);
struct rb_node *rbprev = rb_prev(&last->rb_node);
struct request *next = NULL, *prev = NULL;
BUG_ON(RB_EMPTY_NODE(&last->rb_node));
if (rbprev != NULL)
prev = rb_entry_rq(rbprev);
if (rbnext != NULL)
next = rb_entry_rq(rbnext);
else {
rbnext = rb_first(&bfqq->sort_list);
if (rbnext && rbnext != &last->rb_node)
next = rb_entry_rq(rbnext);
}
return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
}
static void bfq_del_rq_rb(struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
struct bfq_data *bfqd = bfqq->bfqd;
const int sync = rq_is_sync(rq);
BUG_ON(bfqq->queued[sync] == 0);
bfqq->queued[sync]--;
bfqd->queued--;
elv_rb_del(&bfqq->sort_list, rq);
if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->active_queue)
bfq_del_bfqq_busy(bfqd, bfqq, 1);
/*
* Remove queue from request-position tree as it is empty.
*/
if (bfqq->pos_root != NULL) {
rb_erase(&bfqq->pos_node, bfqq->pos_root);
bfqq->pos_root = NULL;
}
}
}
/* see the definition of bfq_async_charge_factor for details */
static inline unsigned long bfq_serv_to_charge(struct request *rq,
struct bfq_queue *bfqq)
{
return blk_rq_sectors(rq) *
(1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->raising_coeff == 1) *
bfq_async_charge_factor));
}
/**
* bfq_updated_next_req - update the queue after a new next_rq selection.
* @bfqd: the device data the queue belongs to.
* @bfqq: the queue to update.
*
* If the first request of a queue changes we make sure that the queue
* has enough budget to serve at least its first request (if the
 * request has grown). We do this because, if the queue does not have
 * enough budget for its first request, it has to go through two dispatch
* rounds to actually get it dispatched.
*/
static void bfq_updated_next_req(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
struct bfq_entity *entity = &bfqq->entity;
struct bfq_service_tree *st = bfq_entity_service_tree(entity);
struct request *next_rq = bfqq->next_rq;
unsigned long new_budget;
if (next_rq == NULL)
return;
if (bfqq == bfqd->active_queue)
/*
* In order not to break guarantees, budgets cannot be
* changed after an entity has been selected.
*/
return;
BUG_ON(entity->tree != &st->active);
BUG_ON(entity == entity->sched_data->active_entity);
new_budget = max_t(unsigned long, bfqq->max_budget,
bfq_serv_to_charge(next_rq, bfqq));
entity->budget = new_budget;
bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", new_budget);
bfq_activate_bfqq(bfqd, bfqq);
}
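/*
 * Insert @rq into the sort_list of its queue, pick the new next-serve
 * candidate, and handle the weight-raising state of the queue when
 * the low_latency tunable is set.
 */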
static void bfq_add_rq_rb(struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
struct bfq_entity *entity = &bfqq->entity;
struct bfq_data *bfqd = bfqq->bfqd;
struct request *__alias, *next_rq, *prev;
unsigned long old_raising_coeff = bfqq->raising_coeff;
int idle_for_long_time = bfqq->budget_timeout +
bfqd->bfq_raising_min_idle_time < jiffies;
bfq_log_bfqq(bfqd, bfqq, "add_rq_rb %d", rq_is_sync(rq));
bfqq->queued[rq_is_sync(rq)]++;
bfqd->queued++;
/*
* Looks a little odd, but the first insert might return an alias,
* if that happens, put the alias on the dispatch list.
*/
while ((__alias = elv_rb_add(&bfqq->sort_list, rq)) != NULL)
bfq_dispatch_insert(bfqd->queue, __alias);
/*
* Check if this request is a better next-serve candidate.
*/
prev = bfqq->next_rq;
next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
BUG_ON(next_rq == NULL);
bfqq->next_rq = next_rq;
/*
* Adjust priority tree position, if next_rq changes.
*/
if (prev != bfqq->next_rq)
bfq_rq_pos_tree_add(bfqd, bfqq);
if (!bfq_bfqq_busy(bfqq)) {
int soft_rt = bfqd->bfq_raising_max_softrt_rate > 0 &&
bfqq->soft_rt_next_start < jiffies;
entity->budget = max_t(unsigned long, bfqq->max_budget,
bfq_serv_to_charge(next_rq, bfqq));
		if (!bfqd->low_latency)
goto add_bfqq_busy;
/*
* If the queue is not being boosted and has been idle
		 * for enough time, start a weight-raising period.
*/
		if (old_raising_coeff == 1 && (idle_for_long_time || soft_rt)) {
			bfqq->raising_coeff = bfqd->bfq_raising_coeff;
			bfqq->raising_cur_max_time = idle_for_long_time ?
				bfqd->bfq_raising_max_time :
				bfqd->bfq_raising_rt_max_time;
			bfq_log_bfqq(bfqd, bfqq,
				     "wrais starting at %llu msec, "
				     "rais_max_time %u",
				     bfqq->last_rais_start_finish,
				     jiffies_to_msecs(bfqq->raising_cur_max_time));
		} else if (old_raising_coeff > 1) {
			if (idle_for_long_time)
				bfqq->raising_cur_max_time =
					bfqd->bfq_raising_max_time;
			else if (bfqq->raising_cur_max_time ==
				 bfqd->bfq_raising_rt_max_time &&
				 !soft_rt) {
				bfqq->raising_coeff = 1;
				bfq_log_bfqq(bfqd, bfqq,
					     "wrais ending at %llu msec, "
					     "rais_max_time %u",
					     bfqq->last_rais_start_finish,
					     jiffies_to_msecs(bfqq->raising_cur_max_time));
			}
		}
if (old_raising_coeff != bfqq->raising_coeff)
entity->ioprio_changed = 1;
add_bfqq_busy:
bfq_add_bfqq_busy(bfqd, bfqq);
} else {
		if (bfqd->low_latency && old_raising_coeff == 1 &&
		    !rq_is_sync(rq) &&
		    bfqq->last_rais_start_finish +
		    bfqd->bfq_raising_min_idle_time < jiffies) {
			bfqq->raising_coeff = bfqd->bfq_raising_coeff;
			entity->ioprio_changed = 1;
			bfq_log_bfqq(bfqd, bfqq,
				     "non-idle wrais starting at %llu msec, "
				     "rais_max_time %u",
				     bfqq->last_rais_start_finish,
				     jiffies_to_msecs(bfqq->raising_cur_max_time));
}
bfq_updated_next_req(bfqd, bfqq);
}
	if (bfqd->low_latency &&
(old_raising_coeff == 1 || bfqq->raising_coeff == 1 ||
idle_for_long_time))
bfqq->last_rais_start_finish = jiffies;
}
static void bfq_reposition_rq_rb(struct bfq_queue *bfqq, struct request *rq)
{
elv_rb_del(&bfqq->sort_list, rq);
bfqq->queued[rq_is_sync(rq)]--;
bfqq->bfqd->queued--;
bfq_add_rq_rb(rq);
}
static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
struct bio *bio)
{
struct task_struct *tsk = current;
struct cfq_io_context *cic;
struct bfq_queue *bfqq;
cic = bfq_cic_lookup(bfqd, tsk->io_context);
if (cic == NULL)
return NULL;
bfqq = cic_to_bfqq(cic, bfq_bio_sync(bio));
if (bfqq != NULL) {
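		/*
		 * A front-merge candidate is a queued request that
		 * starts exactly where this bio ends.
		 */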
sector_t sector = bio->bi_sector + bio_sectors(bio);
return elv_rb_find(&bfqq->sort_list, sector);
}
return NULL;
}
static void bfq_activate_request(struct request_queue *q, struct request *rq)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
bfqd->rq_in_driver++;
bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
(long long unsigned)bfqd->last_position);
}
static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
WARN_ON(bfqd->rq_in_driver == 0);
bfqd->rq_in_driver--;
}
static void bfq_remove_request(struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
struct bfq_data *bfqd = bfqq->bfqd;
if (bfqq->next_rq == rq) {
bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
bfq_updated_next_req(bfqd, bfqq);
}
list_del_init(&rq->queuelist);
bfq_del_rq_rb(rq);
if (rq->cmd_flags & REQ_META) {
WARN_ON(bfqq->meta_pending == 0);
bfqq->meta_pending--;
}
}
static int bfq_merge(struct request_queue *q, struct request **req,
struct bio *bio)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
struct request *__rq;
__rq = bfq_find_rq_fmerge(bfqd, bio);
if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) {
*req = __rq;
return ELEVATOR_FRONT_MERGE;
}
return ELEVATOR_NO_MERGE;
}
static void bfq_merged_request(struct request_queue *q, struct request *req,
int type)
{
if (type == ELEVATOR_FRONT_MERGE) {
struct bfq_queue *bfqq = RQ_BFQQ(req);
bfq_reposition_rq_rb(bfqq, req);
}
}
static void bfq_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
/*
* Reposition in fifo if next is older than rq.
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
list_move(&rq->queuelist, &next->queuelist);
rq_set_fifo_time(rq, rq_fifo_time(next));
}
if (bfqq->next_rq == next)
bfqq->next_rq = rq;
bfq_remove_request(next);
}
static int bfq_allow_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
struct cfq_io_context *cic;
struct bfq_queue *bfqq;
/* Disallow merge of a sync bio into an async request. */
if (bfq_bio_sync(bio) && !rq_is_sync(rq))
return 0;
/*
* Lookup the bfqq that this bio will be queued with. Allow
* merge only if rq is queued there.
*/
cic = bfq_cic_lookup(bfqd, current->io_context);
if (cic == NULL)
return 0;
bfqq = cic_to_bfqq(cic, bfq_bio_sync(bio));
return bfqq == RQ_BFQQ(rq);
}
static void __bfq_set_active_queue(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
if (bfqq != NULL) {
bfq_mark_bfqq_must_alloc(bfqq);
bfq_mark_bfqq_budget_new(bfqq);
bfq_clear_bfqq_fifo_expire(bfqq);
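		/*
		 * budgets_assigned is an exponentially weighted count
		 * of budget assignments: each assignment moves it 1/8
		 * of the way toward its saturation value of 256.
		 * bfq_max_budget() treats values above 194 (roughly
		 * ten assignments) as enough samples to trust the
		 * estimated peak rate.
		 */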
		bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
bfq_log_bfqq(bfqd, bfqq, "set_active_queue, cur-budget = %lu",
bfqq->entity.budget);
}
bfqd->active_queue = bfqq;
}
/*
* Get and set a new active queue for service.
*/
static struct bfq_queue *bfq_set_active_queue(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
if (!bfqq)
bfqq = bfq_get_next_queue(bfqd);
else
bfq_get_next_queue_forced(bfqd, bfqq);
__bfq_set_active_queue(bfqd, bfqq);
return bfqq;
}
static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd,
struct request *rq)
{
if (blk_rq_pos(rq) >= bfqd->last_position)
return blk_rq_pos(rq) - bfqd->last_position;
else
return bfqd->last_position - blk_rq_pos(rq);
}
/*
* Return true if bfqq has no request pending and rq is close enough to
* bfqd->last_position, or if rq is closer to bfqd->last_position than
 * bfqq->next_rq.
*/
static inline int bfq_rq_close(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct request *rq)
{
sector_t sdist = bfqq->seek_mean;
if (!bfq_sample_valid(bfqq->seek_samples))
sdist = BFQQ_SEEK_THR;
/* If seek_mean is large, using it as close criteria is meaningless */
if (sdist > BFQQ_SEEK_THR)
sdist = BFQQ_SEEK_THR;
return bfq_dist_from_last(bfqd, rq) <= sdist;
}
static struct bfq_queue *bfqq_close(struct bfq_data *bfqd,
struct bfq_queue *cur_bfqq)
{
struct rb_root *root = &bfqd->rq_pos_tree;
struct rb_node *parent, *node;
struct bfq_queue *__bfqq;
sector_t sector = bfqd->last_position;
if (RB_EMPTY_ROOT(root))
return NULL;
/*
* First, if we find a request starting at the end of the last
* request, choose it.
*/
__bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
if (__bfqq != NULL)
return __bfqq;
/*
* If the exact sector wasn't found, the parent of the NULL leaf
* will contain the closest sector (rq_pos_tree sorted by next_request
* position).
*/
__bfqq = rb_entry(parent, struct bfq_queue, pos_node);
if (bfq_rq_close(bfqd, cur_bfqq, __bfqq->next_rq))
return __bfqq;
if (blk_rq_pos(__bfqq->next_rq) < sector)
node = rb_next(&__bfqq->pos_node);
else
node = rb_prev(&__bfqq->pos_node);
if (node == NULL)
return NULL;
__bfqq = rb_entry(node, struct bfq_queue, pos_node);
if (bfq_rq_close(bfqd, cur_bfqq, __bfqq->next_rq))
return __bfqq;
return NULL;
}
/*
* bfqd - obvious
* cur_bfqq - passed in so that we don't decide that the current queue
* is closely cooperating with itself.
*
* We are assuming that cur_bfqq has dispatched at least one request,
* and that bfqd->last_position reflects a position on the disk associated
* with the I/O issued by cur_bfqq.
*/
static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
struct bfq_queue *cur_bfqq)
{
struct bfq_queue *bfqq;
if (bfq_class_idle(cur_bfqq))
return NULL;
if (!bfq_bfqq_sync(cur_bfqq))
return NULL;
if (BFQQ_SEEKY(cur_bfqq))
return NULL;
/* If device has only one backlogged bfq_queue, don't search. */
if (bfqd->busy_queues == 1)
return NULL;
/*
* We should notice if some of the queues are cooperating, e.g.
* working closely on the same area of the disk. In that case,
* we can group them together and don't waste time idling.
*/
bfqq = bfqq_close(bfqd, cur_bfqq);
if (bfqq == NULL || bfqq == cur_bfqq)
return NULL;
/*
* Do not merge queues from different bfq_groups.
*/
if (bfqq->entity.parent != cur_bfqq->entity.parent)
return NULL;
/*
* It only makes sense to merge sync queues.
*/
if (!bfq_bfqq_sync(bfqq))
return NULL;
if (BFQQ_SEEKY(bfqq))
return NULL;
/*
* Do not merge queues of different priority classes.
*/
if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq))
return NULL;
return bfqq;
}
/*
 * If enough samples have been computed (budgets_assigned above 194,
 * see __bfq_set_active_queue()), return the current max budget stored
 * in bfqd, which is dynamically updated according to the estimated
 * disk peak rate; otherwise return the default max budget.
*/
static inline unsigned long bfq_max_budget(struct bfq_data *bfqd)
{
return bfqd->budgets_assigned < 194 ? bfq_default_max_budget :
bfqd->bfq_max_budget;
}
/*
* Return min budget, which is a fraction of the current or default
 * max budget (currently 1/32).
*/
static inline unsigned long bfq_min_budget(struct bfq_data *bfqd)
{
return bfqd->budgets_assigned < 194 ? bfq_default_max_budget / 32 :
bfqd->bfq_max_budget / 32;
}
static void bfq_arm_slice_timer(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq = bfqd->active_queue;
struct cfq_io_context *cic;
unsigned long sl;
WARN_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
/* Idling is disabled, either manually or by past process history. */
if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_idle_window(bfqq))
return;
/* Tasks have exited, don't wait. */
cic = bfqd->active_cic;
if (cic == NULL || atomic_read(&cic->ioc->nr_tasks) == 0)
return;
bfq_mark_bfqq_wait_request(bfqq);
/*
* We don't want to idle for seeks, but we do want to allow
* fair distribution of slice time for a process doing back-to-back
 * seeks. So allow a little bit of time for it to submit a new rq.
*
* To prevent processes with (partly) seeky workloads from
* being too ill-treated, grant them a small fraction of the
* assigned budget before reducing the waiting time to
 * BFQ_MIN_TT. This has been observed to help reduce latency.
*/
sl = bfqd->bfq_slice_idle;
if (bfq_sample_valid(bfqq->seek_samples) && BFQQ_SEEKY(bfqq) &&
bfqq->entity.service > bfq_max_budget(bfqd) / 8 &&
bfqq->raising_coeff == 1)
sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT));
else if (bfqq->raising_coeff > 1)
sl = sl * 3;
bfqd->last_idling_start = ktime_get();
mod_timer(&bfqd->idle_slice_timer, jiffies + sl);
bfq_log(bfqd, "arm idle: %u/%u ms",
jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle));
}
/*
* Set the maximum time for the active queue to consume its
 * budget. This prevents seeky processes from lowering the disk
 * throughput (a bound that time-slice schemes such as CFQ provide
 * by construction).
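 *
 * Weight-raised queues get a proportionally longer timeout
 * (weight / orig_weight), except during the shorter soft real-time
 * raising period, where the coefficient stays 1.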
*/
static void bfq_set_budget_timeout(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq = bfqd->active_queue;
unsigned int timeout_coeff =
bfqq->raising_cur_max_time == bfqd->bfq_raising_rt_max_time ?
1 : (bfqq->entity.weight / bfqq->entity.orig_weight);
bfqd->last_budget_start = ktime_get();
bfq_clear_bfqq_budget_new(bfqq);
bfqq->budget_timeout = jiffies +
bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff;
bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] *
timeout_coeff));
}
/*
* Move request from internal lists to the request queue dispatch list.
*/
static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
struct bfq_queue *bfqq = RQ_BFQQ(rq);
bfq_remove_request(rq);
bfqq->dispatched++;
elv_dispatch_sort(q, rq);
if (bfq_bfqq_sync(bfqq))
bfqd->sync_flight++;
}
/*
* Return expired entry, or NULL to just start from scratch in rbtree.
*/
static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
{
struct request *rq = NULL;
if (bfq_bfqq_fifo_expire(bfqq))
return NULL;
bfq_mark_bfqq_fifo_expire(bfqq);
if (list_empty(&bfqq->fifo))
return NULL;
rq = rq_entry_fifo(bfqq->fifo.next);
if (time_before(jiffies, rq_fifo_time(rq)))
return NULL;
return rq;
}
/*
* Must be called with the queue_lock held.
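 *
 * Process references are the queue references not held by in-flight
 * requests, i.e., atomic_read(&bfqq->ref) minus the allocated
 * read/write requests; they approximate the number of processes
 * attached to the queue.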
*/
static int bfqq_process_refs(struct bfq_queue *bfqq)
{
int process_refs, io_refs;
io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
process_refs = atomic_read(&bfqq->ref) - io_refs;
BUG_ON(process_refs < 0);
return process_refs;
}
static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
{
int process_refs, new_process_refs;
struct bfq_queue *__bfqq;
/*
* If there are no process references on the new_bfqq, then it is
* unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
* may have dropped their last reference (not just their last process
* reference).
*/
if (!bfqq_process_refs(new_bfqq))
return;
/* Avoid a circular list and skip interim queue merges. */
while ((__bfqq = new_bfqq->new_bfqq)) {
if (__bfqq == bfqq)
return;
new_bfqq = __bfqq;
}
process_refs = bfqq_process_refs(bfqq);
new_process_refs = bfqq_process_refs(new_bfqq);
/*
* If the process for the bfqq has gone away, there is no
* sense in merging the queues.
*/
if (process_refs == 0 || new_process_refs == 0)
return;
/*
* Merge in the direction of the lesser amount of work.
*/
if (new_process_refs >= process_refs) {
bfqq->new_bfqq = new_bfqq;
atomic_add(process_refs, &new_bfqq->ref);
} else {
new_bfqq->new_bfqq = bfqq;
atomic_add(new_process_refs, &bfqq->ref);
}
bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
new_bfqq->pid);
}
static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)