add IO schedulers

commit 3376972a23857410f634ce90880e8e2da6294b44 1 parent dfffded
omegamoon authored
37 block/Kconfig.iosched 100644 → 100755
@@ -12,6 +12,17 @@ config IOSCHED_NOOP
12 12 that do their own scheduling and require only minimal assistance from
13 13 the kernel.
14 14
  15 +config IOSCHED_TEST
  16 + tristate "Test I/O scheduler"
  17 + depends on DEBUG_FS
  18 + default m
  19 + ---help---
  20 + The test I/O scheduler is a duplicate of the noop scheduler with
  21 + the addition of a test utility.
  22 + It allows testing a block device by dispatching specific requests
  23 + according to the test case and declaring PASS/FAIL based on the
  24 + requests' completion error codes.
  25 +
15 26 config IOSCHED_DEADLINE
16 27 tristate "Deadline I/O scheduler"
17 28 default y
@@ -43,6 +54,23 @@ config CFQ_GROUP_IOSCHED
43 54 ---help---
44 55 Enable group IO scheduling in CFQ.
45 56
  57 +config IOSCHED_VR
  58 + tristate "V(R) I/O scheduler"
  59 + default n
  60 + ---help---
  61 + Requests are chosen according to SSTF with a penalty of rev_penalty
  62 + for switching head direction.
  63 +
  64 +config IOSCHED_SIO
  65 + tristate "Simple I/O scheduler"
  66 + default y
  67 + ---help---
  68 + The Simple I/O scheduler is an extremely simple scheduler,
  69 + based on noop and deadline, that relies on deadlines to
  70 + ensure fairness. The algorithm does no sorting, only basic
  71 + merging, and tries to keep overhead to a minimum. It is aimed
  72 + mainly at random-access devices (e.g. flash devices).
  73 +
46 74 choice
47 75 prompt "Default I/O scheduler"
48 76 default DEFAULT_CFQ
@@ -59,13 +87,22 @@ choice
59 87 config DEFAULT_NOOP
60 88 bool "No-op"
61 89
  90 + config DEFAULT_VR
  91 + bool "V(R)" if IOSCHED_VR=y
  92 +
  93 + config DEFAULT_SIO
  94 + bool "SIO" if IOSCHED_SIO=y
  95 +
62 96 endchoice
63 97
64 98 config DEFAULT_IOSCHED
65 99 string
66 100 default "deadline" if DEFAULT_DEADLINE
67 101 default "cfq" if DEFAULT_CFQ
  102 + default "bfq" if DEFAULT_BFQ
68 103 default "noop" if DEFAULT_NOOP
  104 + default "vr" if DEFAULT_VR
  105 + default "sio" if DEFAULT_SIO
69 106
70 107 endmenu
71 108
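
The default chosen above only sets the boot-time elevator; a scheduler can also be switched per device at runtime through sysfs, provided it is built in or its module is loaded. A minimal userspace sketch in C follows; the device name sda is an assumption:

#include <stdio.h>

/* Path assumes the target device is sda. */
static const char *sched_path = "/sys/block/sda/queue/scheduler";

int main(void)
{
	char buf[256];
	FILE *f;

	/* List the registered schedulers; the active one is shown in brackets. */
	f = fopen(sched_path, "r");
	if (!f) {
		perror("open scheduler");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("schedulers: %s", buf);
	fclose(f);

	/* Activate the SIO elevator by its elevator_name, "sio". */
	f = fopen(sched_path, "w");
	if (!f) {
		perror("open scheduler for writing");
		return 1;
	}
	fputs("sio", f);
	fclose(f);
	return 0;
}

The same name can be passed on the kernel command line (elevator=sio) to make it the default at boot.
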
3  block/Makefile 100644 → 100755
@@ -13,6 +13,9 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
13 13 obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
14 14 obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
15 15 obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
  16 +obj-$(CONFIG_IOSCHED_VR) += vr-iosched.o
  17 +obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o
  18 +obj-$(CONFIG_IOSCHED_TEST) += test-iosched.o
16 19
17 20 obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
18 21 obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
346 block/sio-iosched.c
... ... @@ -0,0 +1,346 @@
  1 +/*
  2 + * Simple IO scheduler
  3 + * Based on Noop, Deadline and V(R) IO schedulers.
  4 + *
  5 + * Copyright (C) 2010 Miguel Boton <mboton@gmail.com>
  6 + *
  7 + *
  8 + * This algorithm does not do any kind of sorting, as it is aimed at
  9 + * random-access devices, but it does some basic merging. We try to
  10 + * keep overhead to a minimum to achieve low latency.
  11 + *
  12 + * Asynchronous and synchronous requests are not treated separately;
  13 + * instead we rely on deadlines to ensure fairness.
  14 + *
  15 + */
  16 +#include <linux/blkdev.h>
  17 +#include <linux/elevator.h>
  18 +#include <linux/bio.h>
  19 +#include <linux/module.h>
  20 +#include <linux/init.h>
  21 +#include <linux/slab.h>
  22 +
  23 +enum {
  24 + ASYNC,
  25 + SYNC,
  26 +};
  27 +
  28 +/* Tunables */
  29 +static const int sync_expire = HZ / 2; /* max time before a sync is submitted. */
  30 +static const int async_expire = 5 * HZ; /* ditto for async, these limits are SOFT! */
  31 +static const int fifo_batch = 16; /* # of sequential requests treated as one
  32 + by the above parameters. For throughput. */
  33 +
  34 +/* Elevator data */
  35 +struct sio_data {
  36 + /* Request queues */
  37 + struct list_head fifo_list[2];
  38 +
  39 + /* Attributes */
  40 + unsigned int batched;
  41 +
  42 + /* Settings */
  43 + int fifo_expire[2];
  44 + int fifo_batch;
  45 +};
  46 +
  47 +static void
  48 +sio_merged_requests(struct request_queue *q, struct request *rq,
  49 + struct request *next)
  50 +{
  51 + /*
  52 + * If next expires before rq, assign its expire time to rq
  53 + * and move into next position (next will be deleted) in fifo.
  54 + */
  55 + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
  56 + if (time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
  57 + list_move(&rq->queuelist, &next->queuelist);
  58 + rq_set_fifo_time(rq, rq_fifo_time(next));
  59 + }
  60 + }
  61 +
  62 + /* Delete next request */
  63 + rq_fifo_clear(next);
  64 +}
  65 +
  66 +static void
  67 +sio_add_request(struct request_queue *q, struct request *rq)
  68 +{
  69 + struct sio_data *sd = q->elevator->elevator_data;
  70 + const int sync = rq_is_sync(rq);
  71 +
  72 + /*
  73 + * Add request to the proper fifo list and set its
  74 + * expire time.
  75 + */
  76 + rq_set_fifo_time(rq, jiffies + sd->fifo_expire[sync]);
  77 + list_add_tail(&rq->queuelist, &sd->fifo_list[sync]);
  78 +}
  79 +
  80 +static int
  81 +sio_queue_empty(struct request_queue *q)
  82 +{
  83 + struct sio_data *sd = q->elevator->elevator_data;
  84 +
  85 + /* Check if fifo lists are empty */
  86 + return list_empty(&sd->fifo_list[SYNC]) &&
  87 + list_empty(&sd->fifo_list[ASYNC]);
  88 +}
  89 +
  90 +static struct request *
  91 +sio_expired_request(struct sio_data *sd, int sync)
  92 +{
  93 + struct request *rq;
  94 +
  95 + if (list_empty(&sd->fifo_list[sync]))
  96 + return NULL;
  97 +
  98 + /* Retrieve request */
  99 + rq = rq_entry_fifo(sd->fifo_list[sync].next);
  100 +
  101 + /* Request has expired */
  102 + if (time_after(jiffies, rq_fifo_time(rq)))
  103 + return rq;
  104 +
  105 + return NULL;
  106 +}
  107 +
  108 +static struct request *
  109 +sio_choose_expired_request(struct sio_data *sd)
  110 +{
  111 + struct request *sync = sio_expired_request(sd, SYNC);
  112 + struct request *async = sio_expired_request(sd, ASYNC);
  113 +
  114 + /*
  115 + * Check expired requests. Asynchronous requests have
  116 + * priority over synchronous.
  117 + */
  118 + if (sync && async)
  119 + return async;
  120 + if (sync)
  121 + return sync;
  122 +
  123 + return async;
  124 +
  125 +}
  126 +
  127 +static struct request *
  128 +sio_choose_request(struct sio_data *sd)
  129 +{
  130 + /*
  131 + * Retrieve request from available fifo list.
  132 + * Synchronous requests have priority over asynchronous.
  133 + */
  134 + if (!list_empty(&sd->fifo_list[SYNC]))
  135 + return rq_entry_fifo(sd->fifo_list[SYNC].next);
  136 +
  137 + if (!list_empty(&sd->fifo_list[ASYNC]))
  138 + return rq_entry_fifo(sd->fifo_list[ASYNC].next);
  139 +
  140 + return NULL;
  141 +}
  142 +
  143 +static inline void
  144 +sio_dispatch_request(struct sio_data *sd, struct request *rq)
  145 +{
  146 + /*
  147 + * Remove the request from the fifo list
  148 + * and dispatch it.
  149 + */
  150 + rq_fifo_clear(rq);
  151 + elv_dispatch_add_tail(rq->q, rq);
  152 +
  153 + sd->batched++;
  154 +}
  155 +
  156 +static int
  157 +sio_dispatch_requests(struct request_queue *q, int force)
  158 +{
  159 + struct sio_data *sd = q->elevator->elevator_data;
  160 + struct request *rq = NULL;
  161 +
  162 + /*
  163 + * Retrieve any expired request after a batch of
  164 + * sequential requests.
  165 + */
  166 + if (sd->batched > sd->fifo_batch) {
  167 + sd->batched = 0;
  168 + rq = sio_choose_expired_request(sd);
  169 + }
  170 +
  171 + /* Retrieve request */
  172 + if (!rq) {
  173 + rq = sio_choose_request(sd);
  174 + if (!rq)
  175 + return 0;
  176 + }
  177 +
  178 + /* Dispatch request */
  179 + sio_dispatch_request(sd, rq);
  180 +
  181 + return 1;
  182 +}
  183 +
  184 +static struct request *
  185 +sio_former_request(struct request_queue *q, struct request *rq)
  186 +{
  187 + struct sio_data *sd = q->elevator->elevator_data;
  188 + const int sync = rq_is_sync(rq);
  189 +
  190 + if (rq->queuelist.prev == &sd->fifo_list[sync])
  191 + return NULL;
  192 +
  193 + /* Return former request */
  194 + return list_entry(rq->queuelist.prev, struct request, queuelist);
  195 +}
  196 +
  197 +static struct request *
  198 +sio_latter_request(struct request_queue *q, struct request *rq)
  199 +{
  200 + struct sio_data *sd = q->elevator->elevator_data;
  201 + const int sync = rq_is_sync(rq);
  202 +
  203 + if (rq->queuelist.next == &sd->fifo_list[sync])
  204 + return NULL;
  205 +
  206 + /* Return latter request */
  207 + return list_entry(rq->queuelist.next, struct request, queuelist);
  208 +}
  209 +
  210 +static void *
  211 +sio_init_queue(struct request_queue *q)
  212 +{
  213 + struct sio_data *sd;
  214 +
  215 + /* Allocate structure */
  216 + sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node);
  217 + if (!sd)
  218 + return NULL;
  219 +
  220 + /* Initialize fifo lists */
  221 + INIT_LIST_HEAD(&sd->fifo_list[SYNC]);
  222 + INIT_LIST_HEAD(&sd->fifo_list[ASYNC]);
  223 +
  224 + /* Initialize data */
  225 + sd->batched = 0;
  226 + sd->fifo_expire[SYNC] = sync_expire;
  227 + sd->fifo_expire[ASYNC] = async_expire;
  228 + sd->fifo_batch = fifo_batch;
  229 +
  230 + return sd;
  231 +}
  232 +
  233 +static void
  234 +sio_exit_queue(struct elevator_queue *e)
  235 +{
  236 + struct sio_data *sd = e->elevator_data;
  237 +
  238 + BUG_ON(!list_empty(&sd->fifo_list[SYNC]));
  239 + BUG_ON(!list_empty(&sd->fifo_list[ASYNC]));
  240 +
  241 + /* Free structure */
  242 + kfree(sd);
  243 +}
  244 +
  245 +/*
  246 + * sysfs code
  247 + */
  248 +
  249 +static ssize_t
  250 +sio_var_show(int var, char *page)
  251 +{
  252 + return sprintf(page, "%d\n", var);
  253 +}
  254 +
  255 +static ssize_t
  256 +sio_var_store(int *var, const char *page, size_t count)
  257 +{
  258 + char *p = (char *) page;
  259 +
  260 + *var = simple_strtol(p, &p, 10);
  261 + return count;
  262 +}
  263 +
  264 +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
  265 +static ssize_t __FUNC(struct elevator_queue *e, char *page) \
  266 +{ \
  267 + struct sio_data *sd = e->elevator_data; \
  268 + int __data = __VAR; \
  269 + if (__CONV) \
  270 + __data = jiffies_to_msecs(__data); \
  271 + return sio_var_show(__data, (page)); \
  272 +}
  273 +SHOW_FUNCTION(sio_sync_expire_show, sd->fifo_expire[SYNC], 1);
  274 +SHOW_FUNCTION(sio_async_expire_show, sd->fifo_expire[ASYNC], 1);
  275 +SHOW_FUNCTION(sio_fifo_batch_show, sd->fifo_batch, 0);
  276 +#undef SHOW_FUNCTION
  277 +
  278 +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
  279 +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
  280 +{ \
  281 + struct sio_data *sd = e->elevator_data; \
  282 + int __data; \
  283 + int ret = sio_var_store(&__data, (page), count); \
  284 + if (__data < (MIN)) \
  285 + __data = (MIN); \
  286 + else if (__data > (MAX)) \
  287 + __data = (MAX); \
  288 + if (__CONV) \
  289 + *(__PTR) = msecs_to_jiffies(__data); \
  290 + else \
  291 + *(__PTR) = __data; \
  292 + return ret; \
  293 +}
  294 +STORE_FUNCTION(sio_sync_expire_store, &sd->fifo_expire[SYNC], 0, INT_MAX, 1);
  295 +STORE_FUNCTION(sio_async_expire_store, &sd->fifo_expire[ASYNC], 0, INT_MAX, 1);
  296 +STORE_FUNCTION(sio_fifo_batch_store, &sd->fifo_batch, 0, INT_MAX, 0);
  297 +#undef STORE_FUNCTION
  298 +
  299 +#define DD_ATTR(name) \
  300 + __ATTR(name, S_IRUGO|S_IWUSR, sio_##name##_show, \
  301 + sio_##name##_store)
  302 +
  303 +static struct elv_fs_entry sio_attrs[] = {
  304 + DD_ATTR(sync_expire),
  305 + DD_ATTR(async_expire),
  306 + DD_ATTR(fifo_batch),
  307 + __ATTR_NULL
  308 +};
  309 +
  310 +static struct elevator_type iosched_sio = {
  311 + .ops = {
  312 + .elevator_merge_req_fn = sio_merged_requests,
  313 + .elevator_dispatch_fn = sio_dispatch_requests,
  314 + .elevator_add_req_fn = sio_add_request,
  315 +// .elevator_queue_empty_fn = sio_queue_empty,
  316 + .elevator_former_req_fn = sio_former_request,
  317 + .elevator_latter_req_fn = sio_latter_request,
  318 + .elevator_init_fn = sio_init_queue,
  319 + .elevator_exit_fn = sio_exit_queue,
  320 + },
  321 +
  322 + .elevator_attrs = sio_attrs,
  323 + .elevator_name = "sio",
  324 + .elevator_owner = THIS_MODULE,
  325 +};
  326 +
  327 +static int __init sio_init(void)
  328 +{
  329 + /* Register elevator */
  330 + elv_register(&iosched_sio);
  331 +
  332 + return 0;
  333 +}
  334 +
  335 +static void __exit sio_exit(void)
  336 +{
  337 + /* Unregister elevator */
  338 + elv_unregister(&iosched_sio);
  339 +}
  340 +
  341 +module_init(sio_init);
  342 +module_exit(sio_exit);
  343 +
  344 +MODULE_AUTHOR("Miguel Boton");
  345 +MODULE_LICENSE("GPL");
  346 +MODULE_DESCRIPTION("Simple IO scheduler");
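
The sync_expire, async_expire and fifo_batch attributes registered above show up under the elevator's sysfs directory (queue/iosched/) once SIO is the active scheduler; the two expire values are exposed in milliseconds and fifo_batch is a plain request count. A short tuning sketch, again assuming sda and arbitrary example values:

#include <stdio.h>

/* Write one value to a SIO tunable; assumes sda with sio active. */
static int set_sio_tunable(const char *name, int value)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/sda/queue/iosched/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", value);
	fclose(f);
	return 0;
}

int main(void)
{
	set_sio_tunable("sync_expire", 250);   /* milliseconds */
	set_sio_tunable("async_expire", 5000); /* milliseconds */
	set_sio_tunable("fifo_batch", 16);     /* requests per batch */
	return 0;
}
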
1,038 block/test-iosched.c
... ... @@ -0,0 +1,1038 @@
  1 +/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
  2 + *
  3 + * This program is free software; you can redistribute it and/or modify
  4 + * it under the terms of the GNU General Public License version 2 and
  5 + * only version 2 as published by the Free Software Foundation.
  6 + *
  7 + * This program is distributed in the hope that it will be useful,
  8 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10 + * GNU General Public License for more details.
  11 + *
  12 + * The test scheduler allows testing the block device by dispatching
  13 + * specific requests according to the test case and declaring PASS/FAIL
  14 + * based on the requests' completion error codes.
  15 + * Each test is exposed via debugfs and can be triggered by writing to
  16 + * the debugfs file.
  17 + *
  18 + */
  19 +
  20 +/* elevator test iosched */
  21 +#include <linux/blkdev.h>
  22 +#include <linux/elevator.h>
  23 +#include <linux/bio.h>
  24 +#include <linux/module.h>
  25 +#include <linux/slab.h>
  26 +#include <linux/init.h>
  27 +#include <linux/debugfs.h>
  28 +#include <linux/test-iosched.h>
  29 +#include <linux/delay.h>
  30 +#include "blk.h"
  31 +
  32 +#define MODULE_NAME "test-iosched"
  33 +#define WR_RD_START_REQ_ID 1234
  34 +#define UNIQUE_START_REQ_ID 5678
  35 +#define TIMEOUT_TIMER_MS 40000
  36 +#define TEST_MAX_TESTCASE_ROUNDS 15
  37 +
  38 +#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
  39 +#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
  40 +#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
  41 +
  42 +static DEFINE_SPINLOCK(blk_dev_test_list_lock);
  43 +static LIST_HEAD(blk_dev_test_list);
  44 +static struct test_data *ptd;
  45 +
  46 +/* Get the request after `test_rq' in the test requests list */
  47 +static struct test_request *
  48 +latter_test_request(struct request_queue *q,
  49 + struct test_request *test_rq)
  50 +{
  51 + struct test_data *td = q->elevator->elevator_data;
  52 +
  53 + if (test_rq->queuelist.next == &td->test_queue)
  54 + return NULL;
  55 + return list_entry(test_rq->queuelist.next, struct test_request,
  56 + queuelist);
  57 +}
  58 +
  59 +/**
  60 + * test_iosched_get_req_queue() - returns the request queue
  61 + * served by the scheduler
  62 + */
  63 +struct request_queue *test_iosched_get_req_queue(void)
  64 +{
  65 + if (!ptd)
  66 + return NULL;
  67 +
  68 + return ptd->req_q;
  69 +}
  70 +EXPORT_SYMBOL(test_iosched_get_req_queue);
  71 +
  72 +/**
  73 + * test_iosched_mark_test_completion() - Wake up the debugfs
  74 + * thread waiting on the test completion
  75 + */
  76 +void test_iosched_mark_test_completion(void)
  77 +{
  78 + if (!ptd)
  79 + return;
  80 +
  81 + ptd->test_state = TEST_COMPLETED;
  82 + wake_up(&ptd->wait_q);
  83 +}
  84 +EXPORT_SYMBOL(test_iosched_mark_test_completion);
  85 +
  86 +/* Check if all the queued test requests were completed */
  87 +static void check_test_completion(void)
  88 +{
  89 + struct test_request *test_rq;
  90 + struct request *rq;
  91 +
  92 + list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
  93 + rq = test_rq->rq;
  94 + if (!test_rq->req_completed)
  95 + return;
  96 + }
  97 +
  98 + test_pr_info("%s: Test is completed", __func__);
  99 +
  100 + test_iosched_mark_test_completion();
  101 +}
  102 +
  103 +/*
  104 + * A callback to be called per bio completion.
  105 + * Frees the bio memory.
  106 + */
  107 +static void end_test_bio(struct bio *bio, int err)
  108 +{
  109 + if (err)
  110 + clear_bit(BIO_UPTODATE, &bio->bi_flags);
  111 +
  112 + bio_put(bio);
  113 +}
  114 +
  115 +/*
  116 + * A callback to be called per request completion.
  117 + * The request memory is not freed here; it will be freed later, after
  118 + * the test results have been checked.
  119 + */
  120 +static void end_test_req(struct request *rq, int err)
  121 +{
  122 + struct test_request *test_rq;
  123 +
  124 + test_rq = (struct test_request *)rq->elevator_private[0];
  125 + BUG_ON(!test_rq);
  126 +
  127 + test_pr_info("%s: request %d completed, err=%d",
  128 + __func__, test_rq->req_id, err);
  129 +
  130 + test_rq->req_completed = true;
  131 + test_rq->req_result = err;
  132 +
  133 + check_test_completion();
  134 +}
  135 +
  136 +/**
  137 + * test_iosched_add_unique_test_req() - Create and queue a non
  138 + * read/write request (such as FLUSH/DISCARD/SANITIZE).
  139 + * @is_err_expcted: A flag to indicate if this request
  140 + * should succeed or not
  141 + * @req_unique: The type of request to add
  142 + * @start_sec: start address of the first bio
  143 + * @nr_sects: number of sectors in the request
  144 + * @end_req_io: specific completion callback. When not
  145 + * set, the default callback will be used
  146 + */
  147 +int test_iosched_add_unique_test_req(int is_err_expcted,
  148 + enum req_unique_type req_unique,
  149 + int start_sec, int nr_sects, rq_end_io_fn *end_req_io)
  150 +{
  151 + struct bio *bio;
  152 + struct request *rq;
  153 + int rw_flags;
  154 + struct test_request *test_rq;
  155 +
  156 + if (!ptd)
  157 + return -ENODEV;
  158 +
  159 + bio = bio_alloc(GFP_KERNEL, 0);
  160 + if (!bio) {
  161 + test_pr_err("%s: Failed to allocate a bio", __func__);
  162 + return -ENODEV;
  163 + }
  164 + bio_get(bio);
  165 + bio->bi_end_io = end_test_bio;
  166 +
  167 + switch (req_unique) {
  168 + case REQ_UNIQUE_FLUSH:
  169 + bio->bi_rw = WRITE_FLUSH;
  170 + break;
  171 + case REQ_UNIQUE_DISCARD:
  172 + bio->bi_rw = REQ_WRITE | REQ_DISCARD;
  173 + bio->bi_size = nr_sects << 9;
  174 + bio->bi_sector = start_sec;
  175 + break;
  176 + case REQ_UNIQUE_SANITIZE:
  177 + bio->bi_rw = REQ_WRITE | REQ_SANITIZE;
  178 + break;
  179 + default:
  180 + test_pr_err("%s: Invalid request type %d", __func__,
  181 + req_unique);
  182 + bio_put(bio);
  183 + return -ENODEV;
  184 + }
  185 +
  186 + rw_flags = bio_data_dir(bio);
  187 + if (bio->bi_rw & REQ_SYNC)
  188 + rw_flags |= REQ_SYNC;
  189 +
  190 + rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
  191 + if (!rq) {
  192 + test_pr_err("%s: Failed to allocate a request", __func__);
  193 + bio_put(bio);
  194 + return -ENODEV;
  195 + }
  196 +
  197 + init_request_from_bio(rq, bio);
  198 + if (end_req_io)
  199 + rq->end_io = end_req_io;
  200 + else
  201 + rq->end_io = end_test_req;
  202 +
  203 + test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
  204 + if (!test_rq) {
  205 + test_pr_err("%s: Failed to allocate a test request", __func__);
  206 + bio_put(bio);
  207 + blk_put_request(rq);
  208 + return -ENODEV;
  209 + }
  210 + test_rq->req_completed = false;
  211 + test_rq->req_result = -EINVAL;
  212 + test_rq->rq = rq;
  213 + test_rq->is_err_expected = is_err_expcted;
  214 + rq->elevator_private[0] = (void *)test_rq;
  215 + test_rq->req_id = ptd->unique_next_req_id++;
  216 +
  217 + test_pr_debug(
  218 + "%s: added request %d to the test requests list, type = %d",
  219 + __func__, test_rq->req_id, req_unique);
  220 +
  221 + list_add_tail(&test_rq->queuelist, &ptd->test_queue);
  222 +
  223 + return 0;
  224 +}
  225 +EXPORT_SYMBOL(test_iosched_add_unique_test_req);
  226 +
  227 +/*
  228 + * Fill the request data buffer with the given pattern.
  229 + * If the pattern is TEST_PATTERN_SEQUENTIAL, the buffer is filled with
  230 + * sequential numbers; TEST_NO_PATTERN leaves the buffer untouched.
  231 + */
  232 +static void fill_buf_with_pattern(int *buf, int num_bytes, int pattern)
  233 +{
  234 + int i = 0;
  235 + int num_of_dwords = num_bytes/sizeof(int);
  236 +
  237 + if (pattern == TEST_NO_PATTERN)
  238 + return;
  239 +
  240 + /* num_bytes should be aligned to sizeof(int) */
  241 + BUG_ON((num_bytes % sizeof(int)) != 0);
  242 +
  243 + if (pattern == TEST_PATTERN_SEQUENTIAL) {
  244 + for (i = 0; i < num_of_dwords; i++)
  245 + buf[i] = i;
  246 + } else {
  247 + for (i = 0; i < num_of_dwords; i++)
  248 + buf[i] = pattern;
  249 + }
  250 +}
  251 +
  252 +/**
  253 + * test_iosched_add_wr_rd_test_req() - Create and queue a
  254 + * read/write request.
  255 + * @is_err_expcted: A flag to indicate if this request
  256 + * should succeed or not
  257 + * @direction: READ/WRITE
  258 + * @start_sec: start address of the first bio
  259 + * @num_bios: number of BIOs to be allocated for the
  260 + * request
  261 + * @pattern: A pattern, to be written into the write
  262 + * requests data buffer. In case of READ
  263 + * request, the given pattern is kept as
  264 + * the expected pattern. The expected
  265 + * pattern will be compared in the test
  266 + * check result function. If no comparison
  267 + * is required, set pattern to
  268 + * TEST_NO_PATTERN.
  269 + * @end_req_io: specific completion callback. When not
  270 + * set, the default callback will be used
  271 + *
  272 + * This function allocates the test request and the block
  273 + * request and calls blk_rq_map_kern which allocates the
  274 + * required BIO. The allocated test request and the block
  275 + * request memory is freed at the end of the test and the
  276 + * allocated BIO memory is freed by end_test_bio.
  277 + */
  278 +int test_iosched_add_wr_rd_test_req(int is_err_expcted,
  279 + int direction, int start_sec,
  280 + int num_bios, int pattern, rq_end_io_fn *end_req_io)
  281 +{
  282 + struct request *rq = NULL;
  283 + struct test_request *test_rq = NULL;
  284 + int rw_flags = 0;
  285 + int buf_size = 0;
  286 + int ret = 0, i = 0;
  287 + unsigned int *bio_ptr = NULL;
  288 + struct bio *bio = NULL;
  289 +
  290 + if (!ptd)
  291 + return -ENODEV;
  292 +
  293 + rw_flags = direction;
  294 +
  295 + rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
  296 + if (!rq) {
  297 + test_pr_err("%s: Failed to allocate a request", __func__);
  298 + return -ENODEV;
  299 + }
  300 +
  301 + test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
  302 + if (!test_rq) {
  303 + test_pr_err("%s: Failed to allocate test request", __func__);
  304 + blk_put_request(rq);
  305 + return -ENODEV;
  306 + }
  307 +
  308 + buf_size = sizeof(unsigned int) * BIO_U32_SIZE * num_bios;
  309 + test_rq->bios_buffer = kzalloc(buf_size, GFP_KERNEL);
  310 + if (!test_rq->bios_buffer) {
  311 + test_pr_err("%s: Failed to allocate the data buf", __func__);
  312 + goto err;
  313 + }
  314 + test_rq->buf_size = buf_size;
  315 +
  316 + if (direction == WRITE)
  317 + fill_buf_with_pattern(test_rq->bios_buffer,
  318 + buf_size, pattern);
  319 + test_rq->wr_rd_data_pattern = pattern;
  320 +
  321 + bio_ptr = test_rq->bios_buffer;
  322 + for (i = 0; i < num_bios; ++i) {
  323 + ret = blk_rq_map_kern(ptd->req_q, rq,
  324 + (void *)bio_ptr,
  325 + sizeof(unsigned int)*BIO_U32_SIZE,
  326 + GFP_KERNEL);
  327 + if (ret) {
  328 + test_pr_err("%s: blk_rq_map_kern returned error %d",
  329 + __func__, ret);
  330 + goto err;
  331 + }
  332 + bio_ptr += BIO_U32_SIZE;
  333 + }
  334 +
  335 + if (end_req_io)
  336 + rq->end_io = end_req_io;
  337 + else
  338 + rq->end_io = end_test_req;
  339 + rq->__sector = start_sec;
  340 + rq->cmd_type |= REQ_TYPE_FS;
  341 +
  342 + if (rq->bio) {
  343 + rq->bio->bi_sector = start_sec;
  344 + rq->bio->bi_end_io = end_test_bio;
  345 + bio = rq->bio;
  346 + while ((bio = bio->bi_next) != NULL)
  347 + bio->bi_end_io = end_test_bio;
  348 + }
  349 +
  350 + ptd->num_of_write_bios += num_bios;
  351 + test_rq->req_id = ptd->wr_rd_next_req_id++;
  352 +
  353 + test_rq->req_completed = false;
  354 + test_rq->req_result = -EINVAL;
  355 + test_rq->rq = rq;
  356 + test_rq->is_err_expected = is_err_expcted;
  357 + rq->elevator_private[0] = (void *)test_rq;
  358 +
  359 + test_pr_debug(
  360 + "%s: added request %d to the test requests list, buf_size=%d",
  361 + __func__, test_rq->req_id, buf_size);
  362 +
  363 + list_add_tail(&test_rq->queuelist, &ptd->test_queue);
  364 +
  365 + return 0;
  366 +err:
  367 + blk_put_request(rq);
  368 + kfree(test_rq->bios_buffer);
  369 + return -ENODEV;
  370 +}
  371 +EXPORT_SYMBOL(test_iosched_add_wr_rd_test_req);
  372 +
  373 +/* Converts the testcase number into a string */
  374 +static char *get_test_case_str(struct test_data *td)
  375 +{
  376 + if (td->test_info.get_test_case_str_fn)
  377 + return td->test_info.get_test_case_str_fn(td);
  378 +
  379 + return "Unknown testcase";
  380 +}
  381 +
  382 +/*
  383 + * Verify that the test request data buffer includes the expected
  384 + * pattern
  385 + */
  386 +static int compare_buffer_to_pattern(struct test_request *test_rq)
  387 +{
  388 + int i = 0;
  389 + int num_of_dwords = test_rq->buf_size/sizeof(int);
  390 +
  391 + /* buf_size should be aligned to sizeof(int) */
  392 + BUG_ON((test_rq->buf_size % sizeof(int)) != 0);
  393 + BUG_ON(test_rq->bios_buffer == NULL);
  394 +
  395 + if (test_rq->wr_rd_data_pattern == TEST_NO_PATTERN)
  396 + return 0;
  397 +
  398 + if (test_rq->wr_rd_data_pattern == TEST_PATTERN_SEQUENTIAL) {
  399 + for (i = 0; i < num_of_dwords; i++) {
  400 + if (test_rq->bios_buffer[i] != i) {
  401 + test_pr_err(
  402 + "%s: wrong pattern 0x%x in index %d",
  403 + __func__, test_rq->bios_buffer[i], i);
  404 + return -EINVAL;
  405 + }
  406 + }
  407 + } else {
  408 + for (i = 0; i < num_of_dwords; i++) {
  409 + if (test_rq->bios_buffer[i] !=
  410 + test_rq->wr_rd_data_pattern) {
  411 + test_pr_err(
  412 + "%s: wrong pattern 0x%x in index %d",
  413 + __func__, test_rq->bios_buffer[i], i);
  414 + return -EINVAL;
  415 + }
  416 + }
  417 + }
  418 +
  419 + return 0;
  420 +}
  421 +
  422 +/*
  423 + * Determine if the test passed or failed.
  424 + * The function checks each test request's completion value and calls
  425 + * the check_test_result_fn callback for result checks that are
  426 + * specific to the test case.
  427 + */
  428 +static int check_test_result(struct test_data *td)
  429 +{
  430 + struct test_request *test_rq;
  431 + struct request *rq;
  432 + int res = 0;
  433 + static int run;
  434 +
  435 + list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
  436 + rq = test_rq->rq;
  437 + if (!test_rq->req_completed) {
  438 + test_pr_err("%s: rq %d not completed", __func__,
  439 + test_rq->req_id);
  440 + res = -EINVAL;
  441 + goto err;
  442 + }
  443 +
  444 + if ((test_rq->req_result < 0) && !test_rq->is_err_expected) {
  445 + test_pr_err(
  446 + "%s: rq %d completed with err, not as expected",
  447 + __func__, test_rq->req_id);
  448 + res = -EINVAL;
  449 + goto err;
  450 + }
  451 + if ((test_rq->req_result == 0) && test_rq->is_err_expected) {
  452 + test_pr_err("%s: rq %d succeeded, not as expected",
  453 + __func__, test_rq->req_id);
  454 + res = -EINVAL;
  455 + goto err;
  456 + }
  457 + if (rq_data_dir(test_rq->rq) == READ) {
  458 + res = compare_buffer_to_pattern(test_rq);
  459 + if (res) {
  460 + test_pr_err("%s: read pattern not as expected",
  461 + __func__);
  462 + res = -EINVAL;
  463 + goto err;
  464 + }
  465 + }
  466 + }
  467 +
  468 + if (td->test_info.check_test_result_fn) {
  469 + res = td->test_info.check_test_result_fn(td);
  470 + if (res)
  471 + goto err;
  472 + }
  473 +
  474 + test_pr_info("%s: %s, run# %03d, PASSED",
  475 + __func__, get_test_case_str(td), ++run);
  476 + td->test_result = TEST_PASSED;
  477 +
  478 + return 0;
  479 +err:
  480 + test_pr_err("%s: %s, run# %03d, FAILED",
  481 + __func__, get_test_case_str(td), ++run);
  482 + td->test_result = TEST_FAILED;
  483 + return res;
  484 +}
  485 +
  486 +/* Create and queue the required requests according to the test case */
  487 +static int prepare_test(struct test_data *td)
  488 +{
  489 + int ret = 0;
  490 +
  491 + if (td->test_info.prepare_test_fn) {
  492 + ret = td->test_info.prepare_test_fn(td);
  493 + return ret;
  494 + }
  495 +
  496 + return 0;
  497 +}
  498 +
  499 +/* Run the test */
  500 +static int run_test(struct test_data *td)
  501 +{
  502 + int ret = 0;
  503 +
  504 + if (td->test_info.run_test_fn) {
  505 + ret = td->test_info.run_test_fn(td);
  506 + return ret;
  507 + }
  508 +
  509 + /*
  510 + * Set the next_req pointer to the first request in the test requests
  511 + * list
  512 + */
  513 + if (!list_empty(&td->test_queue))
  514 + td->next_req = list_entry(td->test_queue.next,
  515 + struct test_request, queuelist);
  516 + __blk_run_queue(td->req_q);
  517 +
  518 + return 0;
  519 +}
  520 +
  521 +/* Free the allocated test requests, their block requests and BIO buffers */
  522 +static void free_test_requests(struct test_data *td)
  523 +{
  524 + struct test_request *test_rq;
  525 + struct bio *bio;
  526 +
  527 + while (!list_empty(&td->test_queue)) {
  528 + test_rq = list_entry(td->test_queue.next, struct test_request,
  529 + queuelist);
  530 + list_del_init(&test_rq->queuelist);
  531 + /*
  532 + * If the request was not completed we need to free its BIOs
  533 + * and remove it from the packed list
  534 + */
  535 + if (!test_rq->req_completed) {
  536 + test_pr_info(
  537 + "%s: Freeing memory of an uncompleted request",
  538 + __func__);
  539 + list_del_init(&test_rq->rq->queuelist);
  540 + while ((bio = test_rq->rq->bio) != NULL) {
  541 + test_rq->rq->bio = bio->bi_next;
  542 + bio_put(bio);
  543 + }
  544 + }
  545 + blk_put_request(test_rq->rq);
  546 + kfree(test_rq->bios_buffer);
  547 + kfree(test_rq);
  548 + }
  549 +}
  550 +
  551 +/*
  552 + * Do post-test operations.
  553 + * Free the allocated test requests, their block requests and BIO buffers.
  554 + */
  555 +static int post_test(struct test_data *td)
  556 +{
  557 + int ret = 0;
  558 +
  559 + if (td->test_info.post_test_fn)
  560 + ret = td->test_info.post_test_fn(td);
  561 +
  562 + ptd->test_info.testcase = 0;
  563 + ptd->test_state = TEST_IDLE;
  564 +
  565 + free_test_requests(td);
  566 +
  567 + return ret;
  568 +}
  569 +
  570 +/*
  571 + * The timer verifies that the test will be completed even if we don't get
  572 + * the completion callback for all the requests.
  573 + */
  574 +static void test_timeout_handler(unsigned long data)
  575 +{
  576 + struct test_data *td = (struct test_data *)data;
  577 +
  578 + test_pr_info("%s: TIMEOUT timer expired", __func__);
  579 + td->test_state = TEST_COMPLETED;
  580 + wake_up(&td->wait_q);
  581 + return;
  582 +}
  583 +
  584 +static unsigned int get_timeout_msec(struct test_data *td)
  585 +{
  586 + if (td->test_info.timeout_msec)
  587 + return td->test_info.timeout_msec;
  588 + else
  589 + return TIMEOUT_TIMER_MS;
  590 +}
  591 +
  592 +/**
  593 + * test_iosched_start_test() - Prepares and runs the test.
  594 + * @t_info: the current test's testcase and callback
  595 + * functions
  596 + *
  597 + * The function also checks the test result upon test completion
  598 + */
  599 +int test_iosched_start_test(struct test_info *t_info)
  600 +{
  601 + int ret = 0;
  602 + unsigned timeout_msec;
  603 + int counter = 0;
  604 + char *test_name = NULL;
  605 +
  606 + if (!ptd)
  607 + return -ENODEV;
  608 +
  609 + if (!t_info) {
  610 + ptd->test_result = TEST_FAILED;
  611 + return -EINVAL;
  612 + }
  613 +
  614 + do {
  615 + if (ptd->ignore_round)
  616 + /*
  617 + * We ignored the last run due to FS write requests.
  618 + * Sleep to allow those requests to be issued
  619 + */
  620 + msleep(2000);
  621 +
  622 + spin_lock(&ptd->lock);
  623 +
  624 + if (ptd->test_state != TEST_IDLE) {
  625 + test_pr_info(
  626 + "%s: Another test is running, try again later",
  627 + __func__);
  628 + spin_unlock(&ptd->lock);
  629 + return -EBUSY;
  630 + }
  631 +
  632 + if (ptd->start_sector == 0) {
  633 + test_pr_err("%s: Invalid start sector", __func__);
  634 + ptd->test_result = TEST_FAILED;
  635 + spin_unlock(&ptd->lock);
  636 + return -EINVAL;
  637 + }
  638 +
  639 + memcpy(&ptd->test_info, t_info, sizeof(struct test_info));
  640 +
  641 + ptd->next_req = NULL;
  642 + ptd->test_result = TEST_NO_RESULT;
  643 + ptd->num_of_write_bios = 0;
  644 +
  645 + ptd->unique_next_req_id = UNIQUE_START_REQ_ID;
  646 + ptd->wr_rd_next_req_id = WR_RD_START_REQ_ID;
  647 +
  648 + ptd->ignore_round = false;
  649 + ptd->fs_wr_reqs_during_test = false;
  650 +
  651 + ptd->test_state = TEST_RUNNING;
  652 +
  653 + spin_unlock(&ptd->lock);
  654 +
  655 + timeout_msec = get_timeout_msec(ptd);
  656 + mod_timer(&ptd->timeout_timer, jiffies +
  657 + msecs_to_jiffies(timeout_msec));
  658 +
  659 + if (ptd->test_info.get_test_case_str_fn)
  660 + test_name = ptd->test_info.get_test_case_str_fn(ptd);
  661 + else
  662 + test_name = "Unknown testcase";
  663 + test_pr_info("%s: Starting test %s\n", __func__, test_name);
  664 +
  665 + ret = prepare_test(ptd);
  666 + if (ret) {
  667 + test_pr_err("%s: failed to prepare the test\n",
  668 + __func__);
  669 + goto error;
  670 + }
  671 +
  672 + ret = run_test(ptd);
  673 + if (ret) {
  674 + test_pr_err("%s: failed to run the test\n", __func__);
  675 + goto error;
  676 + }
  677 +
  678 + test_pr_info("%s: Waiting for the test completion", __func__);
  679 +
  680 + wait_event(ptd->wait_q, ptd->test_state == TEST_COMPLETED);
  681 + del_timer_sync(&ptd->timeout_timer);
  682 +
  683 + ret = check_test_result(ptd);
  684 + if (ret) {
  685 + test_pr_err("%s: check_test_result failed\n",
  686 + __func__);
  687 + goto error;
  688 + }
  689 +
  690 + ret = post_test(ptd);
  691 + if (ret) {
  692 + test_pr_err("%s: post_test failed\n", __func__);
  693 + goto error;
  694 + }
  695 +
  696 + /*
  697 + * Wake up the queue thread to fetch FS requests that might have been
  698 + * postponed during the test
  699 + */
  700 + __blk_run_queue(ptd->req_q);
  701 +
  702 + if (ptd->ignore_round)
  703 + test_pr_info(
  704 + "%s: Round canceled (Got wr reqs in the middle)",
  705 + __func__);
  706 +
  707 + if (++counter == TEST_MAX_TESTCASE_ROUNDS) {
  708 + test_pr_info("%s: Too many rounds, did not succeed...",
  709 + __func__);
  710 + ptd->test_result = TEST_FAILED;
  711 + }
  712 +
  713 + } while ((ptd->ignore_round) && (counter < TEST_MAX_TESTCASE_ROUNDS));
  714 +
  715 + if (ptd->test_result == TEST_PASSED)
  716 + return 0;
  717 + else
  718 + return -EINVAL;
  719 +
  720 +error:
  721 + post_test(ptd);
  722 + ptd->test_result = TEST_FAILED;
  723 + return ret;
  724 +}
  725 +EXPORT_SYMBOL(test_iosched_start_test);
  726 +
  727 +/**
  728 + * test_iosched_register() - register a block device test
  729 + * utility.
  730 + * @bdt: the block device test type to register
  731 + */
  732 +void test_iosched_register(struct blk_dev_test_type *bdt)
  733 +{
  734 + spin_lock(&blk_dev_test_list_lock);
  735 + list_add_tail(&bdt->list, &blk_dev_test_list);
  736 + spin_unlock(&blk_dev_test_list_lock);
  737 +}
  738 +EXPORT_SYMBOL_GPL(test_iosched_register);
  739 +
  740 +/**
  741 + * test_iosched_unregister() - unregister a block device test
  742 + * utility.
  743 + * @bdt: the block device test type to unregister
  744 + */
  745 +void test_iosched_unregister(struct blk_dev_test_type *bdt)
  746 +{
  747 + spin_lock(&blk_dev_test_list_lock);
  748 + list_del_init(&bdt->list);
  749 + spin_unlock(&blk_dev_test_list_lock);
  750 +}
  751 +EXPORT_SYMBOL_GPL(test_iosched_unregister);
  752 +
  753 +/**
  754 + * test_iosched_set_test_result() - Set the test
  755 + * result (PASS/FAIL)
  756 + * @test_result: the test result
  757 + */
  758 +void test_iosched_set_test_result(int test_result)
  759 +{
  760 + if (!ptd)
  761 + return;
  762 +
  763 + ptd->test_result = test_result;
  764 +}
  765 +EXPORT_SYMBOL(test_iosched_set_test_result);
  766 +
  767 +
  768 +/**
  769 + * test_iosched_set_ignore_round() - Set the ignore_round flag
  770 + * @ignore_round: A flag to indicate if this test round
  771 + * should be ignored and re-run
  772 + */
  773 +void test_iosched_set_ignore_round(bool ignore_round)
  774 +{
  775 + if (!ptd)
  776 + return;
  777 +
  778 + ptd->ignore_round = ignore_round;
  779 +}
  780 +EXPORT_SYMBOL(test_iosched_set_ignore_round);
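
To illustrate how a block device test driver is expected to drive this framework, here is a minimal, hypothetical sketch. The exported functions and the struct test_info fields are taken from the code above; the trigger mechanism (typically a debugfs write) and the way the start sector gets configured are omitted and remain the real test driver's responsibility:

#include <linux/blkdev.h>
#include <linux/test-iosched.h>

static char *my_test_str(struct test_data *td)
{
	return "write-then-read sanity test";
}

/* Queue one write followed by one read of the same sectors. */
static int my_prepare(struct test_data *td)
{
	int ret;

	ret = test_iosched_add_wr_rd_test_req(0, WRITE, td->start_sector,
					      1, TEST_PATTERN_SEQUENTIAL, NULL);
	if (ret)
		return ret;
	return test_iosched_add_wr_rd_test_req(0, READ, td->start_sector,
					       1, TEST_PATTERN_SEQUENTIAL, NULL);
}

static int my_trigger_test(void)
{
	struct test_info t_info = {
		.timeout_msec = 10000,
		.get_test_case_str_fn = my_test_str,
		.prepare_test_fn = my_prepare,
		/*
		 * run_test_fn, check_test_result_fn and post_test_fn are
		 * optional; the defaults shown above are used when NULL.
		 */
	};

	/* Blocks until the test completes; returns 0 only on PASS. */
	return test_iosched_start_test(&t_info);
}

A real consumer would also register a struct blk_dev_test_type with test_iosched_register(); its fields beyond the list head are not visible in the portion above, so they are not sketched here.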