From eb7a57bfa2fe39581eff055ae20b97e2d4897d46 Mon Sep 17 00:00:00 2001
From: sanoj
Date: Thu, 25 Apr 2013 12:13:25 +0530
Subject: [PATCH] Coding style fixes

Clean up checkpatch complaints across the driver:

 - drop redundant casts on the return value of vmalloc()/kmalloc()/kzalloc()
 - move assignments out of if () conditions
 - use sizeof(x) instead of sizeof x
 - collapse single-statement do {} while (0) macros into plain statements
 - constify the file_operations tables
 - switch a printk(KERN_DEBUG ...) to pr_debug(), rejoin broken string
   literals, and add the spaces that were missing between concatenated
   string segments
 - reflow the misaligned sysctl template initializers
 - drop empty brace blocks and the dead VMCACHE-only
   eio_invalidate_sector_range()

Signed-off-by: sanoj
---
 Driver/enhanceio/eio.h        | 16 ++----
 Driver/enhanceio/eio_conf.c   | 45 ++++++++---------
 Driver/enhanceio/eio_fifo.c   |  5 +-
 Driver/enhanceio/eio_lru.c    |  7 ++-
 Driver/enhanceio/eio_main.c   | 67 ++++++-------------------
 Driver/enhanceio/eio_mem.c    |  5 +-
 Driver/enhanceio/eio_procfs.c | 93 +++++++++++++++++++----------------
 Driver/enhanceio/eio_rand.c   |  2 +-
 Driver/enhanceio/eio_subr.c   | 59 +++++++++++------------
 Driver/enhanceio/eio_ttc.c    | 17 +++----
 10 files changed, 133 insertions(+), 183 deletions(-)

diff --git a/Driver/enhanceio/eio.h b/Driver/enhanceio/eio.h
index eef4bdc..11af6a7 100644
--- a/Driver/enhanceio/eio.h
+++ b/Driver/enhanceio/eio.h
@@ -380,9 +380,9 @@ struct eio_policy_and_name {
 
 static const struct eio_policy_and_name eio_policy_names[] = {
-	{ CACHE_REPL_FIFO,      "fifo" },
-	{ CACHE_REPL_LRU,       "lru"  },
-	{ CACHE_REPL_RANDOM,    "rand" },
+	{ CACHE_REPL_FIFO, "fifo" },
+	{ CACHE_REPL_LRU, "lru" },
+	{ CACHE_REPL_RANDOM, "rand" },
 };
 
 
@@ -1112,10 +1112,7 @@ extern sector_t eio_get_device_size(struct eio_bdev *);
 extern sector_t eio_get_device_start_sect(struct eio_bdev *);
 #endif				/* __KERNEL__ */
 
-#define EIO_INIT_EVENT(ev)					\
-	do {							\
-		(ev)->process = NULL;				\
-	} while (0)
+#define EIO_INIT_EVENT(ev)	((ev)->process = NULL)
 
 /*Assumes that the macro gets called under the same spinlock as in wait event*/
 #define EIO_SET_EVENT_AND_UNLOCK(ev, sl, flags)			\
@@ -1142,10 +1139,7 @@ extern sector_t eio_get_device_start_sect(struct eio_bdev *);
 		(ev)->process = NULL;				\
 	} while (0)
 
-#define EIO_CLEAR_EVENT(ev)					\
-	do {							\
-		(ev)->process = NULL;				\
-	} while (0)
+#define EIO_CLEAR_EVENT(ev)	((ev)->process = NULL)
 
 #include "eio_setlru.h"
 #include "eio_policy.h"
diff --git a/Driver/enhanceio/eio_conf.c b/Driver/enhanceio/eio_conf.c
index 969a047..245dfb3 100644
--- a/Driver/enhanceio/eio_conf.c
+++ b/Driver/enhanceio/eio_conf.c
@@ -44,7 +44,7 @@
 #define KMEM_DMC_BIO_PAIR	"eio-dmc-bio-pair"
 /* #define KMEM_CACHE_PENDING_JOB	"eio-pending-jobs" */
 
-static struct cache_c *cache_list_head = NULL;
+static struct cache_c *cache_list_head;
 struct work_struct _kcached_wq;
 
 static struct kmem_cache *_job_cache;
@@ -80,7 +80,7 @@ static int eio_notify_ssd_rm(struct notifier_block *nb, unsigned long action,
 static struct notifier_block eio_reboot_notifier = {
 	.notifier_call	= eio_notify_reboot,
 	.next		= NULL,
-	.priority	= INT_MAX,	/* should be > ssd pri's and disk dev pri's */
+	.priority	= INT_MAX, /* should be > ssd pri's and disk dev pri's */
 };
 
 static struct notifier_block eio_ssd_rm_notifier = {
@@ -662,7 +662,8 @@ static int eio_md_create(struct cache_c *dmc, int force, int cold)
 	dmc->md_sectors += EIO_EXTRA_SECTORS(dmc->cache_dev_start_sect,
 					     dmc->md_sectors);
 
-	if ((error = eio_mem_init(dmc)) == -1) {
+	error = eio_mem_init(dmc);
+	if (error == -1) {
 		ret = -EINVAL;
 		goto free_header;
 	}
@@ -711,11 +712,9 @@ static int eio_md_create(struct cache_c *dmc, int force, int cold)
 	 */
 	if (!CACHE_SSD_ADD_INPROG_IS_SET(dmc)) {
 		if (EIO_MD8(dmc))
-			dmc->cache_md8 =
-			    (struct cacheblock_md8 *)vmalloc((size_t)order);
+			dmc->cache_md8 = vmalloc((size_t)order);
 		else
-			dmc->cache =
-			    (struct cacheblock *)vmalloc((size_t)order);
+			dmc->cache = vmalloc((size_t)order);
 		if ((EIO_MD8(dmc) && !dmc->cache_md8) ||
 		    (!EIO_MD8(dmc) && !dmc->cache)) {
 			pr_err
@@ -1148,7 +1147,8 @@ static int eio_md_load(struct cache_c *dmc)
 	dmc->sysctl_active.autoclean_threshold =
 		le32_to_cpu(header->sbf.autoclean_threshold);
 
-	if ((i = eio_mem_init(dmc)) == -1) {
+	i = eio_mem_init(dmc);
+	if (i == -1) {
 		pr_err("eio_md_load: Failed to initialize memory.");
 		ret = -EINVAL;
 		goto free_header;
@@ -1171,10 +1171,9 @@ static int eio_md_load(struct cache_c *dmc)
 		dmc->assoc, dmc->block_size << SECTOR_SHIFT);
 
 	if (EIO_MD8(dmc))
-		dmc->cache_md8 =
-		    (struct cacheblock_md8 *)vmalloc((size_t)order);
+		dmc->cache_md8 = vmalloc((size_t)order);
 	else
-		dmc->cache = (struct cacheblock *)vmalloc((size_t)order);
+		dmc->cache = vmalloc((size_t)order);
 
 	if ((EIO_MD8(dmc) && !dmc->cache_md8) || (!EIO_MD8(dmc) && !dmc->cache)) {
 		pr_err("md_load: Unable to allocate memory");
@@ -1472,7 +1471,7 @@ int eio_cache_create(struct cache_rec_short *cache)
 	fmode_t mode = (FMODE_READ | FMODE_WRITE);
 	char *strerr = NULL;
 
-	dmc = (struct cache_c *)kzalloc(sizeof(*dmc), GFP_KERNEL);
+	dmc = kzalloc(sizeof(*dmc), GFP_KERNEL);
 	if (dmc == NULL) {
 		strerr = "Failed to allocate memory for cache context";
 		error = -ENOMEM;
@@ -1493,8 +1492,8 @@ int eio_cache_create(struct cache_rec_short *cache)
 		strerr = "Failed to lookup source device";
 		goto bad1;
 	}
-	if ((dmc->disk_size =
-	     eio_to_sector(eio_get_device_size(dmc->disk_dev))) >= EIO_MAX_SECTOR) {
+	dmc->disk_size = eio_to_sector(eio_get_device_size(dmc->disk_dev));
+	if (dmc->disk_size >= EIO_MAX_SECTOR) {
 		strerr = "Source device too big to support";
 		error = -EFBIG;
 		goto bad2;
@@ -1794,7 +1793,7 @@ int eio_cache_create(struct cache_rec_short *cache)
 		goto bad5;
 	}
 
-	dmc->cache_sets = (struct cache_set *)vmalloc((size_t)order);
+	dmc->cache_sets = vmalloc((size_t)order);
 	if (!dmc->cache_sets) {
 		strerr = "Failed to allocate memory";
 		error = -ENOMEM;
@@ -2255,9 +2254,8 @@ int eio_allocate_wb_resources(struct cache_c *dmc)
 	/* Data page allocations are done in terms of "bio_vec" structures */
 	iosize = (dmc->block_size * dmc->assoc) << SECTOR_SHIFT;
 	nr_bvecs = IO_BVEC_COUNT(iosize, dmc->block_size);
-	dmc->clean_dbvecs =
-	    (struct bio_vec *)kmalloc(sizeof(struct bio_vec) * nr_bvecs,
-				      GFP_KERNEL);
+	dmc->clean_dbvecs = kmalloc(sizeof(struct bio_vec) * nr_bvecs,
+				    GFP_KERNEL);
 	if (dmc->clean_dbvecs == NULL) {
 		pr_err("cache_create: Failed to allocated memory.\n");
 		ret = -ENOMEM;
@@ -2273,9 +2271,8 @@ int eio_allocate_wb_resources(struct cache_c *dmc)
 	/* Metadata page allocations are done in terms of pages only */
 	iosize = dmc->assoc * sizeof(struct flash_cacheblock);
 	nr_pages = IO_PAGE_COUNT(iosize);
-	dmc->clean_mdpages =
-	    (struct page **)kmalloc(sizeof(struct page *) * nr_pages,
-				    GFP_KERNEL);
+	dmc->clean_mdpages = kmalloc(sizeof(struct page *) * nr_pages,
+				     GFP_KERNEL);
 	if (dmc->clean_mdpages == NULL) {
 		pr_err("cache_create: Failed to allocated memory.\n");
 		ret = -ENOMEM;
@@ -2481,8 +2478,8 @@ eio_notify_ssd_rm(struct notifier_block *nb, unsigned long action, void *data)
 
 	if (!scsi_is_sdev_device(dev))
 		return 0;
-
-	if ((device_name = dev_name(dev)) == NULL)
+	device_name = dev_name(dev);
+	if (device_name == NULL)
 		return 0;
 	len = strlen(device_name);
@@ -2562,7 +2559,7 @@ static int __init eio_init(void)
 	INIT_WORK(&_kcached_wq, eio_do_work);
 
 	eio_module_procfs_init();
-	eio_control = kmalloc(sizeof *eio_control, GFP_KERNEL);
+	eio_control = kmalloc(sizeof(*eio_control), GFP_KERNEL);
 	if (eio_control == NULL) {
 		pr_err("init: Cannot allocate memory for eio_control");
 		(void)eio_delete_misc_device();
diff --git a/Driver/enhanceio/eio_fifo.c b/Driver/enhanceio/eio_fifo.c
index 13fed89..117fe06 100644
--- a/Driver/enhanceio/eio_fifo.c
+++ b/Driver/enhanceio/eio_fifo.c
@@ -77,8 +77,7 @@ int eio_fifo_cache_sets_init(struct eio_policy *p_ops)
 
 	order = (dmc->size >> dmc->consecutive_shift) *
 		sizeof(struct eio_fifo_cache_set);
-	dmc->sp_cache_set =
-	    (struct eio_fifo_cache_set *)vmalloc((size_t)order);
+	dmc->sp_cache_set = vmalloc((size_t)order);
 	if (dmc->sp_cache_set == NULL)
 		return -ENOMEM;
 
@@ -178,7 +177,7 @@ struct eio_policy *eio_fifo_instance_init(void)
 {
 	struct eio_policy *new_instance;
 
-	new_instance = (struct eio_policy *)vmalloc(sizeof(struct eio_policy));
+	new_instance = vmalloc(sizeof(struct eio_policy));
 	if (new_instance == NULL) {
 		pr_err("ssdscache_fifo_instance_init: vmalloc failed");
 		return NULL;
diff --git a/Driver/enhanceio/eio_lru.c b/Driver/enhanceio/eio_lru.c
index f7d6030..e213301 100644
--- a/Driver/enhanceio/eio_lru.c
+++ b/Driver/enhanceio/eio_lru.c
@@ -90,7 +90,7 @@ int eio_lru_cache_sets_init(struct eio_policy *p_ops)
 	    (dmc->size >> dmc->consecutive_shift) *
 	    sizeof(struct eio_lru_cache_set);
 
-	dmc->sp_cache_set = (struct eio_lru_cache_set *)vmalloc((size_t)order);
+	dmc->sp_cache_set = vmalloc((size_t)order);
 	if (dmc->sp_cache_set == NULL)
 		return -ENOMEM;
 
@@ -115,8 +115,7 @@ int eio_lru_cache_blk_init(struct eio_policy *p_ops)
 
 	order = dmc->size * sizeof(struct eio_lru_cache_block);
 
-	dmc->sp_cache_blk =
-	    (struct eio_lru_cache_block *)vmalloc((size_t)order);
+	dmc->sp_cache_blk = vmalloc((size_t)order);
 	if (dmc->sp_cache_blk == NULL)
 		return -ENOMEM;
 
@@ -130,7 +129,7 @@ struct eio_policy *eio_lru_instance_init(void)
 {
 	struct eio_policy *new_instance;
 
-	new_instance = (struct eio_policy *)vmalloc(sizeof(struct eio_policy));
+	new_instance = vmalloc(sizeof(struct eio_policy));
 	if (new_instance == NULL) {
 		pr_err("eio_lru_instance_init: vmalloc failed");
 		return NULL;
diff --git a/Driver/enhanceio/eio_main.c b/Driver/enhanceio/eio_main.c
index 5ac5296..c102eaf 100644
--- a/Driver/enhanceio/eio_main.c
+++ b/Driver/enhanceio/eio_main.c
@@ -135,7 +135,7 @@ eio_io_async_bvec(struct cache_c *dmc, struct eio_io_region *where, int rw,
 	struct eio_io_request req;
 	int error = 0;
 
-	memset((char *)&req, 0, sizeof req);
+	memset((char *)&req, 0, sizeof(req));
 
 	if (unlikely(CACHE_DEGRADED_IS_SET(dmc))) {
 		if (where->bdev != dmc->disk_dev->bdev) {
@@ -462,9 +462,9 @@ static void eio_post_io_callback(struct work_struct *work)
 		if (unlikely(error))
 			dmc->eio_errors.ssd_write_errors++;
 		if (!(EIO_CACHE_STATE_GET(dmc, index) & CACHEWRITEINPROG)) {
-			printk(KERN_DEBUG
-			       "DISKWRITEINPROG absent in READFILL sector %llu io size %u\n",
-			       (unsigned long long)ebio->eb_sector,
+			pr_debug("DISKWRITEINPROG absent in READFILL "
+				 "sector %llu io size %u\n",
+				 (unsigned long long)ebio->eb_sector,
 				 ebio->eb_size);
 		}
 		callendio = 1;
@@ -1772,42 +1772,6 @@ eio_invalidate_sanity_check(struct cache_c *dmc, u_int64_t iosector,
 	return 0;
 }
 
-#if defined (VMCACHE)
-int
-eio_invalidate_sector_range(char *cache_name, u_int64_t iosector,
-			    u_int64_t num_sectors)
-{
-	struct cache_c *dmc;
-	int ret;
-
-	dmc = eio_find_cache(cache_name);
-
-	if (dmc == NULL) {
-		pr_err
-		    ("invalidate_sector_range: cache object with name=%s does not exist.",
-		     cache_name);
-		return -EINVAL;
-	}
-
-	ret = eio_invalidate_sanity_check(dmc, iosector, &num_sectors);
-
-	if (ret == 0)
-		eio_inval_range(dmc, iosector, (unsigned)to_bytes(num_sectors));
-	else
-		return ret;
-
-	if (CACHE_VERBOSE_IS_SET(dmc)) {
-		pr_info
-		    ("eio_inval_range: Invalidated sector range from sector=%lu to sector=%lu",
-		     (long unsigned int)iosector,
-		     (long unsigned int)num_sectors);
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(eio_invalidate_sector_range);
-#endif				/* VMCACHE */
-
 void eio_inval_range(struct cache_c *dmc, sector_t iosector, unsigned iosize)
 {
 	u_int32_t bset;
@@ -2510,7 +2474,7 @@ int eio_map(struct cache_c *dmc, struct request_queue *rq, struct bio *bio)
 
 	EIO_ASSERT(bio->bi_idx == 0);
 
-	pr_debug("this needs to be removed immediately \n");
+	pr_debug("this needs to be removed immediately\n");
 
 	if (bio_rw_flagged(bio, REQ_DISCARD)) {
 		pr_debug
@@ -2599,7 +2563,8 @@ int eio_map(struct cache_c *dmc, struct request_queue *rq, struct bio *bio)
 	 * lock on the cache set for app I/Os and exclusive
 	 * lock on the cache set for clean I/Os.
 	 */
-	if ((ret = eio_acquire_set_locks(dmc, bc)) != 0) {
+	ret = eio_acquire_set_locks(dmc, bc);
+	if (ret) {
 		bio_endio(bio, ret);
 		kfree(bc);
 		return DM_MAPIO_SUBMITTED;
@@ -2620,13 +2585,9 @@ int eio_map(struct cache_c *dmc, struct request_queue *rq, struct bio *bio)
 	else {
 		while (biosize) {
 			iosize = eio_get_iosize(dmc, snum, biosize);
-
-			if (IS_ERR
-			    (ebio =
-			     eio_new_ebio(dmc, bio,
-					  &residual_biovec, snum,
-					  iosize, bc,
-					  EB_SUBORDINATE_IO))) {
+			ebio = eio_new_ebio(dmc, bio, &residual_biovec, snum,
+					    iosize, bc, EB_SUBORDINATE_IO);
+			if (IS_ERR(ebio)) {
 				bc->bc_error = -ENOMEM;
 				break;
 			}
@@ -3098,7 +3059,7 @@ void eio_clean_all(struct cache_c *dmc)
 		}
 
 		eio_clean_set(dmc, (index_t)(atomic_read(&dmc->clean_index)),
-				/* whole */ 1, /* force */ 1);
+			      /* whole */ 1, /* force */ 1);
 	}
 
 	spin_lock_irqsave(&dmc->cache_spin_lock, flags);
@@ -3301,9 +3262,9 @@ eio_clean_set(struct cache_c *dmc, index_t set, int whole, int force)
 
 	for (i = start_index; i < end_index; i++) {
 		if (EIO_CACHE_STATE_GET(dmc, i) == CLEAN_INPROG) {
-			for (j = i; (j < end_index) &&
-			     (EIO_CACHE_STATE_GET(dmc, j) == CLEAN_INPROG);
-			     j++) ;
+			for (j = i; ((j < end_index) &&
+			    (EIO_CACHE_STATE_GET(dmc, j) == CLEAN_INPROG));
+			    j++);
 			blkindex = (i - start_index);
 			total = (j - i);
 
diff --git a/Driver/enhanceio/eio_mem.c b/Driver/enhanceio/eio_mem.c
index e3a1144..0e9093b 100644
--- a/Driver/enhanceio/eio_mem.c
+++ b/Driver/enhanceio/eio_mem.c
@@ -66,9 +66,8 @@ int eio_mem_init(struct cache_c *dmc)
 	 * its corresponding mask value.
 	 */
 	dmc->num_sets = (u_int32_t)num_sets_64;
-	for (dmc->num_sets_bits = 0;
-	     (dmc->num_sets >> dmc->num_sets_bits) != 0;
-	     dmc->num_sets_bits++) ;
+	for (dmc->num_sets_bits = 0; (dmc->num_sets >> dmc->num_sets_bits);
+	     dmc->num_sets_bits++);
 
 	dmc->num_sets_mask = ULLONG_MAX >> (64 - dmc->num_sets_bits);
 
diff --git a/Driver/enhanceio/eio_procfs.c b/Driver/enhanceio/eio_procfs.c
index 782e4d8..76fe80a 100644
--- a/Driver/enhanceio/eio_procfs.c
+++ b/Driver/enhanceio/eio_procfs.c
@@ -951,35 +951,35 @@ static int eio_version_open(struct inode *inode, struct file *file);
 static int eio_config_show(struct seq_file *seq, void *v);
 static int eio_config_open(struct inode *inode, struct file *file);
 
-static struct file_operations eio_version_operations = {
+static const struct file_operations eio_version_operations = {
 	.open		= eio_version_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= single_release,
 };
 
-static struct file_operations eio_stats_operations = {
+static const struct file_operations eio_stats_operations = {
 	.open		= eio_stats_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= single_release,
 };
 
-static struct file_operations eio_errors_operations = {
+static const struct file_operations eio_errors_operations = {
 	.open		= eio_errors_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= single_release,
 };
 
-static struct file_operations eio_iosize_hist_operations = {
+static const struct file_operations eio_iosize_hist_operations = {
 	.open		= eio_iosize_hist_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= single_release,
 };
 
-static struct file_operations eio_config_operations = {
+static const struct file_operations eio_config_operations = {
 	.open		= eio_config_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -1016,16 +1016,16 @@ static struct sysctl_table_dir {
 	ctl_table dir[1 + 1];
 	ctl_table root[1 + 1];
 } sysctl_template_dir = {
-	.vars ={
-	}, .dev ={
-	}, .dir ={
+	.vars = {
+	}, .dev = {
+	}, .dir = {
 		{
 			.procname	= PROC_SYS_DIR_NAME,
 			.maxlen		= 0,
 			.mode		= S_IRUGO | S_IXUGO,
 			.child		= sysctl_template_dir.dev,
 		},
-	}, .root ={
+	}, .root = {
 		{
 			.procname	= PROC_SYS_ROOT_NAME,
 			.maxlen		= 0,
@@ -1044,7 +1044,7 @@ static struct sysctl_table_common {
 	ctl_table dir[1 + 1];
 	ctl_table root[1 + 1];
 } sysctl_template_common = {
-	.vars ={
+	.vars = {
 		{ /* 1 */
 			.procname = "zero_stats",
 			.maxlen = sizeof(int),
@@ -1052,7 +1052,8 @@ static struct sysctl_table_common {
 			.proc_handler = &eio_zerostats_sysctl,
 		}, { /* 2 */
 			.procname = "mem_limit_pct",
-			.maxlen = sizeof(int), .mode= 0644,
+			.maxlen = sizeof(int),
+			.mode = 0644,
 			.proc_handler = &eio_mem_limit_pct_sysctl,
 		}, { /* 3 */
 			.procname = "control",
@@ -1060,21 +1061,21 @@ static struct sysctl_table_common {
 			.mode = 0644,
 			.proc_handler = &eio_control_sysctl,
 		},
-	}, .dev ={
+	}, .dev = {
 		{
 			.procname = PROC_SYS_CACHE_NAME,
 			.maxlen = 0,
 			.mode = S_IRUGO | S_IXUGO,
 			.child = sysctl_template_common.vars,
 		},
-	}, .dir ={
+	}, .dir = {
 		{
 			.procname = PROC_SYS_DIR_NAME,
 			.maxlen = 0,
 			.mode = S_IRUGO | S_IXUGO,
 			.child = sysctl_template_common.dev,
 		},
-	}, .root ={
+	}, .root = {
 		{
 			.procname = PROC_SYS_ROOT_NAME,
 			.maxlen = 0,
@@ -1093,7 +1094,7 @@ static struct sysctl_table_writeback {
 	ctl_table dir[1 + 1];
 	ctl_table root[1 + 1];
 } sysctl_template_writeback = {
-	.vars ={
+	.vars = {
 		{ /* 1 */
 			.procname = "do_clean",
 			.maxlen = sizeof(int),
@@ -1135,26 +1136,30 @@ static struct sysctl_table_writeback {
 	}
 	,
 }
-, .dev ={
+, .dev = {
 	{
-		.procname = PROC_SYS_CACHE_NAME, .maxlen = 0, .mode =
-		    S_IRUGO | S_IXUGO, .child=
-		    sysctl_template_writeback.vars,
+		.procname = PROC_SYS_CACHE_NAME,
+		.maxlen = 0,
+		.mode = S_IRUGO | S_IXUGO,
+		.child = sysctl_template_writeback.vars,
 	}
 	,
 }
-, .dir ={
+, .dir = {
 	{
-		.procname = PROC_SYS_DIR_NAME, .maxlen = 0, .mode =
-		    S_IRUGO | S_IXUGO, .child=
-		    sysctl_template_writeback.dev,
+		.procname = PROC_SYS_DIR_NAME,
+		.maxlen = 0,
+		.mode = S_IRUGO | S_IXUGO,
+		.child = sysctl_template_writeback.dev,
 	}
 	,
 }
-, .root ={
+, .root = {
 	{
-		.procname = PROC_SYS_ROOT_NAME, .maxlen = 0, .mode =
-		    0555, .child= sysctl_template_writeback.dir,
+		.procname = PROC_SYS_ROOT_NAME,
+		.maxlen = 0,
+		.mode = 0555,
+		.child = sysctl_template_writeback.dir,
 	}
 	,
 }
@@ -1169,8 +1174,8 @@ static struct sysctl_table_invalidate {
 	ctl_table dir[1 + 1];
 	ctl_table root[1 + 1];
 } sysctl_template_invalidate = {
-	.vars ={
-	{	/* 1 */
+	.vars = {
+	{ /* 1 */
 		.procname = "invalidate",
 		.maxlen = sizeof(u_int64_t),
 		.mode = 0644,
@@ -1178,23 +1183,25 @@ static struct sysctl_table_invalidate {
 	}
 	,
 }
-, .dev ={
+, .dev = {
 	{
-		.procname = PROC_SYS_CACHE_NAME, .maxlen = 0, .mode =
-		    S_IRUGO | S_IXUGO, .child=
-		    sysctl_template_invalidate.vars,
+		.procname = PROC_SYS_CACHE_NAME,
+		.maxlen = 0,
+		.mode = S_IRUGO | S_IXUGO,
+		.child = sysctl_template_invalidate.vars,
 	}
 	,
 }
-, .dir ={
+, .dir = {
 	{
-		.procname = PROC_SYS_DIR_NAME, .maxlen = 0, .mode =
-		    S_IRUGO | S_IXUGO, .child=
-		    sysctl_template_invalidate.dev,
+		.procname = PROC_SYS_DIR_NAME,
+		.maxlen = 0,
+		.mode = S_IRUGO | S_IXUGO,
+		.child = sysctl_template_invalidate.dev,
 	}
 	,
 }
-, .root ={
+, .root = {
 	{
 		.procname = PROC_SYS_ROOT_NAME,
 		.maxlen = 0,
@@ -1486,7 +1493,7 @@ static void eio_sysctl_register_dir(void)
 	struct sysctl_table_dir *dir;
 
 	dir =
-		kmemdup(&sysctl_template_dir, sizeof sysctl_template_dir,
+		kmemdup(&sysctl_template_dir, sizeof(sysctl_template_dir),
 			GFP_KERNEL);
 	if (unlikely(dir == NULL)) {
 		pr_err("Failed to allocate memory for dir sysctl");
@@ -1525,7 +1532,7 @@ static void eio_sysctl_register_common(struct cache_c *dmc)
 	struct sysctl_table_common *common;
 
 	common =
-		kmemdup(&sysctl_template_common, sizeof sysctl_template_common,
+		kmemdup(&sysctl_template_common, sizeof(sysctl_template_common),
 			GFP_KERNEL);
 	if (common == NULL) {
 		pr_err("Failed to allocate memory for common sysctl");
@@ -1580,7 +1587,7 @@ static void eio_sysctl_register_writeback(struct cache_c *dmc)
 
 	writeback =
 		kmemdup(&sysctl_template_writeback,
-			sizeof sysctl_template_writeback, GFP_KERNEL);
+			sizeof(sysctl_template_writeback), GFP_KERNEL);
 	if (writeback == NULL) {
 		pr_err("Failed to allocate memory for writeback sysctl");
 		return;
@@ -1634,7 +1641,7 @@ static void eio_sysctl_register_invalidate(struct cache_c *dmc)
 
 	invalidate =
 		kmemdup(&sysctl_template_invalidate,
-			sizeof sysctl_template_invalidate, GFP_KERNEL);
+			sizeof(sysctl_template_invalidate), GFP_KERNEL);
 	if (invalidate == NULL) {
 		pr_err("Failed to allocate memory for invalidate sysctl");
 		return;
@@ -1889,8 +1896,8 @@ static int eio_version_show(struct seq_file *seq, void *v)
 {
 	char buf[128];
 
-	memset(buf, 0, sizeof buf);
-	eio_version_query(sizeof buf, buf);
+	memset(buf, 0, sizeof(buf));
+	eio_version_query(sizeof(buf), buf);
 	seq_printf(seq, "%s\n", buf);
 
 	return 0;
diff --git a/Driver/enhanceio/eio_rand.c b/Driver/enhanceio/eio_rand.c
index 0b41e3d..6b05756 100644
--- a/Driver/enhanceio/eio_rand.c
+++ b/Driver/enhanceio/eio_rand.c
@@ -139,7 +139,7 @@ struct eio_policy *eio_rand_instance_init(void)
 {
 	struct eio_policy *new_instance;
 
-	new_instance = (struct eio_policy *)vmalloc(sizeof(struct eio_policy));
+	new_instance = vmalloc(sizeof(struct eio_policy));
 	if (new_instance == NULL) {
 		pr_err("ssdscache_rand_instance_init: vmalloc failed");
 		return NULL;
diff --git a/Driver/enhanceio/eio_subr.c b/Driver/enhanceio/eio_subr.c
index 769b568..1166144 100644
--- a/Driver/enhanceio/eio_subr.c
+++ b/Driver/enhanceio/eio_subr.c
@@ -114,14 +114,13 @@ static void eio_process_ssd_rm_list(void)
 
 	while (!list_empty(&ssd_rm_list)) {
 		ssd_list_ptr =
-		    list_entry(ssd_rm_list.next, struct ssd_rm_list, list);
+			list_entry(ssd_rm_list.next, struct ssd_rm_list, list);
 		if (ssd_list_ptr->action == BUS_NOTIFY_DEL_DEVICE)
 			eio_suspend_caching(ssd_list_ptr->dmc,
 					    ssd_list_ptr->note);
 		else
-			pr_err
-			    ("eio_process_ssd_rm_list: Unknown status (0x%x)\n",
-			     ssd_list_ptr->action);
+			pr_err("eio_process_ssd_rm_list: "
+			       "Unknown status (0x%x)\n", ssd_list_ptr->action);
 		list_del(&ssd_list_ptr->list);
 		kfree(ssd_list_ptr);
 	}
@@ -165,16 +164,16 @@ struct kcached_job *eio_new_job(struct cache_c *dmc, struct eio_bio *bio,
 		job->job_io_regions.cache.bdev = dmc->cache_dev->bdev;
 		if (bio) {
 			job->job_io_regions.cache.sector =
-			    (index << dmc->block_shift) + dmc->md_sectors +
-			    (bio->eb_sector -
-			     EIO_ROUND_SECTOR(dmc, bio->eb_sector));
+				(index << dmc->block_shift) + dmc->md_sectors +
+				(bio->eb_sector -
+				 EIO_ROUND_SECTOR(dmc, bio->eb_sector));
 			EIO_ASSERT(eio_to_sector(bio->eb_size) <= dmc->block_size);
 			job->job_io_regions.cache.count =
-			    eio_to_sector(bio->eb_size);
+				eio_to_sector(bio->eb_size);
 		} else {
 			job->job_io_regions.cache.sector =
-			    (index << dmc->block_shift) + dmc->md_sectors;
+				(index << dmc->block_shift) + dmc->md_sectors;
 			job->job_io_regions.cache.count = dmc->block_size;
 		}
 	}
@@ -304,7 +303,7 @@ void eio_suspend_caching(struct cache_c *dmc, enum dev_notifier note)
 
 	spin_lock_irqsave(&dmc->cache_spin_lock, dmc->cache_spin_lock_flags);
 	if (dmc->mode != CACHE_MODE_WB && CACHE_FAILED_IS_SET(dmc)) {
-		pr_err("suspend caching: Cache "	\
+		pr_err("suspend caching: Cache "
 		       "%s is already in FAILED state\n", dmc->cache_name);
 		spin_unlock_irqrestore(&dmc->cache_spin_lock,
 				       dmc->cache_spin_lock_flags);
@@ -319,7 +318,7 @@ void eio_suspend_caching(struct cache_c *dmc, enum dev_notifier note)
 		dmc->cache_flags |= CACHE_FLAGS_FAILED;
 		dmc->eio_errors.no_source_dev = 1;
 		atomic64_set(&dmc->eio_stats.cached_blocks, 0);
-		pr_info("suspend_caching: Source Device Removed."	\
+		pr_info("suspend_caching: Source Device Removed. "
 			"Cache \"%s\" is in Failed mode.\n", dmc->cache_name);
 		break;
 	case NOTIFY_SSD_REMOVED:
@@ -332,25 +331,22 @@ void eio_suspend_caching(struct cache_c *dmc, enum dev_notifier note)
 		 */
 		EIO_ASSERT(!CACHE_DEGRADED_IS_SET(dmc));
 		dmc->cache_flags |= CACHE_FLAGS_FAILED;
-		pr_info
-		    ("suspend caching: SSD Device Removed." \
-		     "Cache \"%s\" is in Failed mode.\n",
-		     dmc->cache_name);
+		pr_info("suspend caching: SSD Device Removed. "
+			"Cache \"%s\" is in Failed mode.\n", dmc->cache_name);
 	} else {
 		if (CACHE_DEGRADED_IS_SET(dmc) ||
 		    CACHE_SSD_ADD_INPROG_IS_SET(dmc)) {
 			spin_unlock_irqrestore(&dmc->cache_spin_lock,
 					       dmc->cache_spin_lock_flags);
-			pr_err("suspend_caching: Cache " \
-			       "\"%s\" is either degraded" \
-			       "or device add in progress, exiting.\n",
-			       dmc->cache_name);
+			pr_err("suspend_caching: Cache \"%s\" is either "
+			       "degraded or device add in progress, exiting.\n",
+			       dmc->cache_name);
 			return;
 		}
 		dmc->cache_flags |= CACHE_FLAGS_DEGRADED;
 		atomic64_set(&dmc->eio_stats.cached_blocks, 0);
-		pr_info("suspend caching: Cache \"%s\" " \
-			"is in Degraded mode.\n", dmc->cache_name);
+		pr_info("suspend caching: Cache \"%s\" "
+			"is in Degraded mode.\n", dmc->cache_name);
 	}
 	dmc->eio_errors.no_cache_dev = 1;
 	break;
@@ -374,7 +370,7 @@ void eio_resume_caching(struct cache_c *dmc, char *dev)
 	int r;
 
 	if (dmc == NULL || dev == NULL) {
-		pr_err("resume_caching: Null device or" \
+		pr_err("resume_caching: Null device or "
 		       "cache instance when resuming caching.\n");
 		return;
 	}
@@ -385,7 +381,7 @@ void eio_resume_caching(struct cache_c *dmc, char *dev)
 
 	spin_lock_irqsave(&dmc->cache_spin_lock, dmc->cache_spin_lock_flags);
 	if (CACHE_STALE_IS_SET(dmc)) {
-		pr_err("eio_resume_caching: Hard Failure Detected!!" \
+		pr_err("eio_resume_caching: Hard Failure Detected!! "
 		       "Cache \"%s\" can not be resumed.", dmc->cache_name);
 		spin_unlock_irqrestore(&dmc->cache_spin_lock,
 				       dmc->cache_spin_lock_flags);
@@ -396,8 +392,8 @@ void eio_resume_caching(struct cache_c *dmc, char *dev)
 	if (dmc->mode == CACHE_MODE_WB) {
 		if (!CACHE_FAILED_IS_SET(dmc) || CACHE_SRC_IS_ABSENT(dmc) ||
 		    CACHE_SSD_ADD_INPROG_IS_SET(dmc)) {
-			pr_debug("eio_resume_caching: Cache not in Failed " \
-				 "state or Source is absent" \
+			pr_debug("eio_resume_caching: Cache not in Failed "
+				 "state or Source is absent "
 				 "or SSD add already in progress for cache \"%s\".\n",
 				 dmc->cache_name);
 			spin_unlock_irqrestore(&dmc->cache_spin_lock,
@@ -408,9 +404,9 @@ void eio_resume_caching(struct cache_c *dmc, char *dev)
 		/* sanity check for WT or RO cache. */
 		if (CACHE_FAILED_IS_SET(dmc) || !CACHE_DEGRADED_IS_SET(dmc) ||
 		    CACHE_SSD_ADD_INPROG_IS_SET(dmc)) {
-			pr_err("resume_caching: Cache \"%s\" " \
-			       "is either in failed mode or " \
-			       "cache device add in progress, ignoring. \n ",
+			pr_err("resume_caching: Cache \"%s\" "
+			       "is either in failed mode or "
+			       "cache device add in progress, ignoring.\n",
 			       dmc->cache_name);
 			spin_unlock_irqrestore(&dmc->cache_spin_lock,
 					       dmc->cache_spin_lock_flags);
@@ -424,7 +420,7 @@ void eio_resume_caching(struct cache_c *dmc, char *dev)
 	r = eio_ctr_ssd_add(dmc, dev);
 	if (r) {
 		/* error */
-		pr_debug(" resume caching: returned error: %d \n ", r);
+		pr_debug("resume_caching: returned error: %d\n", r);
 		spin_lock_irqsave(&dmc->cache_spin_lock,
 				  dmc->cache_spin_lock_flags);
 		dmc->cache_flags &= ~CACHE_FLAGS_SSD_ADD_INPROG;
diff --git a/Driver/enhanceio/eio_ttc.c b/Driver/enhanceio/eio_ttc.c
index 7048936..723788e 100644
--- a/Driver/enhanceio/eio_ttc.c
+++ b/Driver/enhanceio/eio_ttc.c
@@ -62,7 +62,7 @@ static int eio_release(struct inode *ip, struct file *filp)
 	return 0;
 }
 
-static struct file_operations eio_fops = {
+static const struct file_operations eio_fops = {
 	.open		= eio_open,
 	.release	= eio_release,
 	.unlocked_ioctl	= eio_ioctl,
@@ -120,7 +120,7 @@ int eio_ttc_get_device(const char *path, fmode_t mode, struct eio_bdev **result)
 	 * bd_claim_by_disk(bdev, charptr, gendisk)
 	 */
 
-	eio_bdev = (struct eio_bdev *)kzalloc(sizeof(*eio_bdev), GFP_KERNEL);
+	eio_bdev = kzalloc(sizeof(*eio_bdev), GFP_KERNEL);
 	if (eio_bdev == NULL) {
 		blkdev_put(bdev, mode);
 		return -ENOMEM;
@@ -317,8 +317,6 @@ int eio_ttc_deactivate(struct cache_c *dmc, int force)
 
 	if ((dmc->dev_info == EIO_DEV_WHOLE_DISK) || (found_partitions == 0))
 		rq->make_request_fn = dmc->origmfn;
-	else {
-	}
 
 	list_del_init(&dmc->cachelist);
 	up_write(&eio_ttc_lock[index]);
@@ -759,11 +757,9 @@ static int eio_dispatch_io(struct cache_c *dmc, struct eio_io_region *where,
 		}
 		atomic_inc(&io->count);
 
-		if (hddio) {
+		if (hddio)
 			dmc->origmfn(bdev_get_queue(bio->bi_bdev), bio);
-			if (ret) {
-			}
-		} else
+		else
 			submit_bio(rw, bio);
 
 	} while (remaining);
@@ -1488,7 +1484,7 @@ int eio_reboot_handling(void)
 	for (i = 0; i < EIO_HASHTBL_SIZE; i++) {
 		down_write(&eio_ttc_lock[i]);
 		list_for_each_entry(dmc, &eio_ttc_list[i], cachelist) {
-
+
 			kfree(tempdmc);
 			tempdmc = NULL;
 			if (unlikely(CACHE_FAILED_IS_SET(dmc)) ||
@@ -1616,7 +1612,8 @@ static int eio_overlap_split_bio(struct request_queue *q, struct bio *bio)
 	bvec_consumed = 0;
 	for (i = 0; i < nbios; i++) {
 		bioptr[i] =
-			eio_split_new_bio(bio, bc, &bvec_idx, &bvec_consumed, snum);
+			eio_split_new_bio(bio, bc, &bvec_idx,
+					  &bvec_consumed, snum);
 		if (!bioptr[i])
 			break;
 		snum++;