Skip to content

Commit

Permalink
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
Browse files Browse the repository at this point in the history
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: fix btrfs fallocate oops and deadlock
  Btrfs: use the right node in reada_for_balance
  Btrfs: fix oops on page->mapping->host during writepage
  Btrfs: add a priority queue to the async thread helpers
  Btrfs: use WRITE_SYNC for synchronous writes
  • Loading branch information
torvalds committed Apr 21, 2009
2 parents c19c6c3 + 546888d commit ccc5ff9
Show file tree
Hide file tree
Showing 10 changed files with 272 additions and 83 deletions.
60 changes: 47 additions & 13 deletions fs/btrfs/async-thread.c
Expand Up @@ -25,6 +25,7 @@
#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3

/*
* container for the kthread task pointer and the list of pending work
Expand All @@ -36,6 +37,7 @@ struct btrfs_worker_thread {

/* list of struct btrfs_work that are waiting for service */
struct list_head pending;
struct list_head prio_pending;

/* list of worker threads from struct btrfs_workers */
struct list_head worker_list;
Expand Down Expand Up @@ -103,10 +105,16 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,

spin_lock_irqsave(&workers->lock, flags);

while (!list_empty(&workers->order_list)) {
work = list_entry(workers->order_list.next,
struct btrfs_work, order_list);

while (1) {
if (!list_empty(&workers->prio_order_list)) {
work = list_entry(workers->prio_order_list.next,
struct btrfs_work, order_list);
} else if (!list_empty(&workers->order_list)) {
work = list_entry(workers->order_list.next,
struct btrfs_work, order_list);
} else {
break;
}
if (!test_bit(WORK_DONE_BIT, &work->flags))
break;

Expand Down Expand Up @@ -143,8 +151,14 @@ static int worker_loop(void *arg)
do {
spin_lock_irq(&worker->lock);
again_locked:
while (!list_empty(&worker->pending)) {
cur = worker->pending.next;
while (1) {
if (!list_empty(&worker->prio_pending))
cur = worker->prio_pending.next;
else if (!list_empty(&worker->pending))
cur = worker->pending.next;
else
break;

work = list_entry(cur, struct btrfs_work, list);
list_del(&work->list);
clear_bit(WORK_QUEUED_BIT, &work->flags);
Expand All @@ -163,7 +177,6 @@ static int worker_loop(void *arg)

spin_lock_irq(&worker->lock);
check_idle_worker(worker);

}
if (freezing(current)) {
worker->working = 0;
Expand All @@ -178,7 +191,8 @@ static int worker_loop(void *arg)
* jump_in?
*/
smp_mb();
if (!list_empty(&worker->pending))
if (!list_empty(&worker->pending) ||
!list_empty(&worker->prio_pending))
continue;

/*
Expand All @@ -191,7 +205,8 @@ static int worker_loop(void *arg)
*/
schedule_timeout(1);
smp_mb();
if (!list_empty(&worker->pending))
if (!list_empty(&worker->pending) ||
!list_empty(&worker->prio_pending))
continue;

if (kthread_should_stop())
Expand All @@ -200,7 +215,8 @@ static int worker_loop(void *arg)
/* still no more work?, sleep for real */
spin_lock_irq(&worker->lock);
set_current_state(TASK_INTERRUPTIBLE);
if (!list_empty(&worker->pending))
if (!list_empty(&worker->pending) ||
!list_empty(&worker->prio_pending))
goto again_locked;

/*
Expand Down Expand Up @@ -248,6 +264,7 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
INIT_LIST_HEAD(&workers->worker_list);
INIT_LIST_HEAD(&workers->idle_list);
INIT_LIST_HEAD(&workers->order_list);
INIT_LIST_HEAD(&workers->prio_order_list);
spin_lock_init(&workers->lock);
workers->max_workers = max;
workers->idle_thresh = 32;
Expand All @@ -273,6 +290,7 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
}

INIT_LIST_HEAD(&worker->pending);
INIT_LIST_HEAD(&worker->prio_pending);
INIT_LIST_HEAD(&worker->worker_list);
spin_lock_init(&worker->lock);
atomic_set(&worker->num_pending, 0);
Expand Down Expand Up @@ -396,7 +414,10 @@ int btrfs_requeue_work(struct btrfs_work *work)
goto out;

spin_lock_irqsave(&worker->lock, flags);
list_add_tail(&work->list, &worker->pending);
if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
list_add_tail(&work->list, &worker->prio_pending);
else
list_add_tail(&work->list, &worker->pending);
atomic_inc(&worker->num_pending);

/* by definition we're busy, take ourselves off the idle
Expand All @@ -422,6 +443,11 @@ int btrfs_requeue_work(struct btrfs_work *work)
return 0;
}

/*
 * Mark a work item as high priority.
 *
 * Sets WORK_HIGH_PRIO_BIT in work->flags; the queueing paths
 * (btrfs_queue_worker / btrfs_requeue_work) test this bit and place the
 * item on the prio_pending / prio_order_list queues, which the worker
 * threads and ordered-completion handling drain before the normal queues.
 * Must be called before the work is queued for the bit to take effect.
 */
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
* places a struct btrfs_work into the pending queue of one of the kthreads
*/
Expand All @@ -438,15 +464,23 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
worker = find_worker(workers);
if (workers->ordered) {
spin_lock_irqsave(&workers->lock, flags);
list_add_tail(&work->order_list, &workers->order_list);
if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
list_add_tail(&work->order_list,
&workers->prio_order_list);
} else {
list_add_tail(&work->order_list, &workers->order_list);
}
spin_unlock_irqrestore(&workers->lock, flags);
} else {
INIT_LIST_HEAD(&work->order_list);
}

spin_lock_irqsave(&worker->lock, flags);

list_add_tail(&work->list, &worker->pending);
if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
list_add_tail(&work->list, &worker->prio_pending);
else
list_add_tail(&work->list, &worker->pending);
atomic_inc(&worker->num_pending);
check_busy_worker(worker);

Expand Down
2 changes: 2 additions & 0 deletions fs/btrfs/async-thread.h
Expand Up @@ -85,6 +85,7 @@ struct btrfs_workers {
* of work items waiting for completion
*/
struct list_head order_list;
struct list_head prio_order_list;

/* lock for finding the next worker thread to queue on */
spinlock_t lock;
Expand All @@ -98,4 +99,5 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
int btrfs_stop_workers(struct btrfs_workers *workers);
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max);
int btrfs_requeue_work(struct btrfs_work *work);
void btrfs_set_work_high_prio(struct btrfs_work *work);
#endif
17 changes: 12 additions & 5 deletions fs/btrfs/ctree.c
Expand Up @@ -1325,12 +1325,12 @@ static noinline int reada_for_balance(struct btrfs_root *root,
int ret = 0;
int blocksize;

parent = path->nodes[level - 1];
parent = path->nodes[level + 1];
if (!parent)
return 0;

nritems = btrfs_header_nritems(parent);
slot = path->slots[level];
slot = path->slots[level + 1];
blocksize = btrfs_level_size(root, level);

if (slot > 0) {
Expand All @@ -1341,7 +1341,7 @@ static noinline int reada_for_balance(struct btrfs_root *root,
block1 = 0;
free_extent_buffer(eb);
}
if (slot < nritems) {
if (slot + 1 < nritems) {
block2 = btrfs_node_blockptr(parent, slot + 1);
gen = btrfs_node_ptr_generation(parent, slot + 1);
eb = btrfs_find_tree_block(root, block2, blocksize);
Expand All @@ -1351,7 +1351,11 @@ static noinline int reada_for_balance(struct btrfs_root *root,
}
if (block1 || block2) {
ret = -EAGAIN;

/* release the whole path */
btrfs_release_path(root, path);

/* read the blocks */
if (block1)
readahead_tree_block(root, block1, blocksize, 0);
if (block2)
Expand All @@ -1361,7 +1365,7 @@ static noinline int reada_for_balance(struct btrfs_root *root,
eb = read_tree_block(root, block1, blocksize, 0);
free_extent_buffer(eb);
}
if (block1) {
if (block2) {
eb = read_tree_block(root, block2, blocksize, 0);
free_extent_buffer(eb);
}
Expand Down Expand Up @@ -1481,12 +1485,15 @@ read_block_for_search(struct btrfs_trans_handle *trans,
* of the btree by dropping locks before
* we read.
*/
btrfs_release_path(NULL, p);
btrfs_unlock_up_safe(p, level + 1);
btrfs_set_path_blocking(p);

if (tmp)
free_extent_buffer(tmp);
if (p->reada)
reada_for_search(root, p, level, slot, key->objectid);

btrfs_release_path(NULL, p);
tmp = read_tree_block(root, blocknr, blocksize, gen);
if (tmp)
free_extent_buffer(tmp);
Expand Down
9 changes: 7 additions & 2 deletions fs/btrfs/disk-io.c
Expand Up @@ -579,6 +579,10 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
async->bio_flags = bio_flags;

atomic_inc(&fs_info->nr_async_submits);

if (rw & (1 << BIO_RW_SYNCIO))
btrfs_set_work_high_prio(&async->work);

btrfs_queue_worker(&fs_info->workers, &async->work);
#if 0
int limit = btrfs_async_submit_limit(fs_info);
Expand Down Expand Up @@ -656,6 +660,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
mirror_num, 0);
}

/*
* kthread helpers are used to submit writes so that checksumming
* can happen in parallel across all CPUs
Expand Down Expand Up @@ -2095,10 +2100,10 @@ static int write_dev_supers(struct btrfs_device *device,
device->barriers = 0;
get_bh(bh);
lock_buffer(bh);
ret = submit_bh(WRITE, bh);
ret = submit_bh(WRITE_SYNC, bh);
}
} else {
ret = submit_bh(WRITE, bh);
ret = submit_bh(WRITE_SYNC, bh);
}

if (!ret && wait) {
Expand Down

0 comments on commit ccc5ff9

Please sign in to comment.