Commit f126354

Merge pull request #357 from torvalds/master
2 parents 5ad2ca2 + 9c0c4d2 commit f126354

4 files changed: +51 -16 lines changed

block/blk-cgroup.c

Lines changed: 3 additions & 2 deletions

@@ -1897,10 +1897,11 @@ void blk_cgroup_bio_start(struct bio *bio)
 {
         int rwd = blk_cgroup_io_type(bio), cpu;
         struct blkg_iostat_set *bis;
+        unsigned long flags;

         cpu = get_cpu();
         bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
-        u64_stats_update_begin(&bis->sync);
+        flags = u64_stats_update_begin_irqsave(&bis->sync);

         /*
          * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
@@ -1912,7 +1913,7 @@ void blk_cgroup_bio_start(struct bio *bio)
         }
         bis->cur.ios[rwd]++;

-        u64_stats_update_end(&bis->sync);
+        u64_stats_update_end_irqrestore(&bis->sync, flags);
         if (cgroup_subsys_on_dfl(io_cgrp_subsys))
                 cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);
         put_cpu();
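
This hunk switches blk_cgroup_bio_start() from u64_stats_update_begin()/end() to the IRQ-saving variants, so the per-cpu stats seqcount stays consistent if the same counters are also updated from interrupt context. A minimal kernel-context sketch of the pattern (not a standalone program; the struct and function names below are made up for illustration):

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-cpu counter guarded by a u64_stats seqcount; each
 * per-cpu instance would be u64_stats_init()'d during setup. */
struct demo_stat {
        u64 bytes;
        struct u64_stats_sync sync;
};

static DEFINE_PER_CPU(struct demo_stat, demo_stats);

static void demo_account(u64 nbytes)
{
        struct demo_stat *st = get_cpu_ptr(&demo_stats);
        unsigned long flags;

        /* _irqsave/_irqrestore: the write-side critical section stays
         * consistent even if an interrupt on this CPU also updates it. */
        flags = u64_stats_update_begin_irqsave(&st->sync);
        st->bytes += nbytes;
        u64_stats_update_end_irqrestore(&st->sync, flags);

        put_cpu_ptr(&demo_stats);
}

On 64-bit builds these helpers are essentially no-ops; the seqcount and IRQ protection matter on 32-bit, where a 64-bit counter cannot be read or updated atomically.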

block/partitions/core.c

Lines changed: 1 addition & 0 deletions

@@ -423,6 +423,7 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
         device_del(pdev);
 out_put:
         put_device(pdev);
+        return ERR_PTR(err);
 out_put_disk:
         put_disk(disk);
         return ERR_PTR(err);
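
This one-line fix stops the out_put error path from falling through into out_put_disk once put_device(pdev) has handed the remaining cleanup to the partition device's release path. A generic userspace sketch of the goto-unwind idiom (all names hypothetical), showing why a label whose cleanup is taken over by a release function must end with a return:

#include <errno.h>
#include <stdlib.h>

/* Hypothetical two-stage constructor, only to illustrate the idiom:
 * each error label undoes what was acquired before it, and a path whose
 * cleanup is handled elsewhere must return rather than fall through. */
struct widget {
        char *buf;
};

static void widget_release(struct widget *w)    /* frees everything it owns */
{
        free(w->buf);
        free(w);
}

static struct widget *widget_create(size_t len, int fail_late)
{
        struct widget *w;
        int err = -ENOMEM;

        w = calloc(1, sizeof(*w));
        if (!w)
                goto out;

        w->buf = malloc(len);
        if (!w->buf)
                goto out_free_w;

        if (fail_late) {
                err = -EIO;
                widget_release(w);      /* release path now owns buf and w */
                errno = -err;
                return NULL;            /* returning here mirrors the fix:
                                         * falling through to out_free_w
                                         * would free w a second time */
        }
        return w;

out_free_w:
        free(w);
out:
        errno = -err;
        return NULL;
}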

fs/io-wq.c

Lines changed: 5 additions & 2 deletions

@@ -253,7 +253,7 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
                 pr_warn_once("io-wq is not configured for unbound workers");

         raw_spin_lock(&wqe->lock);
-        if (acct->nr_workers == acct->max_workers) {
+        if (acct->nr_workers >= acct->max_workers) {
                 raw_spin_unlock(&wqe->lock);
                 return true;
         }
@@ -1291,15 +1291,18 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)

         rcu_read_lock();
         for_each_node(node) {
+                struct io_wqe *wqe = wq->wqes[node];
                 struct io_wqe_acct *acct;

+                raw_spin_lock(&wqe->lock);
                 for (i = 0; i < IO_WQ_ACCT_NR; i++) {
-                        acct = &wq->wqes[node]->acct[i];
+                        acct = &wqe->acct[i];
                         prev = max_t(int, acct->max_workers, prev);
                         if (new_count[i])
                                 acct->max_workers = new_count[i];
                         new_count[i] = prev;
                 }
+                raw_spin_unlock(&wqe->lock);
         }
         rcu_read_unlock();
         return 0;
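
These two hunks harden io-wq against limits that are lowered at runtime: the creation check becomes >= because nr_workers can now exceed a freshly reduced max_workers, and io_wq_max_workers() takes wqe->lock so its read-modify-write of each acct cannot interleave with worker creation. A simplified userspace sketch of the same discipline, with a pthread mutex standing in for the raw spinlock and hypothetical names:

#include <pthread.h>
#include <stdbool.h>

/* Simplified stand-in for io_wqe_acct: a worker count plus a
 * runtime-adjustable cap. Initialise lock with PTHREAD_MUTEX_INITIALIZER
 * or pthread_mutex_init() before use. */
struct acct {
        pthread_mutex_t lock;   /* stands in for wqe->lock              */
        int nr_workers;
        int max_workers;        /* may be lowered while workers exist   */
};

/* Returns true if a new worker may be created. Uses >= because the cap
 * may have been lowered below the current worker count concurrently. */
static bool may_create_worker(struct acct *a)
{
        bool ok;

        pthread_mutex_lock(&a->lock);
        ok = a->nr_workers < a->max_workers;    /* i.e. reject when nr >= max */
        if (ok)
                a->nr_workers++;
        pthread_mutex_unlock(&a->lock);
        return ok;
}

/* Adjust the cap and report the previous value, all under the same lock
 * so the read-modify-write cannot interleave with may_create_worker(). */
static int set_max_workers(struct acct *a, int new_max)
{
        int prev;

        pthread_mutex_lock(&a->lock);
        prev = a->max_workers;
        if (new_max)            /* 0 means "query only", as in io_wq_max_workers() */
                a->max_workers = new_max;
        pthread_mutex_unlock(&a->lock);
        return prev;
}

As in io_wq_max_workers(), passing 0 leaves the cap unchanged and only reports the previous value.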

fs/io_uring.c

Lines changed: 42 additions & 12 deletions

@@ -456,6 +456,8 @@ struct io_ring_ctx {
                 struct work_struct exit_work;
                 struct list_head tctx_list;
                 struct completion ref_comp;
+                u32 iowq_limits[2];
+                bool iowq_limits_set;
         };
 };

@@ -1368,11 +1370,6 @@ static void io_req_track_inflight(struct io_kiocb *req)
         }
 }

-static inline void io_unprep_linked_timeout(struct io_kiocb *req)
-{
-        req->flags &= ~REQ_F_LINK_TIMEOUT;
-}
-
 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
         if (WARN_ON_ONCE(!req->link))
@@ -6983,7 +6980,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
         switch (io_arm_poll_handler(req)) {
         case IO_APOLL_READY:
                 if (linked_timeout)
-                        io_unprep_linked_timeout(req);
+                        io_queue_linked_timeout(linked_timeout);
                 goto issue_sqe;
         case IO_APOLL_ABORTED:
                 /*
@@ -9638,7 +9635,16 @@ static int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
                 ret = io_uring_alloc_task_context(current, ctx);
                 if (unlikely(ret))
                         return ret;
+
                 tctx = current->io_uring;
+                if (ctx->iowq_limits_set) {
+                        unsigned int limits[2] = { ctx->iowq_limits[0],
+                                                   ctx->iowq_limits[1], };
+
+                        ret = io_wq_max_workers(tctx->io_wq, limits);
+                        if (ret)
+                                return ret;
+                }
         }
         if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
                 node = kmalloc(sizeof(*node), GFP_KERNEL);
@@ -10643,7 +10649,9 @@ static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)

 static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                                         void __user *arg)
+        __must_hold(&ctx->uring_lock)
 {
+        struct io_tctx_node *node;
         struct io_uring_task *tctx = NULL;
         struct io_sq_data *sqd = NULL;
         __u32 new_count[2];
@@ -10674,13 +10682,19 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                 tctx = current->io_uring;
         }

-        ret = -EINVAL;
-        if (!tctx || !tctx->io_wq)
-                goto err;
+        BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));

-        ret = io_wq_max_workers(tctx->io_wq, new_count);
-        if (ret)
-                goto err;
+        memcpy(ctx->iowq_limits, new_count, sizeof(new_count));
+        ctx->iowq_limits_set = true;
+
+        ret = -EINVAL;
+        if (tctx && tctx->io_wq) {
+                ret = io_wq_max_workers(tctx->io_wq, new_count);
+                if (ret)
+                        goto err;
+        } else {
+                memset(new_count, 0, sizeof(new_count));
+        }

         if (sqd) {
                 mutex_unlock(&sqd->lock);
@@ -10690,6 +10704,22 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
         if (copy_to_user(arg, new_count, sizeof(new_count)))
                 return -EFAULT;

+        /* that's it for SQPOLL, only the SQPOLL task creates requests */
+        if (sqd)
+                return 0;
+
+        /* now propagate the restriction to all registered users */
+        list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+                struct io_uring_task *tctx = node->task->io_uring;
+
+                if (WARN_ON_ONCE(!tctx->io_wq))
+                        continue;
+
+                for (i = 0; i < ARRAY_SIZE(new_count); i++)
+                        new_count[i] = ctx->iowq_limits[i];
+                /* ignore errors, it always returns zero anyway */
+                (void)io_wq_max_workers(tctx->io_wq, new_count);
+        }
         return 0;
 err:
         if (sqd) {
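
The io_uring hunks make registered worker limits persistent and ring-wide: io_register_iowq_max_workers() stores them in ctx->iowq_limits and, for non-SQPOLL rings, walks ctx->tctx_list so every already-attached task's io-wq is capped, while __io_uring_add_tctx_node() applies the stored limits to tasks that attach later; the linked-timeout hunks queue the timeout on IO_APOLL_READY instead of just clearing its flag. The user-visible interface is IORING_REGISTER_IOWQ_MAX_WORKERS; a minimal sketch using the liburing wrapper (assuming a liburing recent enough to provide io_uring_register_iowq_max_workers(); error handling trimmed):

#include <stdio.h>
#include <liburing.h>

int main(void)
{
        struct io_uring ring;
        /* limits[0] = bounded workers, limits[1] = unbounded workers;
         * a zero entry leaves that limit unchanged, and the previous
         * values are returned in the same array. */
        unsigned int limits[2] = { 4, 8 };

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        if (io_uring_register_iowq_max_workers(&ring, limits) == 0)
                printf("previous limits: bounded=%u unbounded=%u\n",
                       limits[0], limits[1]);

        io_uring_queue_exit(&ring);
        return 0;
}

The write-back of previous values matches the new_count handling shown above: zero entries query without changing the limit.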
