Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
8374304
XXX: Update rust/scx_utils/vmlinux.tar.zst for 6.17-rc4
Sep 6, 2025
ae41fd9
XXX ATQ: rbtree -> minheap
Sep 5, 2025
e153852
XXX: scx_lavd: 30 sec -> 3 sec timeout
Sep 2, 2025
6b24b95
scripts: Add a script to get a cgroup path from its id
Sep 19, 2025
80efcce
lib: cgroup_bw: Add skeleton for CPU bandwidth control.
Aug 20, 2025
1751d8d
scx_lavd: Reduce the BPF program size.
Sep 10, 2025
a138435
scx_lavd: Initial integration with CPU bandwidth control.
Aug 20, 2025
05f2218
scx_lavd: rename struct task_ctx to task_ctx
etsal Sep 11, 2025
6185835
scx_lavd: move task_ctx to arenas
etsal Sep 11, 2025
c8e808f
scx_lavd: mark functions taking a task context with __arg_arena
etsal Sep 11, 2025
ec731c8
lib: cgroup_bw: Implement scx_cgroup_bw_lib_init().
Aug 22, 2025
acbb5c7
lib: cgroup_bw: Implement scx_cgroup_bw_init().
Aug 24, 2025
a521348
lib: cgroup_bw: Implement scx_cgroup_bw_set().
Aug 25, 2025
abdd701
lib: cgroup_bw: Implement scx_cgroup_bw_exit().
Aug 25, 2025
a97586f
lib: cgroup_bw: Implement scx_cgroup_bw_reserve().
Aug 26, 2025
1196ea0
lib: cgroup_bw: Implement scx_cgroup_bw_consume().
Aug 30, 2025
5f72658
lib: cgroup_bw: Implement scx_cgroup_bw_put_aside().
Sep 2, 2025
7f5baf4
lib: cgroup_bw: Implement replenish timer and cbw_reenqueue_cgroup().
Aug 31, 2025
1ff27cf
scx_lavd: Support cpu.max at ops.stopping().
Sep 2, 2025
27df79c
scx_lavd: Support cpu.max at enqueue-like paths.
Sep 1, 2025
f556742
Revert "XXX ATQ: rbtree -> minheap"
etsal Sep 26, 2025
1718fc6
scx_lavd: move pid to main task_ctx
etsal Sep 26, 2025
39bedb8
lib/atq: factor out task insertion into scx_atq_insert_node
etsal Sep 26, 2025
62d3496
lib/rbtree: add noalloc/nofree variants of the API
etsal Sep 26, 2025
a092d41
lib/rbtree: turn rbtree_insert_mode from a per-insert into a per-tree…
etsal Sep 26, 2025
bd294e2
atq: only use embedded rbnodes on scx_atq_insert_*()
etsal Sep 26, 2025
e13ac08
lib/cgroup_bw: move to rbnode-based ATQ API
etsal Sep 26, 2025
41c852c
[wip] stack depth exceeded debugging
etsal Sep 27, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

96 changes: 31 additions & 65 deletions lib/atq.bpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ u64 scx_atq_create_internal(bool fifo, size_t capacity)
if (!atq)
return (u64)NULL;

atq->tree = rb_create();
atq->tree = rb_create(RB_NOALLOC, RB_DUPLICATE);
if (!atq->tree)
return (u64)NULL;

Expand All @@ -28,99 +28,65 @@ u64 scx_atq_create_internal(bool fifo, size_t capacity)
return (u64)atq;
}

/*
* XXXETSAL: We are using the __hidden antipattern for API functions because some
* older kernels do not allow function calls with preemption disabled. We will replace
* these annotations with the proper ones (__weak) at some point in the future.
*/

__hidden
int scx_atq_insert(scx_atq_t *atq, u64 taskc_ptr)
int scx_atq_insert_vtime(scx_atq_t __arg_arena *atq, rbnode_t __arg_arena *node, u64 taskc_ptr, u64 vtime)
{
rbnode_t *node;
int ret;

if (!atq->fifo)
return -EINVAL;

/*
* Use dummy sequence number because we're
* outside of the critical section.
*/
node = rb_node_alloc(atq->tree, 0, taskc_ptr);
if (!node)
return -ENOMEM;

ret = arena_spin_lock(&atq->lock);
if (ret) {
rb_node_free(atq->tree, node);
if (ret)
return ret;
}

if (unlikely(atq->size == atq->capacity)) {
ret = -ENOSPC;
goto error;
goto done;
}

if ((vtime == SCX_ATQ_FIFO) != atq->fifo) {
ret = -EINVAL;
goto done;
}

/*
* "Leak" the seq on error. We only want
* For FIFO, "Leak" the seq on error. We only want
* sequence numbers to be monotonic, not
* consecutive.
*/
node->key = atq->seq++;
node->key = (vtime == SCX_ATQ_FIFO) ? atq->seq++ : vtime;
node->value = taskc_ptr;

ret = rb_insert_node(atq->tree, node, RB_DUPLICATE);
ret = rb_insert_node(atq->tree, node);
if (ret)
goto error;
goto done;

atq->size += 1;

done:
arena_spin_unlock(&atq->lock);

return 0;

error:
arena_spin_unlock(&atq->lock);
rb_node_free(atq->tree, node);

return ret;
}

__hidden
int scx_atq_insert_vtime(scx_atq_t *atq, u64 taskc_ptr, u64 vtime)
int scx_atq_insert(scx_atq_t *atq, rbnode_t __arg_arena *node, u64 taskc_ptr)
{
rbnode_t *node;
int ret;

if (atq->fifo)
return -EINVAL;

node = rb_node_alloc(atq->tree, vtime, taskc_ptr);
if (!node)
return -ENOMEM;

ret = arena_spin_lock(&atq->lock);
if (ret) {
rb_node_free(atq->tree, node);
return ret;
}

if (unlikely(atq->size == atq->capacity)) {
ret = -ENOSPC;
goto error;
}

ret = rb_insert_node(atq->tree, node, RB_DUPLICATE);
if (ret)
goto error;

atq->size += 1;

arena_spin_unlock(&atq->lock);

return 0;

error:
arena_spin_unlock(&atq->lock);
rb_node_free(atq->tree, node);

return ret;
return scx_atq_insert_vtime(atq, node, taskc_ptr, SCX_ATQ_FIFO);
}

/*
* XXXETSAL: There is a mismatch between insert and pop here: We are inserting
* rbnodes, but returning a key/value pair. This is deliberate: We can use CO:RE
* to find the rbnode from any scheduler's task_ctx in a generic way, but there is
no container_of equivalent that lets us go rbnode -> task_ctx (especially since
the actual layout of task_ctx varies by scheduler). For now, pass the task_ctx
as a value to the node and use it to find the original rbnode.
*/
__hidden
u64 scx_atq_pop(scx_atq_t *atq)
{
Expand Down
Loading
Loading