Skip to content

Commit 7d7efec

Browse files
committed
sched, cgroup: reorganize threadgroup locking
threadgroup_change_begin/end() are used to mark the beginning and end of threadgroup modifying operations to allow code paths which require a threadgroup to stay stable across blocking operations to synchronize against those sections using threadgroup_lock/unlock(). It's currently implemented as a general mechanism in sched.h using per-signal_struct rwsem; however, this never grew non-cgroup use cases and becomes a noop if !CONFIG_CGROUPS. It turns out that cgroups is going to be better served with a different synchronization scheme, and it is a bit silly to keep cgroups-specific details as a general mechanism. What's general here is identifying the places where threadgroups are modified. This patch restructures threadgroup locking so that threadgroup_change_begin/end() become a place where subsystems which need to synchronize against threadgroup changes can hook into. cgroup_threadgroup_change_begin/end(), which operate on the per-signal_struct rwsem, are created, and threadgroup_lock/unlock() are moved to cgroup.c and made static. This is pure reorganization which doesn't cause any functional changes. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org>
1 parent 8ab456a commit 7d7efec

File tree

3 files changed

+69
-36
lines changed

3 files changed

+69
-36
lines changed

include/linux/cgroup-defs.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#include <linux/mutex.h>
1515
#include <linux/rcupdate.h>
1616
#include <linux/percpu-refcount.h>
17+
#include <linux/percpu-rwsem.h>
1718
#include <linux/workqueue.h>
1819

1920
#ifdef CONFIG_CGROUPS
@@ -460,5 +461,14 @@ struct cgroup_subsys {
460461
unsigned int depends_on;
461462
};
462463

464+
void cgroup_threadgroup_change_begin(struct task_struct *tsk);
465+
void cgroup_threadgroup_change_end(struct task_struct *tsk);
466+
467+
#else /* CONFIG_CGROUPS */
468+
469+
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
470+
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
471+
463472
#endif /* CONFIG_CGROUPS */
473+
464474
#endif /* _LINUX_CGROUP_DEFS_H */

include/linux/sched.h

Lines changed: 17 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,7 @@ struct sched_param {
5858
#include <linux/uidgid.h>
5959
#include <linux/gfp.h>
6060
#include <linux/magic.h>
61+
#include <linux/cgroup-defs.h>
6162

6263
#include <asm/processor.h>
6364

@@ -2648,53 +2649,33 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
26482649
spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
26492650
}
26502651

2651-
#ifdef CONFIG_CGROUPS
2652-
static inline void threadgroup_change_begin(struct task_struct *tsk)
2653-
{
2654-
down_read(&tsk->signal->group_rwsem);
2655-
}
2656-
static inline void threadgroup_change_end(struct task_struct *tsk)
2657-
{
2658-
up_read(&tsk->signal->group_rwsem);
2659-
}
2660-
26612652
/**
2662-
* threadgroup_lock - lock threadgroup
2663-
* @tsk: member task of the threadgroup to lock
2664-
*
2665-
* Lock the threadgroup @tsk belongs to. No new task is allowed to enter
2666-
* and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2667-
* change ->group_leader/pid. This is useful for cases where the threadgroup
2668-
* needs to stay stable across blockable operations.
2653+
* threadgroup_change_begin - mark the beginning of changes to a threadgroup
2654+
* @tsk: task causing the changes
26692655
*
2670-
* fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2671-
* synchronization. While held, no new task will be added to threadgroup
2672-
* and no existing live task will have its PF_EXITING set.
2673-
*
2674-
* de_thread() does threadgroup_change_{begin|end}() when a non-leader
2675-
* sub-thread becomes a new leader.
2656+
* All operations which modify a threadgroup - a new thread joining the
2657+
* group, death of a member thread (the assertion of PF_EXITING) and
2658+
* exec(2) dethreading the process and replacing the leader - are wrapped
2659+
* by threadgroup_change_{begin|end}(). This is to provide a place which
2660+
* subsystems needing threadgroup stability can hook into for
2661+
* synchronization.
26762662
*/
2677-
static inline void threadgroup_lock(struct task_struct *tsk)
2663+
static inline void threadgroup_change_begin(struct task_struct *tsk)
26782664
{
2679-
down_write(&tsk->signal->group_rwsem);
2665+
might_sleep();
2666+
cgroup_threadgroup_change_begin(tsk);
26802667
}
26812668

26822669
/**
2683-
* threadgroup_unlock - unlock threadgroup
2684-
* @tsk: member task of the threadgroup to unlock
2670+
* threadgroup_change_end - mark the end of changes to a threadgroup
2671+
* @tsk: task causing the changes
26852672
*
2686-
* Reverse threadgroup_lock().
2673+
* See threadgroup_change_begin().
26872674
*/
2688-
static inline void threadgroup_unlock(struct task_struct *tsk)
2675+
static inline void threadgroup_change_end(struct task_struct *tsk)
26892676
{
2690-
up_write(&tsk->signal->group_rwsem);
2677+
cgroup_threadgroup_change_end(tsk);
26912678
}
2692-
#else
2693-
static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2694-
static inline void threadgroup_change_end(struct task_struct *tsk) {}
2695-
static inline void threadgroup_lock(struct task_struct *tsk) {}
2696-
static inline void threadgroup_unlock(struct task_struct *tsk) {}
2697-
#endif
26982679

26992680
#ifndef __HAVE_THREAD_FUNCTIONS
27002681

kernel/cgroup.c

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -848,6 +848,48 @@ static struct css_set *find_css_set(struct css_set *old_cset,
848848
return cset;
849849
}
850850

851+
void cgroup_threadgroup_change_begin(struct task_struct *tsk)
852+
{
853+
down_read(&tsk->signal->group_rwsem);
854+
}
855+
856+
void cgroup_threadgroup_change_end(struct task_struct *tsk)
857+
{
858+
up_read(&tsk->signal->group_rwsem);
859+
}
860+
861+
/**
862+
* threadgroup_lock - lock threadgroup
863+
* @tsk: member task of the threadgroup to lock
864+
*
865+
* Lock the threadgroup @tsk belongs to. No new task is allowed to enter
866+
* and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
867+
* change ->group_leader/pid. This is useful for cases where the threadgroup
868+
* needs to stay stable across blockable operations.
869+
*
870+
* fork and exit explicitly call threadgroup_change_{begin|end}() for
871+
* synchronization. While held, no new task will be added to threadgroup
872+
* and no existing live task will have its PF_EXITING set.
873+
*
874+
* de_thread() does threadgroup_change_{begin|end}() when a non-leader
875+
* sub-thread becomes a new leader.
876+
*/
877+
static void threadgroup_lock(struct task_struct *tsk)
878+
{
879+
down_write(&tsk->signal->group_rwsem);
880+
}
881+
882+
/**
883+
* threadgroup_unlock - unlock threadgroup
884+
* @tsk: member task of the threadgroup to unlock
885+
*
886+
* Reverse threadgroup_lock().
887+
*/
888+
static inline void threadgroup_unlock(struct task_struct *tsk)
889+
{
890+
up_write(&tsk->signal->group_rwsem);
891+
}
892+
851893
static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
852894
{
853895
struct cgroup *root_cgrp = kf_root->kn->priv;

0 commit comments

Comments
 (0)