Skip to content

Commit

Permalink
uevent: send events in correct order according to seqnum (v3)
Browse files Browse the repository at this point in the history
commit 7b60a18da393ed70db043a777fd9e6d5363077c4 upstream.

The queue handling in the udev daemon assumes that the events are
ordered.

Before this patch, uevent_seqnum was incremented under sequence_lock,
then the event was sent under uevent_sock_mutex. In other words, the
code contained a window between incrementing the seqnum and sending
the event, during which events could be emitted out of order.

This patch locks uevent_sock_mutex before incrementing uevent_seqnum.

v2: delete sequence_lock, uevent_seqnum is protected by uevent_sock_mutex
v3: unlock the mutex before the goto exit

Thanks to Kay for the comments.

Signed-off-by: Andrew Vagin <avagin@openvz.org>
Tested-By: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  • Loading branch information
avagin authored and gregkh committed Apr 2, 2012
1 parent d16d493 commit 377c2f4
Showing 1 changed file with 9 additions and 10 deletions.
19 changes: 9 additions & 10 deletions lib/kobject_uevent.c
Expand Up @@ -29,16 +29,17 @@


u64 uevent_seqnum; u64 uevent_seqnum;
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
static DEFINE_SPINLOCK(sequence_lock);
#ifdef CONFIG_NET #ifdef CONFIG_NET
struct uevent_sock { struct uevent_sock {
struct list_head list; struct list_head list;
struct sock *sk; struct sock *sk;
}; };
static LIST_HEAD(uevent_sock_list); static LIST_HEAD(uevent_sock_list);
static DEFINE_MUTEX(uevent_sock_mutex);
#endif #endif


/* This lock protects uevent_seqnum and uevent_sock_list */
static DEFINE_MUTEX(uevent_sock_mutex);

/* the strings here must match the enum in include/linux/kobject.h */ /* the strings here must match the enum in include/linux/kobject.h */
static const char *kobject_actions[] = { static const char *kobject_actions[] = {
[KOBJ_ADD] = "add", [KOBJ_ADD] = "add",
Expand Down Expand Up @@ -136,7 +137,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
struct kobject *top_kobj; struct kobject *top_kobj;
struct kset *kset; struct kset *kset;
const struct kset_uevent_ops *uevent_ops; const struct kset_uevent_ops *uevent_ops;
u64 seq;
int i = 0; int i = 0;
int retval = 0; int retval = 0;
#ifdef CONFIG_NET #ifdef CONFIG_NET
Expand Down Expand Up @@ -243,17 +243,16 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
else if (action == KOBJ_REMOVE) else if (action == KOBJ_REMOVE)
kobj->state_remove_uevent_sent = 1; kobj->state_remove_uevent_sent = 1;


mutex_lock(&uevent_sock_mutex);
/* we will send an event, so request a new sequence number */ /* we will send an event, so request a new sequence number */
spin_lock(&sequence_lock); retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum);
seq = ++uevent_seqnum; if (retval) {
spin_unlock(&sequence_lock); mutex_unlock(&uevent_sock_mutex);
retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
if (retval)
goto exit; goto exit;
}


#if defined(CONFIG_NET) #if defined(CONFIG_NET)
/* send netlink message */ /* send netlink message */
mutex_lock(&uevent_sock_mutex);
list_for_each_entry(ue_sk, &uevent_sock_list, list) { list_for_each_entry(ue_sk, &uevent_sock_list, list) {
struct sock *uevent_sock = ue_sk->sk; struct sock *uevent_sock = ue_sk->sk;
struct sk_buff *skb; struct sk_buff *skb;
Expand Down Expand Up @@ -287,8 +286,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
} else } else
retval = -ENOMEM; retval = -ENOMEM;
} }
mutex_unlock(&uevent_sock_mutex);
#endif #endif
mutex_unlock(&uevent_sock_mutex);


/* call uevent_helper, usually only enabled during early boot */ /* call uevent_helper, usually only enabled during early boot */
if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
Expand Down

0 comments on commit 377c2f4

Please sign in to comment.