Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching

Pull livepatching fix from Jiri Kosina:
 "Shadow variable API list_head initialization fix from Petr Mladek"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
  livepatch: Allow to call a custom callback when freeing shadow variables
  livepatch: Initialize shadow variables safely by a custom callback
torvalds committed Apr 20, 2018
2 parents 36e584d + 3b2c77d commit 41e3bef
Showing 5 changed files with 163 additions and 81 deletions.
41 changes: 29 additions & 12 deletions Documentation/livepatch/shadow-vars.txt
@@ -34,9 +34,13 @@ meta-data and shadow-data:
- data[] - storage for shadow data

It is important to note that the klp_shadow_alloc() and
klp_shadow_get_or_alloc() calls, described below, store a *copy* of the
data that the functions are provided. Callers should provide whatever
mutual exclusion is required of the shadow data.
klp_shadow_get_or_alloc() zero the variable by default.
They also allow calling a custom constructor function when a non-zero
value is needed. Callers should provide whatever mutual exclusion
is required.

Note that the constructor is called under the klp_shadow_lock spinlock. This
allows actions that can be done only once, when a new variable is allocated.

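For illustration, the list_head initialization problem mentioned in the pull
message maps naturally onto such a constructor. A minimal sketch, assuming a
hypothetical SV_LIST id and shadow_list_ctor() that are not part of this
commit:

#define SV_LIST 1

static int shadow_list_ctor(void *obj, void *shadow_data, void *ctor_data)
{
	struct list_head *head = shadow_data;

	/*
	 * Runs once, under klp_shadow_lock, on the freshly zeroed
	 * allocation. Copying a pre-initialized list_head into the
	 * shadow area instead would leave it pointing at the source
	 * copy's address.
	 */
	INIT_LIST_HEAD(head);
	return 0;
}

...
head = klp_shadow_get_or_alloc(obj, SV_LIST, sizeof(*head),
			       GFP_ATOMIC, shadow_list_ctor, NULL);
...
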
* klp_shadow_get() - retrieve a shadow variable data pointer
- search hashtable for <obj, id> pair
@@ -47,7 +51,7 @@ mutual exclusion is required of the shadow data.
- WARN and return NULL
- if <obj, id> doesn't already exist
- allocate a new shadow variable
- copy data into the new shadow variable
- initialize the variable using a custom constructor and data when provided
- add <obj, id> to the global hashtable

* klp_shadow_get_or_alloc() - get existing or alloc a new shadow variable
@@ -56,16 +60,20 @@ mutual exclusion is required of the shadow data.
- return existing shadow variable
- if <obj, id> doesn't already exist
- allocate a new shadow variable
- copy data into the new shadow variable
- initialize the variable using a custom constructor and data when provided
- add <obj, id> pair to the global hashtable

* klp_shadow_free() - detach and free a <obj, id> shadow variable
- find and remove a <obj, id> reference from global hashtable
- if found, free shadow variable
- if found
- call destructor function if defined
- free shadow variable

* klp_shadow_free_all() - detach and free all <*, id> shadow variables
- find and remove any <*, id> references from global hashtable
- if found, free shadow variable
- if found
- call destructor function if defined
- free shadow variable
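
To illustrate the destructor hook from the two entries above: when the shadow
data holds a pointer to separately allocated memory, a callback along the
following lines could release it. This is only a sketch; SV_LEAK and
shadow_leak_dtor() are illustrative names, not part of this commit:

#define SV_LEAK 2

static void shadow_leak_dtor(void *obj, void *shadow_data)
{
	void *d = *(void **)shadow_data;

	/* Release whatever the shadow variable points to. */
	kfree(d);
}

...
klp_shadow_free(obj, SV_LEAK, shadow_leak_dtor);
...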


2. Use cases
@@ -107,7 +115,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);

/* Attach a corresponding shadow variable, then initialize it */
ps_lock = klp_shadow_alloc(sta, PS_LOCK, NULL, sizeof(*ps_lock), gfp);
ps_lock = klp_shadow_alloc(sta, PS_LOCK, sizeof(*ps_lock), gfp,
NULL, NULL);
if (!ps_lock)
goto shadow_fail;
spin_lock_init(ps_lock);
@@ -131,7 +140,7 @@ variable:

void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
{
klp_shadow_free(sta, PS_LOCK);
klp_shadow_free(sta, PS_LOCK, NULL);
kfree(sta);
...

@@ -148,16 +157,24 @@ shadow variables to parents already in-flight.
For commit 1d147bfa6429, a good spot to allocate a shadow spinlock is
inside ieee80211_sta_ps_deliver_wakeup():

int ps_lock_shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
{
spinlock_t *lock = shadow_data;

spin_lock_init(lock);
return 0;
}

#define PS_LOCK 1
void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
{
DEFINE_SPINLOCK(ps_lock_fallback);
spinlock_t *ps_lock;

/* sync with ieee80211_tx_h_unicast_ps_buf */
ps_lock = klp_shadow_get_or_alloc(sta, PS_LOCK,
&ps_lock_fallback, sizeof(ps_lock_fallback),
GFP_ATOMIC);
sizeof(*ps_lock), GFP_ATOMIC,
ps_lock_shadow_ctor, NULL);

if (ps_lock)
spin_lock(ps_lock);
...
19 changes: 13 additions & 6 deletions include/linux/livepatch.h
@@ -186,13 +186,20 @@ static inline bool klp_have_reliable_stack(void)
IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
}

typedef int (*klp_shadow_ctor_t)(void *obj,
void *shadow_data,
void *ctor_data);
typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data);

void *klp_shadow_get(void *obj, unsigned long id);
void *klp_shadow_alloc(void *obj, unsigned long id, void *data,
size_t size, gfp_t gfp_flags);
void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
size_t size, gfp_t gfp_flags);
void klp_shadow_free(void *obj, unsigned long id);
void klp_shadow_free_all(unsigned long id);
void *klp_shadow_alloc(void *obj, unsigned long id,
size_t size, gfp_t gfp_flags,
klp_shadow_ctor_t ctor, void *ctor_data);
void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
size_t size, gfp_t gfp_flags,
klp_shadow_ctor_t ctor, void *ctor_data);
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);

#else /* !CONFIG_LIVEPATCH */

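The new klp_shadow_alloc()/klp_shadow_get_or_alloc() prototypes above also
take a @ctor_data pointer, which neither documentation example uses. A hedged
sketch of passing a seed value through it (SV_COUNTER and sv_counter_ctor()
are hypothetical names, not part of this commit):

#define SV_COUNTER 3

static int sv_counter_ctor(void *obj, void *shadow_data, void *ctor_data)
{
	int *counter = shadow_data;

	/* Seed the shadow variable with the caller-supplied value. */
	*counter = *(int *)ctor_data;
	return 0;
}

...
int seed = 42;
int *counter;

counter = klp_shadow_alloc(obj, SV_COUNTER, sizeof(*counter),
			   GFP_KERNEL, sv_counter_ctor, &seed);
...

Because the constructor runs synchronously inside klp_shadow_alloc(), pointing
@ctor_data at a local variable like this is safe.
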
108 changes: 71 additions & 37 deletions kernel/livepatch/shadow.c
@@ -113,8 +113,10 @@ void *klp_shadow_get(void *obj, unsigned long id)
}
EXPORT_SYMBOL_GPL(klp_shadow_get);

static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
size_t size, gfp_t gfp_flags, bool warn_on_exist)
static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id,
size_t size, gfp_t gfp_flags,
klp_shadow_ctor_t ctor, void *ctor_data,
bool warn_on_exist)
{
struct klp_shadow *new_shadow;
void *shadow_data;
@@ -125,18 +127,15 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
if (shadow_data)
goto exists;

/* Allocate a new shadow variable for use inside the lock below */
/*
* Allocate a new shadow variable. Fill it with zeroes by default.
* More complex setup can be done by the @ctor function. But it is
* called only when the buffer is really used (under klp_shadow_lock).
*/
new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags);
if (!new_shadow)
return NULL;

new_shadow->obj = obj;
new_shadow->id = id;

/* Initialize the shadow variable if data provided */
if (data)
memcpy(new_shadow->data, data, size);

/* Look for <obj, id> again under the lock */
spin_lock_irqsave(&klp_shadow_lock, flags);
shadow_data = klp_shadow_get(obj, id);
@@ -150,6 +149,22 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
goto exists;
}

new_shadow->obj = obj;
new_shadow->id = id;

if (ctor) {
int err;

err = ctor(obj, new_shadow->data, ctor_data);
if (err) {
spin_unlock_irqrestore(&klp_shadow_lock, flags);
kfree(new_shadow);
pr_err("Failed to construct shadow variable <%p, %lx> (%d)\n",
obj, id, err);
return NULL;
}
}

/* No <obj, id> found, so attach the newly allocated one */
hash_add_rcu(klp_shadow_hash, &new_shadow->node,
(unsigned long)new_shadow->obj);
@@ -170,64 +185,84 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
* klp_shadow_alloc() - allocate and add a new shadow variable
* @obj: pointer to parent object
* @id: data identifier
* @data: pointer to data to attach to parent
* @size: size of attached data
* @gfp_flags: GFP mask for allocation
* @ctor: custom constructor to initialize the shadow data (optional)
* @ctor_data: pointer to any data needed by @ctor (optional)
*
* Allocates @size bytes for new shadow variable data using @gfp_flags.
* The data are zeroed by default. They are further initialized by @ctor
* function if it is not NULL. The new shadow variable is then added
* to the global hashtable.
*
* Allocates @size bytes for new shadow variable data using @gfp_flags
* and copies @size bytes from @data into the new shadow variable's own
* data space. If @data is NULL, @size bytes are still allocated, but
* no copy is performed. The new shadow variable is then added to the
* global hashtable.
* If an existing <obj, id> shadow variable can be found, this routine will
* issue a WARN, exit early and return NULL.
*
* If an existing <obj, id> shadow variable can be found, this routine
* will issue a WARN, exit early and return NULL.
* This function guarantees that the constructor function is called only when
* the variable did not exist before. The cost is that @ctor is called
* in atomic context under a spin lock.
*
* Return: the shadow variable data element, NULL on duplicate or
* failure.
*/
void *klp_shadow_alloc(void *obj, unsigned long id, void *data,
size_t size, gfp_t gfp_flags)
void *klp_shadow_alloc(void *obj, unsigned long id,
size_t size, gfp_t gfp_flags,
klp_shadow_ctor_t ctor, void *ctor_data)
{
return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, true);
return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags,
ctor, ctor_data, true);
}
EXPORT_SYMBOL_GPL(klp_shadow_alloc);

/**
* klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable
* @obj: pointer to parent object
* @id: data identifier
* @data: pointer to data to attach to parent
* @size: size of attached data
* @gfp_flags: GFP mask for allocation
* @ctor: custom constructor to initialize the shadow data (optional)
* @ctor_data: pointer to any data needed by @ctor (optional)
*
* Returns a pointer to existing shadow data if an <obj, id> shadow
* variable is already present. Otherwise, it creates a new shadow
* variable like klp_shadow_alloc().
*
* This function guarantees that only one shadow variable exists with
* the given @id for the given @obj. It also guarantees that the shadow
* variable will be initialized by the given @data only when it did not
* exist before.
* This function guarantees that only one shadow variable exists with the given
* @id for the given @obj. It also guarantees that the constructor function
* will be called only when the variable did not exist before. The cost is
* that @ctor is called in atomic context under a spin lock.
*
* Return: the shadow variable data element, NULL on failure.
*/
void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
size_t size, gfp_t gfp_flags)
void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
size_t size, gfp_t gfp_flags,
klp_shadow_ctor_t ctor, void *ctor_data)
{
return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, false);
return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags,
ctor, ctor_data, false);
}
EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc);

static void klp_shadow_free_struct(struct klp_shadow *shadow,
klp_shadow_dtor_t dtor)
{
hash_del_rcu(&shadow->node);
if (dtor)
dtor(shadow->obj, shadow->data);
kfree_rcu(shadow, rcu_head);
}

/**
* klp_shadow_free() - detach and free a <obj, id> shadow variable
* @obj: pointer to parent object
* @id: data identifier
* @dtor: custom callback that can be used to unregister the variable
* and/or free data that the shadow variable points to (optional)
*
* This function releases the memory for this <obj, id> shadow variable
* instance, callers should stop referencing it accordingly.
*/
void klp_shadow_free(void *obj, unsigned long id)
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
{
struct klp_shadow *shadow;
unsigned long flags;
@@ -239,8 +274,7 @@ void klp_shadow_free(void *obj, unsigned long id)
(unsigned long)obj) {

if (klp_shadow_match(shadow, obj, id)) {
hash_del_rcu(&shadow->node);
kfree_rcu(shadow, rcu_head);
klp_shadow_free_struct(shadow, dtor);
break;
}
}
@@ -252,11 +286,13 @@ EXPORT_SYMBOL_GPL(klp_shadow_free);
/**
* klp_shadow_free_all() - detach and free all <*, id> shadow variables
* @id: data identifier
* @dtor: custom callback that can be used to unregister the variable
* and/or free data that the shadow variable points to (optional)
*
* This function releases the memory for all <*, id> shadow variable
* instances, callers should stop referencing them accordingly.
*/
void klp_shadow_free_all(unsigned long id)
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
{
struct klp_shadow *shadow;
unsigned long flags;
@@ -266,10 +302,8 @@ void klp_shadow_free_all(unsigned long id)

/* Delete all <*, id> from hash */
hash_for_each(klp_shadow_hash, i, shadow, node) {
if (klp_shadow_match(shadow, shadow->obj, id)) {
hash_del_rcu(&shadow->node);
kfree_rcu(shadow, rcu_head);
}
if (klp_shadow_match(shadow, shadow->obj, id))
klp_shadow_free_struct(shadow, dtor);
}

spin_unlock_irqrestore(&klp_shadow_lock, flags);
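
One practical consequence of the kernel-doc above ("@ctor is called in atomic
context under a spin lock") is that a constructor must not sleep. A sketch of
a constructor that itself allocates memory and is therefore forced to use
GFP_ATOMIC (sv_buf_ctor() and SV_BUF_LEN are illustrative names, not part of
this commit):

#define SV_BUF_LEN	32

static int sv_buf_ctor(void *obj, void *shadow_data, void *ctor_data)
{
	void **buf = shadow_data;

	/* klp_shadow_lock is held here: no sleeping allocations. */
	*buf = kzalloc(SV_BUF_LEN, GFP_ATOMIC);
	if (!*buf)
		return -ENOMEM;

	return 0;
}

A non-zero return value makes __klp_shadow_get_or_alloc() free the
preallocated structure and return NULL, as the error path above shows.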