Merge branch 'akpm' (Andrew's patch bomb)

Merge the emailed series of 19 patches from Andrew Morton

* akpm:
  rapidio/tsi721: fix queue wrapping bug in inbound doorbell handler
  memcg: fix mapcount check in move charge code for anonymous page
  mm: thp: fix BUG on mm->nr_ptes
  alpha: fix 32/64-bit bug in futex support
  memcg: fix GPF when cgroup removal races with last exit
  debugobjects: Fix selftest for static warnings
  floppy/scsi: fix setting of BIO flags
  memcg: fix deadlock by inverting lrucare nesting
  drivers/rtc/rtc-r9701.c: fix crash in r9701_remove()
  c2port: class_create() returns an ERR_PTR
  pps: class_create() returns an ERR_PTR, not NULL
  hung_task: fix the broken rcu_lock_break() logic
  vfork: kill PF_STARTING
  coredump_wait: don't call complete_vfork_done()
  vfork: make it killable
  vfork: introduce complete_vfork_done()
  aio: wake up waiters when freeing unused kiocbs
  kprobes: return proper error code from register_kprobe()
  kmsg_dump: don't run on non-error paths by default
torvalds committed Mar 5, 2012
2 parents 055bf38 + b24823e, commit 3e85fb9
Showing 24 changed files with 156 additions and 164 deletions.
6 changes: 6 additions & 0 deletions Documentation/kernel-parameters.txt
@@ -2211,6 +2211,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.

default: off.

printk.always_kmsg_dump=
Trigger kmsg_dump for cases other than kernel oops or
panics
Format: <bool> (1/Y/y=enable, 0/N/n=disable)
default: disabled

printk.time= Show timing data prefixed to each printk message line
Format: <bool> (1/Y/y=enable, 0/N/n=disable)

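As a usage illustration (exact bootloader syntax varies), the new switch is a plain boolean printk parameter appended to the kernel command line, for example:

	printk.always_kmsg_dump=1

With it unset, registered kmsg dumpers only run for the oops and panic reasons; see the reordered reason list in include/linux/kmsg_dump.h further down.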
2 changes: 1 addition & 1 deletion arch/alpha/include/asm/futex.h
@@ -108,7 +108,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" lda $31,3b-2b(%0)\n"
" .previous\n"
: "+r"(ret), "=&r"(prev), "=&r"(cmp)
: "r"(uaddr), "r"((long)oldval), "r"(newval)
: "r"(uaddr), "r"((long)(int)oldval), "r"(newval)
: "memory");

*uval = prev;
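In the hunk above the operand list appears twice because the constraint "r"((long)oldval) is replaced by "r"((long)(int)oldval). On Alpha the 32-bit locked load (ldl_l) sign-extends the value into a 64-bit register, so the expected value has to be sign-extended the same way or the comparison fails for any futex word with bit 31 set (for example when FUTEX_WAITERS is set). A minimal user-space illustration of the difference, not kernel code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t oldval = 0x80000000u;      /* bit 31 set */

	long zero_ext = (long)oldval;       /* 0x0000000080000000 on a 64-bit machine */
	long sign_ext = (long)(int)oldval;  /* 0xffffffff80000000, matches a sign-extending load */

	printf("%#lx vs %#lx\n", (unsigned long)zero_ext, (unsigned long)sign_ext);
	return 0;
}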
2 changes: 1 addition & 1 deletion drivers/block/floppy.c
@@ -3832,7 +3832,7 @@ static int __floppy_read_block_0(struct block_device *bdev)
bio.bi_size = size;
bio.bi_bdev = bdev;
bio.bi_sector = 0;
bio.bi_flags = BIO_QUIET;
bio.bi_flags = (1 << BIO_QUIET);
init_completion(&complete);
bio.bi_private = &complete;
bio.bi_end_io = floppy_rb0_complete;
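The floppy change above, and the identical sd_dif.c change further down, fix the same mix-up: BIO_QUIET and BIO_MAPPED_INTEGRITY are bit numbers, while bio->bi_flags holds a bit mask, so the flag has to be shifted into position rather than assigned or OR-ed in as a raw value. A small stand-alone sketch of the distinction (the enum values here are made up for illustration):

#include <stdio.h>

enum { BIO_QUIET = 10, BIO_MAPPED_INTEGRITY = 11 };  /* bit numbers, not masks */

int main(void)
{
	unsigned long bi_flags = 0;

	bi_flags = (1UL << BIO_QUIET);               /* correct: mask with bit 10 set */
	bi_flags |= (1UL << BIO_MAPPED_INTEGRITY);   /* correct: OR in another single bit */

	/* the old code stored the bit number itself, i.e. 10 == bits 1 and 3 */
	printf("mask %#lx, buggy assignment would have been %#x\n", bi_flags, BIO_QUIET);
	return 0;
}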
4 changes: 2 additions & 2 deletions drivers/misc/c2port/core.c
@@ -984,9 +984,9 @@ static int __init c2port_init(void)
" - (C) 2007 Rodolfo Giometti\n");

c2port_class = class_create(THIS_MODULE, "c2port");
if (!c2port_class) {
if (IS_ERR(c2port_class)) {
printk(KERN_ERR "c2port: failed to allocate class\n");
return -ENOMEM;
return PTR_ERR(c2port_class);
}
c2port_class->dev_attrs = c2port_attrs;

4 changes: 2 additions & 2 deletions drivers/pps/pps.c
@@ -369,9 +369,9 @@ static int __init pps_init(void)
int err;

pps_class = class_create(THIS_MODULE, "pps");
if (!pps_class) {
if (IS_ERR(pps_class)) {
pr_err("failed to allocate class\n");
return -ENOMEM;
return PTR_ERR(pps_class);
}
pps_class->dev_attrs = pps_attrs;

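Both hunks above correct the same error-handling bug: class_create() never returns NULL on failure, it returns an error pointer, so the old NULL checks could never fire, a failed class_create() went undetected, and the error path (had it run) would have thrown away the real error code in favour of -ENOMEM. The kernel's ERR_PTR convention packs a small negative errno into the pointer value itself; a user-space re-implementation purely for illustration (fake_class_create and the constants are made up):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *fake_class_create(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;  /* failure is an ERR_PTR, never NULL */
}

int main(void)
{
	void *cls = fake_class_create(1);

	if (IS_ERR(cls))                    /* a NULL test here would never trigger */
		printf("class_create failed: %ld\n", PTR_ERR(cls));
	return 0;
}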
5 changes: 3 additions & 2 deletions drivers/rapidio/devices/tsi721.c
@@ -410,13 +410,14 @@ static void tsi721_db_dpc(struct work_struct *work)
*/
mport = priv->mport;

wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE));
rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;

while (wr_ptr != rd_ptr) {
idb_entry = (u64 *)(priv->idb_base +
(TSI721_IDB_ENTRY_SIZE * rd_ptr));
rd_ptr++;
rd_ptr %= IDB_QSIZE;
idb.msg = *idb_entry;
*idb_entry = 0;

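The inbound doorbell queue is a ring of IDB_QSIZE entries, and the hunk above keeps every index inside that ring: the raw write/read pointer registers are reduced modulo IDB_QSIZE before use, and the read pointer is wrapped again after each increment so it can never run past the end of the idb_base buffer. A stripped-down, user-space sketch of the wrapping logic (names mirror the driver, the IDB_QSIZE value is illustrative):

#include <stdio.h>

#define IDB_QSIZE 512    /* illustrative queue depth */

static unsigned int drain_ring(unsigned int wr_ptr, unsigned int rd_ptr,
			       const unsigned long long *ring)
{
	unsigned int handled = 0;

	wr_ptr %= IDB_QSIZE;    /* raw register values may lie outside the ring */
	rd_ptr %= IDB_QSIZE;

	while (wr_ptr != rd_ptr) {
		(void)ring[rd_ptr];                 /* process one doorbell entry */
		rd_ptr = (rd_ptr + 1) % IDB_QSIZE;  /* wrap instead of running off the end */
		handled++;
	}
	return handled;
}

int main(void)
{
	static unsigned long long ring[IDB_QSIZE];

	/* e.g. the read pointer sits near the end while new entries start at slot 5 */
	printf("handled %u entries\n", drain_ring(5, IDB_QSIZE - 3, ring));
	return 0;
}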
14 changes: 7 additions & 7 deletions drivers/rtc/rtc-r9701.c
@@ -125,20 +125,20 @@ static int __devinit r9701_probe(struct spi_device *spi)
unsigned char tmp;
int res;

tmp = R100CNT;
res = read_regs(&spi->dev, &tmp, 1);
if (res || tmp != 0x20) {
dev_err(&spi->dev, "cannot read RTC register\n");
return -ENODEV;
}

rtc = rtc_device_register("r9701",
&spi->dev, &r9701_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);

dev_set_drvdata(&spi->dev, rtc);

tmp = R100CNT;
res = read_regs(&spi->dev, &tmp, 1);
if (res || tmp != 0x20) {
rtc_device_unregister(rtc);
return res;
}

return 0;
}

2 changes: 1 addition & 1 deletion drivers/scsi/sd_dif.c
@@ -408,7 +408,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
kunmap_atomic(sdt, KM_USER0);
}

bio->bi_flags |= BIO_MAPPED_INTEGRITY;
bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
}

return 0;
2 changes: 2 additions & 0 deletions fs/aio.c
@@ -490,6 +490,8 @@ static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
kmem_cache_free(kiocb_cachep, req);
ctx->reqs_active--;
}
if (unlikely(!ctx->reqs_active && ctx->dead))
wake_up_all(&ctx->wait);
spin_unlock_irq(&ctx->ctx_lock);
}

18 changes: 2 additions & 16 deletions fs/exec.c
@@ -1915,7 +1915,6 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
struct completion *vfork_done;
int core_waiters = -EBUSY;

init_completion(&core_state->startup);
@@ -1927,22 +1926,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
core_waiters = zap_threads(tsk, mm, core_state, exit_code);
up_write(&mm->mmap_sem);

if (unlikely(core_waiters < 0))
goto fail;

/*
* Make sure nobody is waiting for us to release the VM,
* otherwise we can deadlock when we wait on each other
*/
vfork_done = tsk->vfork_done;
if (vfork_done) {
tsk->vfork_done = NULL;
complete(vfork_done);
}

if (core_waiters)
if (core_waiters > 0)
wait_for_completion(&core_state->startup);
fail:

return core_waiters;
}

9 changes: 7 additions & 2 deletions include/linux/kmsg_dump.h
@@ -15,13 +15,18 @@
#include <linux/errno.h>
#include <linux/list.h>

/*
* Keep this list arranged in rough order of priority. Anything listed after
* KMSG_DUMP_OOPS will not be logged by default unless printk.always_kmsg_dump
* is passed to the kernel.
*/
enum kmsg_dump_reason {
KMSG_DUMP_OOPS,
KMSG_DUMP_PANIC,
KMSG_DUMP_OOPS,
KMSG_DUMP_EMERG,
KMSG_DUMP_RESTART,
KMSG_DUMP_HALT,
KMSG_DUMP_POWEROFF,
KMSG_DUMP_EMERG,
};

/**
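Because the reasons are now ordered roughly by severity, the dump path can gate non-error shutdowns with a single comparison: any reason listed after KMSG_DUMP_OOPS is skipped unless printk.always_kmsg_dump was given. Roughly along these lines (a paraphrase of the idea, not a verbatim quote of the kernel/printk.c change):

/* enum values as in the reordered header above */
enum kmsg_dump_reason {
	KMSG_DUMP_PANIC,
	KMSG_DUMP_OOPS,
	KMSG_DUMP_EMERG,
	KMSG_DUMP_RESTART,
	KMSG_DUMP_HALT,
	KMSG_DUMP_POWEROFF,
};

static int always_kmsg_dump;    /* set from printk.always_kmsg_dump= */

static int should_dump(enum kmsg_dump_reason reason)
{
	/* only oops and panic reach the registered dumpers by default */
	return always_kmsg_dump || reason <= KMSG_DUMP_OOPS;
}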
5 changes: 0 additions & 5 deletions include/linux/memcontrol.h
@@ -129,7 +129,6 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage);

extern void mem_cgroup_reset_owner(struct page *page);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif
@@ -392,10 +391,6 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage)
{
}

static inline void mem_cgroup_reset_owner(struct page *page)
{
}
#endif /* CONFIG_CGROUP_MEM_CONT */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
3 changes: 1 addition & 2 deletions include/linux/sched.h
@@ -1777,7 +1777,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
/*
* Per process flags
*/
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
@@ -2371,7 +2370,7 @@ static inline int thread_group_empty(struct task_struct *p)
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
* pins the final release of task.io_context. Also protects ->cpuset and
* ->cgroup.subsys[].
* ->cgroup.subsys[]. And ->vfork_done.
*
* Nests both inside and outside of read_lock(&tasklist_lock).
* It must not be nested with write_lock_irq(&tasklist_lock),
60 changes: 39 additions & 21 deletions kernel/fork.c
@@ -668,6 +668,38 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
return mm;
}

static void complete_vfork_done(struct task_struct *tsk)
{
struct completion *vfork;

task_lock(tsk);
vfork = tsk->vfork_done;
if (likely(vfork)) {
tsk->vfork_done = NULL;
complete(vfork);
}
task_unlock(tsk);
}

static int wait_for_vfork_done(struct task_struct *child,
struct completion *vfork)
{
int killed;

freezer_do_not_count();
killed = wait_for_completion_killable(vfork);
freezer_count();

if (killed) {
task_lock(child);
child->vfork_done = NULL;
task_unlock(child);
}

put_task_struct(child);
return killed;
}

/* Please note the differences between mmput and mm_release.
* mmput is called whenever we stop holding onto a mm_struct,
* error success whatever.
@@ -683,8 +715,6 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
*/
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
struct completion *vfork_done = tsk->vfork_done;

/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
if (unlikely(tsk->robust_list)) {
@@ -704,17 +734,15 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
/* Get rid of any cached register state */
deactivate_mm(tsk, mm);

/* notify parent sleeping on vfork() */
if (vfork_done) {
tsk->vfork_done = NULL;
complete(vfork_done);
}
if (tsk->vfork_done)
complete_vfork_done(tsk);

/*
* If we're exiting normally, clear a user-space tid field if
* requested. We leave this alone when dying by signal, to leave
* the value intact in a core dump, and to save the unnecessary
* trouble otherwise. Userland only wants this done for a sys_exit.
* trouble, say, a killed vfork parent shouldn't touch this mm.
* Userland only wants this done for a sys_exit.
*/
if (tsk->clear_child_tid) {
if (!(tsk->flags & PF_SIGNALED) &&
@@ -1018,7 +1046,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)

new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
new_flags |= PF_FORKNOEXEC;
new_flags |= PF_STARTING;
p->flags = new_flags;
}

@@ -1548,27 +1575,18 @@ long do_fork(unsigned long clone_flags,
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
get_task_struct(p);
}

/*
* We set PF_STARTING at creation in case tracing wants to
* use this to distinguish a fully live task from one that
* hasn't finished SIGSTOP raising yet. Now we clear it
* and set the child going.
*/
p->flags &= ~PF_STARTING;

wake_up_new_task(p);

/* forking complete and child started to run, tell ptracer */
if (unlikely(trace))
ptrace_event(trace, nr);

if (clone_flags & CLONE_VFORK) {
freezer_do_not_count();
wait_for_completion(&vfork);
freezer_count();
ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
if (!wait_for_vfork_done(p, &vfork))
ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
}
} else {
nr = PTR_ERR(p);
11 changes: 7 additions & 4 deletions kernel/hung_task.c
@@ -119,15 +119,20 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
* For preemptible RCU it is sufficient to call rcu_read_unlock in order
* to exit the grace period. For classic RCU, a reschedule is required.
*/
static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
{
bool can_cont;

get_task_struct(g);
get_task_struct(t);
rcu_read_unlock();
cond_resched();
rcu_read_lock();
can_cont = pid_alive(g) && pid_alive(t);
put_task_struct(t);
put_task_struct(g);

return can_cont;
}

/*
@@ -154,9 +159,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
goto unlock;
if (!--batch_count) {
batch_count = HUNG_TASK_BATCHING;
rcu_lock_break(g, t);
/* Exit if t or g was unhashed during refresh. */
if (t->state == TASK_DEAD || g->state == TASK_DEAD)
if (!rcu_lock_break(g, t))
goto unlock;
}
/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
12 changes: 7 additions & 5 deletions kernel/kprobes.c
@@ -1334,8 +1334,10 @@ int __kprobes register_kprobe(struct kprobe *p)
if (!kernel_text_address((unsigned long) p->addr) ||
in_kprobes_functions((unsigned long) p->addr) ||
ftrace_text_reserved(p->addr, p->addr) ||
jump_label_text_reserved(p->addr, p->addr))
goto fail_with_jump_label;
jump_label_text_reserved(p->addr, p->addr)) {
ret = -EINVAL;
goto cannot_probe;
}

/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
p->flags &= KPROBE_FLAG_DISABLED;
@@ -1352,7 +1354,7 @@ int __kprobes register_kprobe(struct kprobe *p)
* its code to prohibit unexpected unloading.
*/
if (unlikely(!try_module_get(probed_mod)))
goto fail_with_jump_label;
goto cannot_probe;

/*
* If the module freed .init.text, we couldn't insert
@@ -1361,7 +1363,7 @@ int __kprobes register_kprobe(struct kprobe *p)
if (within_module_init((unsigned long)p->addr, probed_mod) &&
probed_mod->state != MODULE_STATE_COMING) {
module_put(probed_mod);
goto fail_with_jump_label;
goto cannot_probe;
}
/* ret will be updated by following code */
}
@@ -1409,7 +1411,7 @@ int __kprobes register_kprobe(struct kprobe *p)

return ret;

fail_with_jump_label:
cannot_probe:
preempt_enable();
jump_label_unlock();
return ret;
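The net effect of the kprobes hunks is that register_kprobe() now reports -EINVAL when the address cannot be probed, instead of jumping to the renamed cannot_probe label with ret still 0 from initialization, which made the call look successful to the caller. Callers that follow the usual pattern benefit automatically; a sketched caller in the style of the kprobes samples (kernel context assumed, includes omitted, probe target chosen only for illustration):

static int __init probe_init(void)
{
	static struct kprobe kp = {
		.symbol_name = "do_fork",   /* illustrative target */
	};
	int ret = register_kprobe(&kp);

	if (ret < 0)    /* now reliably negative when the address cannot be probed */
		pr_err("register_kprobe failed: %d\n", ret);
	return ret;
}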