From a1fa7a925476ae31318c2ebdc88c9d6886fbd204 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erdin=C3=A7=20G=C3=BCltekin?= Date: Mon, 14 Jan 2013 15:48:47 -0800 Subject: [PATCH] Create patch-3.7.2.patch --- .../files/patches/linux/patch-3.7.2.patch | 5885 +++++++++++++++++ 1 file changed, 5885 insertions(+) create mode 100644 kernel/kernel/files/patches/linux/patch-3.7.2.patch diff --git a/kernel/kernel/files/patches/linux/patch-3.7.2.patch b/kernel/kernel/files/patches/linux/patch-3.7.2.patch new file mode 100644 index 0000000000..82b2a1d12e --- /dev/null +++ b/kernel/kernel/files/patches/linux/patch-3.7.2.patch @@ -0,0 +1,5885 @@ +diff --git a/Makefile b/Makefile +index 540f7b2..23807aa 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 7 +-SUBLEVEL = 0 ++SUBLEVEL = 2 + EXTRAVERSION = + NAME = Terrified Chipmunk + +diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c +index df74518..ab1017b 100644 +--- a/arch/arm/kernel/swp_emulate.c ++++ b/arch/arm/kernel/swp_emulate.c +@@ -109,10 +109,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr) + { + siginfo_t info; + ++ down_read(¤t->mm->mmap_sem); + if (find_vma(current->mm, addr) == NULL) + info.si_code = SEGV_MAPERR; + else + info.si_code = SEGV_ACCERR; ++ up_read(¤t->mm->mmap_sem); + + info.si_signo = SIGSEGV; + info.si_errno = 0; +diff --git a/arch/arm/mach-realview/include/mach/board-eb.h b/arch/arm/mach-realview/include/mach/board-eb.h +index 124bce6..a301e61 100644 +--- a/arch/arm/mach-realview/include/mach/board-eb.h ++++ b/arch/arm/mach-realview/include/mach/board-eb.h +@@ -47,7 +47,7 @@ + #define REALVIEW_EB_USB_BASE 0x4F000000 /* USB */ + + #ifdef CONFIG_REALVIEW_EB_ARM11MP_REVB +-#define REALVIEW_EB11MP_PRIV_MEM_BASE 0x1F000000 ++#define REALVIEW_EB11MP_PRIV_MEM_BASE 0x10100000 + #define REALVIEW_EB11MP_L220_BASE 0x10102000 /* L220 registers */ + #define REALVIEW_EB11MP_SYS_PLD_CTRL1 0xD8 /* Register offset for MPCore sysctl */ + #else +diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S +index cd95664..7539ec2 100644 +--- a/arch/arm/mm/cache-v7.S ++++ b/arch/arm/mm/cache-v7.S +@@ -44,8 +44,10 @@ ENDPROC(v7_flush_icache_all) + ENTRY(v7_flush_dcache_louis) + dmb @ ensure ordering with previous memory accesses + mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr +- ands r3, r0, #0xe00000 @ extract LoUIS from clidr +- mov r3, r3, lsr #20 @ r3 = LoUIS * 2 ++ ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr ++ ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr ++ ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 ++ ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 + moveq pc, lr @ return if level == 0 + mov r10, #0 @ r10 (starting level) = 0 + b flush_levels @ start flushing cache levels +diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c +index 941dfb9..99b47b9 100644 +--- a/arch/arm/mm/mmu.c ++++ b/arch/arm/mm/mmu.c +@@ -488,7 +488,7 @@ static void __init build_mem_type_table(void) + #endif + + for (i = 0; i < 16; i++) { +- unsigned long v = pgprot_val(protection_map[i]); ++ pteval_t v = pgprot_val(protection_map[i]); + protection_map[i] = __pgprot(v | user_pgprot); + } + +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h +index 14aba2d..64b1339 100644 +--- a/arch/arm64/include/asm/pgtable.h ++++ b/arch/arm64/include/asm/pgtable.h +@@ -159,6 +159,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + { + if (pte_present_exec_user(pte)) + __sync_icache_dcache(pte, addr); ++ if (!pte_dirty(pte)) 
++ pte = pte_wrprotect(pte); + set_pte(ptep, pte); + } + +diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h +index 656a6f2..660f210 100644 +--- a/arch/arm64/include/asm/unistd32.h ++++ b/arch/arm64/include/asm/unistd32.h +@@ -393,7 +393,7 @@ __SYSCALL(368, compat_sys_fanotify_mark_wrapper) + __SYSCALL(369, sys_prlimit64) + __SYSCALL(370, sys_name_to_handle_at) + __SYSCALL(371, compat_sys_open_by_handle_at) +-__SYSCALL(372, sys_clock_adjtime) ++__SYSCALL(372, compat_sys_clock_adjtime) + __SYSCALL(373, sys_syncfs) + + #define __NR_compat_syscalls 374 +diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c +index 8807ba2..051bb52 100644 +--- a/arch/arm64/kernel/signal.c ++++ b/arch/arm64/kernel/signal.c +@@ -41,6 +41,8 @@ + struct rt_sigframe { + struct siginfo info; + struct ucontext uc; ++ u64 fp; ++ u64 lr; + }; + + static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) +@@ -175,6 +177,10 @@ static int setup_sigframe(struct rt_sigframe __user *sf, + struct aux_context __user *aux = + (struct aux_context __user *)sf->uc.uc_mcontext.__reserved; + ++ /* set up the stack frame for unwinding */ ++ __put_user_error(regs->regs[29], &sf->fp, err); ++ __put_user_error(regs->regs[30], &sf->lr, err); ++ + for (i = 0; i < 31; i++) + __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i], + err); +@@ -210,9 +216,6 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, + if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) + sp = sp_top = current->sas_ss_sp + current->sas_ss_size; + +- /* room for stack frame (FP, LR) */ +- sp -= 16; +- + sp = (sp - framesize) & ~15; + frame = (void __user *)sp; + +@@ -225,20 +228,14 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, + return frame; + } + +-static int setup_return(struct pt_regs *regs, struct k_sigaction *ka, +- void __user *frame, int usig) ++static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, ++ void __user *frame, int usig) + { +- int err = 0; + __sigrestore_t sigtramp; +- unsigned long __user *sp = (unsigned long __user *)regs->sp; +- +- /* set up the stack frame */ +- __put_user_error(regs->regs[29], sp - 2, err); +- __put_user_error(regs->regs[30], sp - 1, err); + + regs->regs[0] = usig; +- regs->regs[29] = regs->sp - 16; + regs->sp = (unsigned long)frame; ++ regs->regs[29] = regs->sp + offsetof(struct rt_sigframe, fp); + regs->pc = (unsigned long)ka->sa.sa_handler; + + if (ka->sa.sa_flags & SA_RESTORER) +@@ -247,8 +244,6 @@ static int setup_return(struct pt_regs *regs, struct k_sigaction *ka, + sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp); + + regs->regs[30] = (unsigned long)sigtramp; +- +- return err; + } + + static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, +@@ -272,13 +267,13 @@ static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, + err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack)); + + err |= setup_sigframe(frame, regs, set); +- if (err == 0) +- err = setup_return(regs, ka, frame, usig); +- +- if (err == 0 && ka->sa.sa_flags & SA_SIGINFO) { +- err |= copy_siginfo_to_user(&frame->info, info); +- regs->regs[1] = (unsigned long)&frame->info; +- regs->regs[2] = (unsigned long)&frame->uc; ++ if (err == 0) { ++ setup_return(regs, ka, frame, usig); ++ if (ka->sa.sa_flags & SA_SIGINFO) { ++ err |= copy_siginfo_to_user(&frame->info, info); ++ regs->regs[1] = (unsigned long)&frame->info; ++ regs->regs[2] = (unsigned 
long)&frame->uc; ++ } + } + + return err; +diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h +index 32567bc..ac12ae2 100644 +--- a/arch/cris/include/asm/io.h ++++ b/arch/cris/include/asm/io.h +@@ -133,12 +133,39 @@ static inline void writel(unsigned int b, volatile void __iomem *addr) + #define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0) + #define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0) + #define insl(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,4,count) : 0) +-#define outb(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,1,1) +-#define outw(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,2,1) +-#define outl(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,4,1) +-#define outsb(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,1,count) +-#define outsw(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,2,count) +-#define outsl(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,3,count) ++static inline void outb(unsigned char data, unsigned int port) ++{ ++ if (cris_iops) ++ cris_iops->write_io(port, (void *) &data, 1, 1); ++} ++static inline void outw(unsigned short data, unsigned int port) ++{ ++ if (cris_iops) ++ cris_iops->write_io(port, (void *) &data, 2, 1); ++} ++static inline void outl(unsigned int data, unsigned int port) ++{ ++ if (cris_iops) ++ cris_iops->write_io(port, (void *) &data, 4, 1); ++} ++static inline void outsb(unsigned int port, const void *addr, ++ unsigned long count) ++{ ++ if (cris_iops) ++ cris_iops->write_io(port, (void *)addr, 1, count); ++} ++static inline void outsw(unsigned int port, const void *addr, ++ unsigned long count) ++{ ++ if (cris_iops) ++ cris_iops->write_io(port, (void *)addr, 2, count); ++} ++static inline void outsl(unsigned int port, const void *addr, ++ unsigned long count) ++{ ++ if (cris_iops) ++ cris_iops->write_io(port, (void *)addr, 4, count); ++} + + /* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem +diff --git a/arch/sparc/crypto/aes_asm.S b/arch/sparc/crypto/aes_asm.S +index 23f6cbb..1cda8aa 100644 +--- a/arch/sparc/crypto/aes_asm.S ++++ b/arch/sparc/crypto/aes_asm.S +@@ -1024,7 +1024,11 @@ ENTRY(aes_sparc64_ecb_encrypt_256) + add %o2, 0x20, %o2 + brlz,pt %o3, 11f + nop +-10: ldx [%o1 + 0x00], %g3 ++10: ldd [%o0 + 0xd0], %f56 ++ ldd [%o0 + 0xd8], %f58 ++ ldd [%o0 + 0xe0], %f60 ++ ldd [%o0 + 0xe8], %f62 ++ ldx [%o1 + 0x00], %g3 + ldx [%o1 + 0x08], %g7 + xor %g1, %g3, %g3 + xor %g2, %g7, %g7 +@@ -1128,9 +1132,9 @@ ENTRY(aes_sparc64_ecb_decrypt_256) + /* %o0=&key[key_len], %o1=input, %o2=output, %o3=len */ + ldx [%o0 - 0x10], %g1 + subcc %o3, 0x10, %o3 ++ ldx [%o0 - 0x08], %g2 + be 10f +- ldx [%o0 - 0x08], %g2 +- sub %o0, 0xf0, %o0 ++ sub %o0, 0xf0, %o0 + 1: ldx [%o1 + 0x00], %g3 + ldx [%o1 + 0x08], %g7 + ldx [%o1 + 0x10], %o4 +@@ -1154,7 +1158,11 @@ ENTRY(aes_sparc64_ecb_decrypt_256) + add %o2, 0x20, %o2 + brlz,pt %o3, 11f + nop +-10: ldx [%o1 + 0x00], %g3 ++10: ldd [%o0 + 0x18], %f56 ++ ldd [%o0 + 0x10], %f58 ++ ldd [%o0 + 0x08], %f60 ++ ldd [%o0 + 0x00], %f62 ++ ldx [%o1 + 0x00], %g3 + ldx [%o1 + 0x08], %g7 + xor %g1, %g3, %g3 + xor %g2, %g7, %g7 +@@ -1511,11 +1519,11 @@ ENTRY(aes_sparc64_ctr_crypt_256) + add %o2, 0x20, %o2 + brlz,pt %o3, 11f + nop +- ldd [%o0 + 0xd0], %f56 ++10: ldd [%o0 + 0xd0], %f56 + ldd [%o0 + 0xd8], %f58 + ldd [%o0 + 0xe0], %f60 + ldd [%o0 + 0xe8], %f62 +-10: xor 
%g1, %g3, %o5 ++ xor %g1, %g3, %o5 + MOVXTOD_O5_F0 + xor %g2, %g7, %o5 + MOVXTOD_O5_F2 +diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c +index 3965d1d..503e6d9 100644 +--- a/arch/sparc/crypto/aes_glue.c ++++ b/arch/sparc/crypto/aes_glue.c +@@ -222,6 +222,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + ctx->ops->load_encrypt_keys(&ctx->key[0]); + while ((nbytes = walk.nbytes)) { +@@ -251,6 +252,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + ctx->ops->load_decrypt_keys(&ctx->key[0]); + key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)]; +@@ -280,6 +282,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + ctx->ops->load_encrypt_keys(&ctx->key[0]); + while ((nbytes = walk.nbytes)) { +@@ -309,6 +312,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + ctx->ops->load_decrypt_keys(&ctx->key[0]); + key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)]; +@@ -329,6 +333,22 @@ static int cbc_decrypt(struct blkcipher_desc *desc, + return err; + } + ++static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx, ++ struct blkcipher_walk *walk) ++{ ++ u8 *ctrblk = walk->iv; ++ u64 keystream[AES_BLOCK_SIZE / sizeof(u64)]; ++ u8 *src = walk->src.virt.addr; ++ u8 *dst = walk->dst.virt.addr; ++ unsigned int nbytes = walk->nbytes; ++ ++ ctx->ops->ecb_encrypt(&ctx->key[0], (const u64 *)ctrblk, ++ keystream, AES_BLOCK_SIZE); ++ crypto_xor((u8 *) keystream, src, nbytes); ++ memcpy(dst, keystream, nbytes); ++ crypto_inc(ctrblk, AES_BLOCK_SIZE); ++} ++ + static int ctr_crypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +@@ -338,10 +358,11 @@ static int ctr_crypt(struct blkcipher_desc *desc, + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); +- err = blkcipher_walk_virt(desc, &walk); ++ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + ctx->ops->load_encrypt_keys(&ctx->key[0]); +- while ((nbytes = walk.nbytes)) { ++ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { + unsigned int block_len = nbytes & AES_BLOCK_MASK; + + if (likely(block_len)) { +@@ -353,6 +374,10 @@ static int ctr_crypt(struct blkcipher_desc *desc, + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } ++ if (walk.nbytes) { ++ ctr_crypt_final(ctx, &walk); ++ err = blkcipher_walk_done(desc, &walk, 0); ++ } + fprs_write(0); + return err; + } +@@ -418,7 +443,7 @@ static struct crypto_alg algs[] = { { + .cra_driver_name = "ctr-aes-sparc64", + .cra_priority = SPARC_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, +- .cra_blocksize = AES_BLOCK_SIZE, ++ .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx), + .cra_alignmask = 7, + .cra_type = &crypto_blkcipher_type, +diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c +index 62c89af..888f626 100644 +--- a/arch/sparc/crypto/camellia_glue.c ++++ 
b/arch/sparc/crypto/camellia_glue.c +@@ -98,6 +98,7 @@ static int __ecb_crypt(struct blkcipher_desc *desc, + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + if (encrypt) + key = &ctx->encrypt_key[0]; +@@ -160,6 +161,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + key = &ctx->encrypt_key[0]; + camellia_sparc64_load_keys(key, ctx->key_len); +@@ -198,6 +200,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + key = &ctx->decrypt_key[0]; + camellia_sparc64_load_keys(key, ctx->key_len); +diff --git a/arch/sparc/crypto/des_asm.S b/arch/sparc/crypto/des_asm.S +index 30b6e90..b5c8fc2 100644 +--- a/arch/sparc/crypto/des_asm.S ++++ b/arch/sparc/crypto/des_asm.S +@@ -376,6 +376,7 @@ ENTRY(des3_ede_sparc64_ecb_crypt) + 1: ldd [%o1 + 0x00], %f60 + DES3_LOOP_BODY(60) + std %f60, [%o2 + 0x00] ++ add %o1, 0x08, %o1 + subcc %o3, 0x08, %o3 + bne,pt %icc, 1b + add %o2, 0x08, %o2 +diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c +index 41524ce..3065bc6 100644 +--- a/arch/sparc/crypto/des_glue.c ++++ b/arch/sparc/crypto/des_glue.c +@@ -100,6 +100,7 @@ static int __ecb_crypt(struct blkcipher_desc *desc, + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + if (encrypt) + des_sparc64_load_keys(&ctx->encrypt_expkey[0]); +@@ -147,6 +148,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + des_sparc64_load_keys(&ctx->encrypt_expkey[0]); + while ((nbytes = walk.nbytes)) { +@@ -177,6 +179,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + des_sparc64_load_keys(&ctx->decrypt_expkey[0]); + while ((nbytes = walk.nbytes)) { +@@ -266,6 +269,7 @@ static int __ecb3_crypt(struct blkcipher_desc *desc, + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + if (encrypt) + K = &ctx->encrypt_expkey[0]; +@@ -317,6 +321,7 @@ static int cbc3_encrypt(struct blkcipher_desc *desc, + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + K = &ctx->encrypt_expkey[0]; + des3_ede_sparc64_load_keys(K); +@@ -352,6 +357,7 @@ static int cbc3_decrypt(struct blkcipher_desc *desc, + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); ++ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + K = &ctx->decrypt_expkey[0]; + des3_ede_sparc64_load_keys(K); +diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h +index 8c5eed6..9661e9b 100644 +--- a/arch/sparc/include/asm/hugetlb.h ++++ b/arch/sparc/include/asm/hugetlb.h +@@ -61,14 +61,20 @@ static inline pte_t huge_pte_wrprotect(pte_t pte) + static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) + { +- ptep_set_wrprotect(mm, addr, ptep); ++ pte_t old_pte = *ptep; 
++ set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); + } + + static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) + { +- return ptep_set_access_flags(vma, addr, ptep, pte, dirty); ++ int changed = !pte_same(*ptep, pte); ++ if (changed) { ++ set_huge_pte_at(vma->vm_mm, addr, ptep, pte); ++ flush_tlb_page(vma, addr); ++ } ++ return changed; + } + + static inline pte_t huge_ptep_get(pte_t *ptep) +diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c +index 1460a5d..e28670f 100644 +--- a/arch/x86/kernel/hpet.c ++++ b/arch/x86/kernel/hpet.c +@@ -434,7 +434,7 @@ void hpet_msi_unmask(struct irq_data *data) + + /* unmask it */ + cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); +- cfg |= HPET_TN_FSB; ++ cfg |= HPET_TN_ENABLE | HPET_TN_FSB; + hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); + } + +@@ -445,7 +445,7 @@ void hpet_msi_mask(struct irq_data *data) + + /* mask it */ + cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); +- cfg &= ~HPET_TN_FSB; ++ cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB); + hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); + } + +diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c +index 720e973f..487d9f3 100644 +--- a/arch/x86/pci/common.c ++++ b/arch/x86/pci/common.c +@@ -433,7 +433,8 @@ static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = { + .callback = set_scan_all, + .ident = "Stratus/NEC ftServer", + .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "ftServer"), ++ DMI_MATCH(DMI_SYS_VENDOR, "Stratus"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"), + }, + }, + {} +diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c +index 45e3e17..7efaeaa 100644 +--- a/drivers/acpi/battery.c ++++ b/drivers/acpi/battery.c +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + + #ifdef CONFIG_ACPI_PROCFS_POWER + #include +@@ -95,6 +96,18 @@ enum { + ACPI_BATTERY_ALARM_PRESENT, + ACPI_BATTERY_XINFO_PRESENT, + ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, ++ /* On Lenovo Thinkpad models from 2010 and 2011, the power unit ++ switches between mWh and mAh depending on whether the system ++ is running on battery or not. When mAh is the unit, most ++ reported values are incorrect and need to be adjusted by ++ 10000/design_voltage. Verified on x201, t410, t410s, and x220. ++ Pre-2010 and 2012 models appear to always report in mWh and ++ are thus unaffected (tested with t42, t61, t500, x200, x300, ++ and x230). Also, in mid-2012 Lenovo issued a BIOS update for ++ the 2011 models that fixes the issue (tested on x220 with a ++ post-1.29 BIOS), but as of Nov. 2012, no such update is ++ available for the 2010 models. */ ++ ACPI_BATTERY_QUIRK_THINKPAD_MAH, + }; + + struct acpi_battery { +@@ -438,6 +451,21 @@ static int acpi_battery_get_info(struct acpi_battery *battery) + kfree(buffer.pointer); + if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)) + battery->full_charge_capacity = battery->design_capacity; ++ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) && ++ battery->power_unit && battery->design_voltage) { ++ battery->design_capacity = battery->design_capacity * ++ 10000 / battery->design_voltage; ++ battery->full_charge_capacity = battery->full_charge_capacity * ++ 10000 / battery->design_voltage; ++ battery->design_capacity_warning = ++ battery->design_capacity_warning * ++ 10000 / battery->design_voltage; ++ /* Curiously, design_capacity_low, unlike the rest of them, ++ is correct. 
*/ ++ /* capacity_granularity_* equal 1 on the systems tested, so ++ it's impossible to tell if they would need an adjustment ++ or not if their values were higher. */ ++ } + return result; + } + +@@ -486,6 +514,11 @@ static int acpi_battery_get_state(struct acpi_battery *battery) + && battery->capacity_now >= 0 && battery->capacity_now <= 100) + battery->capacity_now = (battery->capacity_now * + battery->full_charge_capacity) / 100; ++ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) && ++ battery->power_unit && battery->design_voltage) { ++ battery->capacity_now = battery->capacity_now * ++ 10000 / battery->design_voltage; ++ } + return result; + } + +@@ -595,6 +628,24 @@ static void sysfs_remove_battery(struct acpi_battery *battery) + mutex_unlock(&battery->sysfs_lock); + } + ++static void find_battery(const struct dmi_header *dm, void *private) ++{ ++ struct acpi_battery *battery = (struct acpi_battery *)private; ++ /* Note: the hardcoded offsets below have been extracted from ++ the source code of dmidecode. */ ++ if (dm->type == DMI_ENTRY_PORTABLE_BATTERY && dm->length >= 8) { ++ const u8 *dmi_data = (const u8 *)(dm + 1); ++ int dmi_capacity = get_unaligned((const u16 *)(dmi_data + 6)); ++ if (dm->length >= 18) ++ dmi_capacity *= dmi_data[17]; ++ if (battery->design_capacity * battery->design_voltage / 1000 ++ != dmi_capacity && ++ battery->design_capacity * 10 == dmi_capacity) ++ set_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, ++ &battery->flags); ++ } ++} ++ + /* + * According to the ACPI spec, some kinds of primary batteries can + * report percentage battery remaining capacity directly to OS. +@@ -620,6 +671,32 @@ static void acpi_battery_quirks(struct acpi_battery *battery) + battery->capacity_now = (battery->capacity_now * + battery->full_charge_capacity) / 100; + } ++ ++ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags)) ++ return ; ++ ++ if (battery->power_unit && dmi_name_in_vendors("LENOVO")) { ++ const char *s; ++ s = dmi_get_system_info(DMI_PRODUCT_VERSION); ++ if (s && !strnicmp(s, "ThinkPad", 8)) { ++ dmi_walk(find_battery, battery); ++ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, ++ &battery->flags) && ++ battery->design_voltage) { ++ battery->design_capacity = ++ battery->design_capacity * ++ 10000 / battery->design_voltage; ++ battery->full_charge_capacity = ++ battery->full_charge_capacity * ++ 10000 / battery->design_voltage; ++ battery->design_capacity_warning = ++ battery->design_capacity_warning * ++ 10000 / battery->design_voltage; ++ battery->capacity_now = battery->capacity_now * ++ 10000 / battery->design_voltage; ++ } ++ } ++ } + } + + static int acpi_battery_update(struct acpi_battery *battery) +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 1fcb867..c441834 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -1227,7 +1227,7 @@ static void acpi_device_set_id(struct acpi_device *device) + acpi_add_id(device, ACPI_DOCK_HID); + else if (!acpi_ibm_smbus_match(device)) + acpi_add_id(device, ACPI_SMBUS_IBM_HID); +- else if (!acpi_device_hid(device) && ++ else if (list_empty(&device->pnp.ids) && + ACPI_IS_ROOT_DEVICE(device->parent)) { + acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */ + strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME); +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c +index fdcdbb6..813aa38 100644 +--- a/drivers/acpi/sleep.c ++++ b/drivers/acpi/sleep.c +@@ -109,6 +109,180 @@ void __init acpi_old_suspend_ordering(void) + old_suspend_ordering = true; + } + ++static int __init 
init_old_suspend_ordering(const struct dmi_system_id *d) ++{ ++ acpi_old_suspend_ordering(); ++ return 0; ++} ++ ++static int __init init_nvs_nosave(const struct dmi_system_id *d) ++{ ++ acpi_nvs_nosave(); ++ return 0; ++} ++ ++static struct dmi_system_id __initdata acpisleep_dmi_table[] = { ++ { ++ .callback = init_old_suspend_ordering, ++ .ident = "Abit KN9 (nForce4 variant)", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"), ++ DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"), ++ }, ++ }, ++ { ++ .callback = init_old_suspend_ordering, ++ .ident = "HP xw4600 Workstation", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"), ++ }, ++ }, ++ { ++ .callback = init_old_suspend_ordering, ++ .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), ++ DMI_MATCH(DMI_BOARD_NAME, "M2N8L"), ++ }, ++ }, ++ { ++ .callback = init_old_suspend_ordering, ++ .ident = "Panasonic CF51-2L", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, ++ "Matsushita Electric Industrial Co.,Ltd."), ++ DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), ++ }, ++ }, ++ { ++ .callback = init_nvs_nosave, ++ .ident = "Sony Vaio VGN-FW21E", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"), ++ }, ++ }, ++ { ++ .callback = init_nvs_nosave, ++ .ident = "Sony Vaio VPCEB17FX", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"), ++ }, ++ }, ++ { ++ .callback = init_nvs_nosave, ++ .ident = "Sony Vaio VGN-SR11M", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), ++ }, ++ }, ++ { ++ .callback = init_nvs_nosave, ++ .ident = "Everex StepNote Series", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), ++ }, ++ }, ++ { ++ .callback = init_nvs_nosave, ++ .ident = "Sony Vaio VPCEB1Z1E", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), ++ }, ++ }, ++ { ++ .callback = init_nvs_nosave, ++ .ident = "Sony Vaio VGN-NW130D", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), ++ }, ++ }, ++ { ++ .callback = init_nvs_nosave, ++ .ident = "Sony Vaio VPCCW29FX", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), ++ }, ++ }, ++ { ++ .callback = init_nvs_nosave, ++ .ident = "Averatec AV1020-ED2", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), ++ }, ++ }, ++ { ++ .callback = init_old_suspend_ordering, ++ .ident = "Asus A8N-SLI DELUXE", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), ++ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"), ++ }, ++ }, ++ { ++ .callback = init_old_suspend_ordering, ++ .ident = "Asus A8N-SLI Premium", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), ++ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), ++ }, ++ }, ++ { ++ .callback = init_nvs_nosave, ++ .ident = "Sony Vaio VGN-SR26GN_P", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"), ++ }, ++ }, ++ { ++ .callback = init_nvs_nosave, ++ .ident = "Sony Vaio VPCEB1S1E", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), ++ 
DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"), ++ }, ++ }, ++ { ++ .callback = init_nvs_nosave, ++ .ident = "Sony Vaio VGN-FW520F", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), ++ }, ++ }, ++ { ++ .callback = init_nvs_nosave, ++ .ident = "Asus K54C", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "K54C"), ++ }, ++ }, ++ { ++ .callback = init_nvs_nosave, ++ .ident = "Asus K54HR", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), ++ }, ++ }, ++ {}, ++}; ++ ++static void acpi_sleep_dmi_check(void) ++{ ++ dmi_check_system(acpisleep_dmi_table); ++} ++ + /** + * acpi_pm_freeze - Disable the GPEs and suspend EC transactions. + */ +@@ -224,6 +398,7 @@ static void acpi_pm_end(void) + } + #else /* !CONFIG_ACPI_SLEEP */ + #define acpi_target_sleep_state ACPI_STATE_S0 ++static inline void acpi_sleep_dmi_check(void) {} + #endif /* CONFIG_ACPI_SLEEP */ + + #ifdef CONFIG_SUSPEND +@@ -382,167 +557,6 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = { + .end = acpi_pm_end, + .recover = acpi_pm_finish, + }; +- +-static int __init init_old_suspend_ordering(const struct dmi_system_id *d) +-{ +- old_suspend_ordering = true; +- return 0; +-} +- +-static int __init init_nvs_nosave(const struct dmi_system_id *d) +-{ +- acpi_nvs_nosave(); +- return 0; +-} +- +-static struct dmi_system_id __initdata acpisleep_dmi_table[] = { +- { +- .callback = init_old_suspend_ordering, +- .ident = "Abit KN9 (nForce4 variant)", +- .matches = { +- DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"), +- DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"), +- }, +- }, +- { +- .callback = init_old_suspend_ordering, +- .ident = "HP xw4600 Workstation", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), +- DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"), +- }, +- }, +- { +- .callback = init_old_suspend_ordering, +- .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", +- .matches = { +- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), +- DMI_MATCH(DMI_BOARD_NAME, "M2N8L"), +- }, +- }, +- { +- .callback = init_old_suspend_ordering, +- .ident = "Panasonic CF51-2L", +- .matches = { +- DMI_MATCH(DMI_BOARD_VENDOR, +- "Matsushita Electric Industrial Co.,Ltd."), +- DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), +- }, +- }, +- { +- .callback = init_nvs_nosave, +- .ident = "Sony Vaio VGN-FW21E", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), +- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"), +- }, +- }, +- { +- .callback = init_nvs_nosave, +- .ident = "Sony Vaio VPCEB17FX", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), +- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"), +- }, +- }, +- { +- .callback = init_nvs_nosave, +- .ident = "Sony Vaio VGN-SR11M", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), +- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), +- }, +- }, +- { +- .callback = init_nvs_nosave, +- .ident = "Everex StepNote Series", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), +- DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), +- }, +- }, +- { +- .callback = init_nvs_nosave, +- .ident = "Sony Vaio VPCEB1Z1E", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), +- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), +- }, +- }, +- { +- .callback = init_nvs_nosave, +- .ident = "Sony Vaio VGN-NW130D", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), +- 
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), +- }, +- }, +- { +- .callback = init_nvs_nosave, +- .ident = "Sony Vaio VPCCW29FX", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), +- DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), +- }, +- }, +- { +- .callback = init_nvs_nosave, +- .ident = "Averatec AV1020-ED2", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), +- DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), +- }, +- }, +- { +- .callback = init_old_suspend_ordering, +- .ident = "Asus A8N-SLI DELUXE", +- .matches = { +- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), +- DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"), +- }, +- }, +- { +- .callback = init_old_suspend_ordering, +- .ident = "Asus A8N-SLI Premium", +- .matches = { +- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), +- DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), +- }, +- }, +- { +- .callback = init_nvs_nosave, +- .ident = "Sony Vaio VGN-SR26GN_P", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), +- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"), +- }, +- }, +- { +- .callback = init_nvs_nosave, +- .ident = "Sony Vaio VGN-FW520F", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), +- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), +- }, +- }, +- { +- .callback = init_nvs_nosave, +- .ident = "Asus K54C", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), +- DMI_MATCH(DMI_PRODUCT_NAME, "K54C"), +- }, +- }, +- { +- .callback = init_nvs_nosave, +- .ident = "Asus K54HR", +- .matches = { +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), +- DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), +- }, +- }, +- {}, +-}; + #endif /* CONFIG_SUSPEND */ + + #ifdef CONFIG_HIBERNATION +@@ -873,13 +887,13 @@ int __init acpi_sleep_init(void) + u8 type_a, type_b; + #ifdef CONFIG_SUSPEND + int i = 0; +- +- dmi_check_system(acpisleep_dmi_table); + #endif + + if (acpi_disabled) + return 0; + ++ acpi_sleep_dmi_check(); ++ + sleep_states[ACPI_STATE_S0] = 1; + printk(KERN_INFO PREFIX "(supports S0"); + +diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c +index 0230cb6..ac9a69c 100644 +--- a/drivers/acpi/video.c ++++ b/drivers/acpi/video.c +@@ -389,6 +389,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d) + return 0; + } + ++static int video_ignore_initial_backlight(const struct dmi_system_id *d) ++{ ++ use_bios_initial_backlight = 0; ++ return 0; ++} ++ + static struct dmi_system_id video_dmi_table[] __initdata = { + /* + * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 +@@ -433,6 +439,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), + }, + }, ++ { ++ .callback = video_ignore_initial_backlight, ++ .ident = "HP Folio 13-2000", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"), ++ }, ++ }, + {} + }; + +diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c +index b728880..4ac2593 100644 +--- a/drivers/acpi/video_detect.c ++++ b/drivers/acpi/video_detect.c +@@ -156,6 +156,14 @@ static struct dmi_system_id video_detect_dmi_table[] = { + DMI_MATCH(DMI_BOARD_NAME, "X360"), + }, + }, ++ { ++ .callback = video_detect_force_vendor, ++ .ident = "Asus UL30VT", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"), ++ }, ++ }, + { }, + }; + +diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c +index 5b0ba3f..ef01ac0 100644 
+--- a/drivers/ata/libata-acpi.c ++++ b/drivers/ata/libata-acpi.c +@@ -76,6 +76,9 @@ acpi_handle ata_dev_acpi_handle(struct ata_device *dev) + acpi_integer adr; + struct ata_port *ap = dev->link->ap; + ++ if (dev->flags & ATA_DFLAG_ACPI_DISABLED) ++ return NULL; ++ + if (ap->flags & ATA_FLAG_ACPI_SATA) { + if (!sata_pmp_attached(ap)) + adr = SATA_ADR(ap->port_no, NO_PORT_MULT); +@@ -945,6 +948,7 @@ int ata_acpi_on_devcfg(struct ata_device *dev) + return rc; + } + ++ dev->flags |= ATA_DFLAG_ACPI_DISABLED; + ata_dev_warn(dev, "ACPI: failed the second time, disabled\n"); + + /* We can safely continue if no _GTF command has been executed +diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c +index 9851093..1853a45 100644 +--- a/drivers/atm/solos-pci.c ++++ b/drivers/atm/solos-pci.c +@@ -967,10 +967,11 @@ static uint32_t fpga_tx(struct solos_card *card) + for (port = 0; tx_pending; tx_pending >>= 1, port++) { + if (tx_pending & 1) { + struct sk_buff *oldskb = card->tx_skb[port]; +- if (oldskb) ++ if (oldskb) { + pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr, + oldskb->len, PCI_DMA_TODEVICE); +- ++ card->tx_skb[port] = NULL; ++ } + spin_lock(&card->tx_queue_lock); + skb = skb_dequeue(&card->tx_queue[port]); + if (!skb) +diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c +index 8945f4e..be5f7aa 100644 +--- a/drivers/base/firmware_class.c ++++ b/drivers/base/firmware_class.c +@@ -143,7 +143,7 @@ struct fw_cache_entry { + }; + + struct firmware_priv { +- struct timer_list timeout; ++ struct delayed_work timeout_work; + bool nowait; + struct device dev; + struct firmware_buf *buf; +@@ -246,7 +246,6 @@ static void __fw_free_buf(struct kref *ref) + __func__, buf->fw_id, buf, buf->data, + (unsigned int)buf->size); + +- spin_lock(&fwc->lock); + list_del(&buf->list); + spin_unlock(&fwc->lock); + +@@ -263,7 +262,10 @@ static void __fw_free_buf(struct kref *ref) + + static void fw_free_buf(struct firmware_buf *buf) + { +- kref_put(&buf->ref, __fw_free_buf); ++ struct firmware_cache *fwc = buf->fwc; ++ spin_lock(&fwc->lock); ++ if (!kref_put(&buf->ref, __fw_free_buf)) ++ spin_unlock(&fwc->lock); + } + + /* direct firmware loading support */ +@@ -667,11 +669,18 @@ static struct bin_attribute firmware_attr_data = { + .write = firmware_data_write, + }; + +-static void firmware_class_timeout(u_long data) ++static void firmware_class_timeout_work(struct work_struct *work) + { +- struct firmware_priv *fw_priv = (struct firmware_priv *) data; ++ struct firmware_priv *fw_priv = container_of(work, ++ struct firmware_priv, timeout_work.work); + ++ mutex_lock(&fw_lock); ++ if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) { ++ mutex_unlock(&fw_lock); ++ return; ++ } + fw_load_abort(fw_priv); ++ mutex_unlock(&fw_lock); + } + + static struct firmware_priv * +@@ -690,8 +699,8 @@ fw_create_instance(struct firmware *firmware, const char *fw_name, + + fw_priv->nowait = nowait; + fw_priv->fw = firmware; +- setup_timer(&fw_priv->timeout, +- firmware_class_timeout, (u_long) fw_priv); ++ INIT_DELAYED_WORK(&fw_priv->timeout_work, ++ firmware_class_timeout_work); + + f_dev = &fw_priv->dev; + +@@ -858,7 +867,9 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent, + dev_dbg(f_dev->parent, "firmware: direct-loading" + " firmware %s\n", buf->fw_id); + ++ mutex_lock(&fw_lock); + set_bit(FW_STATUS_DONE, &buf->status); ++ mutex_unlock(&fw_lock); + complete_all(&buf->completion); + direct_load = 1; + goto handle_fw; +@@ -894,15 +905,14 @@ static int 
_request_firmware_load(struct firmware_priv *fw_priv, bool uevent, + dev_set_uevent_suppress(f_dev, false); + dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id); + if (timeout != MAX_SCHEDULE_TIMEOUT) +- mod_timer(&fw_priv->timeout, +- round_jiffies_up(jiffies + timeout)); ++ schedule_delayed_work(&fw_priv->timeout_work, timeout); + + kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD); + } + + wait_for_completion(&buf->completion); + +- del_timer_sync(&fw_priv->timeout); ++ cancel_delayed_work_sync(&fw_priv->timeout_work); + + handle_fw: + mutex_lock(&fw_lock); +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index ee82f2f..a1d4ede 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -96,6 +96,7 @@ static struct usb_device_id btusb_table[] = { + { USB_DEVICE(0x0c10, 0x0000) }, + + /* Broadcom BCM20702A0 */ ++ { USB_DEVICE(0x0b05, 0x17b5) }, + { USB_DEVICE(0x04ca, 0x2003) }, + { USB_DEVICE(0x0489, 0xe042) }, + { USB_DEVICE(0x413c, 0x8197) }, +diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c +index e2c17d1..6939009 100644 +--- a/drivers/clk/ux500/u8500_clk.c ++++ b/drivers/clk/ux500/u8500_clk.c +@@ -323,7 +323,7 @@ void u8500_clk_init(void) + clk_register_clkdev(clk, NULL, "gpioblock1"); + + clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", U8500_CLKRST2_BASE, +- BIT(11), 0); ++ BIT(12), 0); + + clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", U8500_CLKRST3_BASE, + BIT(0), 0); +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index f4109fd..52146db 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1538,6 +1538,7 @@ static const struct hid_device_id hid_have_special_driver[] = { + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, ++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, +diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c +index 4f41104..34ab2a8 100644 +--- a/drivers/hwmon/fam15h_power.c ++++ b/drivers/hwmon/fam15h_power.c +@@ -31,6 +31,9 @@ MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor"); + MODULE_AUTHOR("Andreas Herrmann "); + MODULE_LICENSE("GPL"); + ++/* Family 16h Northbridge's function 4 PCI ID */ ++#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534 ++ + /* D18F3 */ + #define REG_NORTHBRIDGE_CAP 0xe8 + +@@ -248,6 +251,7 @@ static void __devexit fam15h_power_remove(struct pci_dev *pdev) + + static DEFINE_PCI_DEVICE_TABLE(fam15h_power_id_table) = { + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, ++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, + {} + }; + MODULE_DEVICE_TABLE(pci, fam15h_power_id_table); +diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c +index 4dfa1ee..f8f892b 100644 +--- a/drivers/input/joystick/walkera0701.c ++++ b/drivers/input/joystick/walkera0701.c +@@ -196,6 +196,7 @@ static void walkera0701_close(struct input_dev *dev) + struct walkera_dev *w = input_get_drvdata(dev); + + parport_disable_irq(w->parport); ++ hrtimer_cancel(&w->timer); + } + + static int 
walkera0701_connect(struct walkera_dev *w, int parport) +@@ -224,6 +225,9 @@ static int walkera0701_connect(struct walkera_dev *w, int parport) + if (parport_claim(w->pardevice)) + goto init_err1; + ++ hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ w->timer.function = timer_handler; ++ + w->input_dev = input_allocate_device(); + if (!w->input_dev) + goto init_err2; +@@ -254,8 +258,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport) + if (err) + goto init_err3; + +- hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); +- w->timer.function = timer_handler; + return 0; + + init_err3: +@@ -271,7 +273,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport) + + static void walkera0701_disconnect(struct walkera_dev *w) + { +- hrtimer_cancel(&w->timer); + input_unregister_device(w->input_dev); + parport_release(w->pardevice); + parport_unregister_device(w->pardevice); +diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c +index 6a68041..59347d0 100644 +--- a/drivers/input/keyboard/gpio_keys.c ++++ b/drivers/input/keyboard/gpio_keys.c +@@ -587,6 +587,7 @@ gpio_keys_get_devtree_pdata(struct device *dev) + + i = 0; + for_each_child_of_node(node, pp) { ++ int gpio; + enum of_gpio_flags flags; + + if (!of_find_property(pp, "gpios", NULL)) { +@@ -595,9 +596,19 @@ gpio_keys_get_devtree_pdata(struct device *dev) + continue; + } + ++ gpio = of_get_gpio_flags(pp, 0, &flags); ++ if (gpio < 0) { ++ error = gpio; ++ if (error != -EPROBE_DEFER) ++ dev_err(dev, ++ "Failed to get gpio flags, error: %d\n", ++ error); ++ goto err_free_pdata; ++ } ++ + button = &pdata->buttons[i++]; + +- button->gpio = of_get_gpio_flags(pp, 0, &flags); ++ button->gpio = gpio; + button->active_low = flags & OF_GPIO_ACTIVE_LOW; + + if (of_property_read_u32(pp, "linux,code", &button->code)) { +diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c +index f2142de..8490930 100644 +--- a/drivers/input/keyboard/gpio_keys_polled.c ++++ b/drivers/input/keyboard/gpio_keys_polled.c +@@ -136,6 +136,7 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev) + + i = 0; + for_each_child_of_node(node, pp) { ++ int gpio; + enum of_gpio_flags flags; + + if (!of_find_property(pp, "gpios", NULL)) { +@@ -144,9 +145,19 @@ gpio_keys_polled_get_devtree_pdata(struct device *dev) + continue; + } + ++ gpio = of_get_gpio_flags(pp, 0, &flags); ++ if (gpio < 0) { ++ error = gpio; ++ if (error != -EPROBE_DEFER) ++ dev_err(dev, ++ "Failed to get gpio flags, error: %d\n", ++ error); ++ goto err_free_pdata; ++ } ++ + button = &pdata->buttons[i++]; + +- button->gpio = of_get_gpio_flags(pp, 0, &flags); ++ button->gpio = gpio; + button->active_low = flags & OF_GPIO_ACTIVE_LOW; + + if (of_property_read_u32(pp, "linux,code", &button->code)) { +diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c +index e582922..cc7e0d4 100644 +--- a/drivers/input/mouse/sentelic.c ++++ b/drivers/input/mouse/sentelic.c +@@ -791,7 +791,7 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse) + fsp_set_slot(dev, 0, fgrs > 0, abs_x, abs_y); + fsp_set_slot(dev, 1, false, 0, 0); + } +- if (fgrs > 0) { ++ if (fgrs == 1 || (fgrs == 2 && !(packet[0] & FSP_PB0_MFMC_FGR2))) { + input_report_abs(dev, ABS_X, abs_x); + input_report_abs(dev, ABS_Y, abs_y); + } +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index d6cc77a..5f306f7 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ 
b/drivers/input/serio/i8042-x86ia64io.h +@@ -921,6 +921,7 @@ static int __init i8042_platform_init(void) + int retval; + + #ifdef CONFIG_X86 ++ u8 a20_on = 0xdf; + /* Just return if pre-detection shows no i8042 controller exist */ + if (!x86_platform.i8042_detect()) + return -ENODEV; +@@ -960,6 +961,14 @@ static int __init i8042_platform_init(void) + + if (dmi_check_system(i8042_dmi_dritek_table)) + i8042_dritek = true; ++ ++ /* ++ * A20 was already enabled during early kernel init. But some buggy ++ * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to ++ * resume from S3. So we do it here and hope that nothing breaks. ++ */ ++ i8042_command(&a20_on, 0x10d1); ++ i8042_command(NULL, 0x00ff); /* Null command for SMM firmware */ + #endif /* CONFIG_X86 */ + + return retval; +diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c +index 0a67031..c2bfe92 100644 +--- a/drivers/input/tablet/wacom_wac.c ++++ b/drivers/input/tablet/wacom_wac.c +@@ -2034,7 +2034,8 @@ static const struct wacom_features wacom_features_0xD1 = + .touch_max = 2 }; + static const struct wacom_features wacom_features_0xD2 = + { "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, +- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; ++ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, ++ .touch_max = 2 }; + static const struct wacom_features wacom_features_0xD3 = + { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, + 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, +diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c +index 8d082b4..d971817 100644 +--- a/drivers/misc/sgi-xp/xpc_main.c ++++ b/drivers/misc/sgi-xp/xpc_main.c +@@ -53,6 +53,10 @@ + #include + #include "xpc.h" + ++#ifdef CONFIG_X86_64 ++#include ++#endif ++ + /* define two XPC debug device structures to be used with dev_dbg() et al */ + + struct device_driver xpc_dbg_name = { +@@ -1079,6 +1083,9 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) + return NOTIFY_DONE; + } + ++/* Used to only allow one cpu to complete disconnect */ ++static unsigned int xpc_die_disconnecting; ++ + /* + * Notify other partitions to deactivate from us by first disengaging from all + * references to our memory. +@@ -1092,6 +1099,9 @@ xpc_die_deactivate(void) + long keep_waiting; + long wait_to_print; + ++ if (cmpxchg(&xpc_die_disconnecting, 0, 1)) ++ return; ++ + /* keep xpc_hb_checker thread from doing anything (just in case) */ + xpc_exiting = 1; + +@@ -1159,7 +1169,7 @@ xpc_die_deactivate(void) + * about the lack of a heartbeat. + */ + static int +-xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) ++xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) + { + #ifdef CONFIG_IA64 /* !!! 
temporary kludge */ + switch (event) { +@@ -1191,7 +1201,27 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) + break; + } + #else +- xpc_die_deactivate(); ++ struct die_args *die_args = _die_args; ++ ++ switch (event) { ++ case DIE_TRAP: ++ if (die_args->trapnr == X86_TRAP_DF) ++ xpc_die_deactivate(); ++ ++ if (((die_args->trapnr == X86_TRAP_MF) || ++ (die_args->trapnr == X86_TRAP_XF)) && ++ !user_mode_vm(die_args->regs)) ++ xpc_die_deactivate(); ++ ++ break; ++ case DIE_INT3: ++ case DIE_DEBUG: ++ break; ++ case DIE_OOPS: ++ case DIE_GPF: ++ default: ++ xpc_die_deactivate(); ++ } + #endif + + return NOTIFY_DONE; +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index 963e2cc..8233e5e 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -609,8 +609,7 @@ void close_candev(struct net_device *dev) + { + struct can_priv *priv = netdev_priv(dev); + +- if (del_timer_sync(&priv->restart_timer)) +- dev_put(dev); ++ del_timer_sync(&priv->restart_timer); + can_flush_echo_skb(dev); + } + EXPORT_SYMBOL_GPL(close_candev); +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c +index d012982..c4c593d 100644 +--- a/drivers/net/usb/cdc_ether.c ++++ b/drivers/net/usb/cdc_ether.c +@@ -487,6 +487,7 @@ static const struct driver_info wwan_info = { + #define HUAWEI_VENDOR_ID 0x12D1 + #define NOVATEL_VENDOR_ID 0x1410 + #define ZTE_VENDOR_ID 0x19D2 ++#define DELL_VENDOR_ID 0x413C + + static const struct usb_device_id products [] = { + /* +@@ -618,6 +619,20 @@ static const struct usb_device_id products [] = { + .driver_info = 0, + }, + ++/* Dell Wireless 5800 (Novatel E362) - handled by qmi_wwan */ ++{ ++ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x8195, USB_CLASS_COMM, ++ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), ++ .driver_info = 0, ++}, ++ ++/* Dell Wireless 5800 (Novatel E362) - handled by qmi_wwan */ ++{ ++ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x8196, USB_CLASS_COMM, ++ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), ++ .driver_info = 0, ++}, ++ + /* + * WHITELIST!!! + * +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 1ea91f4..9b950f5 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -383,6 +383,20 @@ static const struct usb_device_id products[] = { + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&qmi_wwan_info, + }, ++ { /* Dell Wireless 5800 (Novatel E362) */ ++ USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8195, ++ USB_CLASS_COMM, ++ USB_CDC_SUBCLASS_ETHERNET, ++ USB_CDC_PROTO_NONE), ++ .driver_info = (unsigned long)&qmi_wwan_info, ++ }, ++ { /* Dell Wireless 5800 V2 (Novatel E362) */ ++ USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8196, ++ USB_CLASS_COMM, ++ USB_CDC_SUBCLASS_ETHERNET, ++ USB_CDC_PROTO_NONE), ++ .driver_info = (unsigned long)&qmi_wwan_info, ++ }, + + /* 3. 
Combined interface devices matching on interface number */ + {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ +diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c +index 777cd74..38bc5a7 100644 +--- a/drivers/net/wireless/b43/dma.c ++++ b/drivers/net/wireless/b43/dma.c +@@ -409,7 +409,10 @@ static inline + struct b43_dmadesc_meta *meta) + { + if (meta->skb) { +- dev_kfree_skb_any(meta->skb); ++ if (ring->tx) ++ ieee80211_free_txskb(ring->dev->wl->hw, meta->skb); ++ else ++ dev_kfree_skb_any(meta->skb); + meta->skb = NULL; + } + } +@@ -1454,7 +1457,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) + if (unlikely(err == -ENOKEY)) { + /* Drop this packet, as we don't have the encryption key + * anymore and must not transmit it unencrypted. */ +- dev_kfree_skb_any(skb); ++ ieee80211_free_txskb(dev->wl->hw, skb); + err = 0; + goto out; + } +diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c +index c5a99c8..2911e20 100644 +--- a/drivers/net/wireless/b43/main.c ++++ b/drivers/net/wireless/b43/main.c +@@ -3397,7 +3397,7 @@ static void b43_tx_work(struct work_struct *work) + break; + } + if (unlikely(err)) +- dev_kfree_skb(skb); /* Drop it */ ++ ieee80211_free_txskb(wl->hw, skb); + err = 0; + } + +@@ -3419,7 +3419,7 @@ static void b43_op_tx(struct ieee80211_hw *hw, + + if (unlikely(skb->len < 2 + 2 + 6)) { + /* Too short, this can't be a valid frame. */ +- dev_kfree_skb_any(skb); ++ ieee80211_free_txskb(hw, skb); + return; + } + B43_WARN_ON(skb_shinfo(skb)->nr_frags); +@@ -4229,8 +4229,12 @@ redo: + + /* Drain all TX queues. */ + for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) { +- while (skb_queue_len(&wl->tx_queue[queue_num])) +- dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num])); ++ while (skb_queue_len(&wl->tx_queue[queue_num])) { ++ struct sk_buff *skb; ++ ++ skb = skb_dequeue(&wl->tx_queue[queue_num]); ++ ieee80211_free_txskb(wl->hw, skb); ++ } + } + + b43_mac_suspend(dev); +diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c +index 3533ab8..a73ff8c 100644 +--- a/drivers/net/wireless/b43/pio.c ++++ b/drivers/net/wireless/b43/pio.c +@@ -196,7 +196,7 @@ static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q) + for (i = 0; i < ARRAY_SIZE(q->packets); i++) { + pack = &(q->packets[i]); + if (pack->skb) { +- dev_kfree_skb_any(pack->skb); ++ ieee80211_free_txskb(q->dev->wl->hw, pack->skb); + pack->skb = NULL; + } + } +@@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) + if (unlikely(err == -ENOKEY)) { + /* Drop this packet, as we don't have the encryption key + * anymore and must not transmit it unencrypted. 
*/ +- dev_kfree_skb_any(skb); ++ ieee80211_free_txskb(dev->wl->hw, skb); + err = 0; + goto out; + } +diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h +index a29da67..482476f 100644 +--- a/drivers/net/wireless/b43legacy/b43legacy.h ++++ b/drivers/net/wireless/b43legacy/b43legacy.h +@@ -13,6 +13,7 @@ + + #include + #include ++#include + + #include + +@@ -733,6 +734,10 @@ struct b43legacy_wldev { + + /* Firmware data */ + struct b43legacy_firmware fw; ++ const struct firmware *fwp; /* needed to pass fw pointer */ ++ ++ /* completion struct for firmware loading */ ++ struct completion fw_load_complete; + + /* Devicelist in struct b43legacy_wl (all 802.11 cores) */ + struct list_head list; +diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c +index 18e208e..8c3f70e 100644 +--- a/drivers/net/wireless/b43legacy/main.c ++++ b/drivers/net/wireless/b43legacy/main.c +@@ -1513,9 +1513,17 @@ static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl) + "and download the correct firmware (version 3).\n"); + } + ++static void b43legacy_fw_cb(const struct firmware *firmware, void *context) ++{ ++ struct b43legacy_wldev *dev = context; ++ ++ dev->fwp = firmware; ++ complete(&dev->fw_load_complete); ++} ++ + static int do_request_fw(struct b43legacy_wldev *dev, + const char *name, +- const struct firmware **fw) ++ const struct firmware **fw, bool async) + { + char path[sizeof(modparam_fwpostfix) + 32]; + struct b43legacy_fw_header *hdr; +@@ -1528,7 +1536,24 @@ static int do_request_fw(struct b43legacy_wldev *dev, + snprintf(path, ARRAY_SIZE(path), + "b43legacy%s/%s.fw", + modparam_fwpostfix, name); +- err = request_firmware(fw, path, dev->dev->dev); ++ b43legacyinfo(dev->wl, "Loading firmware %s\n", path); ++ if (async) { ++ init_completion(&dev->fw_load_complete); ++ err = request_firmware_nowait(THIS_MODULE, 1, path, ++ dev->dev->dev, GFP_KERNEL, ++ dev, b43legacy_fw_cb); ++ if (err) { ++ b43legacyerr(dev->wl, "Unable to load firmware\n"); ++ return err; ++ } ++ /* stall here until fw ready */ ++ wait_for_completion(&dev->fw_load_complete); ++ if (!dev->fwp) ++ err = -EINVAL; ++ *fw = dev->fwp; ++ } else { ++ err = request_firmware(fw, path, dev->dev->dev); ++ } + if (err) { + b43legacyerr(dev->wl, "Firmware file \"%s\" not found " + "or load failed.\n", path); +@@ -1580,7 +1605,7 @@ static void b43legacy_request_firmware(struct work_struct *work) + filename = "ucode4"; + else + filename = "ucode5"; +- err = do_request_fw(dev, filename, &fw->ucode); ++ err = do_request_fw(dev, filename, &fw->ucode, true); + if (err) + goto err_load; + } +@@ -1589,7 +1614,7 @@ static void b43legacy_request_firmware(struct work_struct *work) + filename = "pcm4"; + else + filename = "pcm5"; +- err = do_request_fw(dev, filename, &fw->pcm); ++ err = do_request_fw(dev, filename, &fw->pcm, false); + if (err) + goto err_load; + } +@@ -1607,7 +1632,7 @@ static void b43legacy_request_firmware(struct work_struct *work) + default: + goto err_no_initvals; + } +- err = do_request_fw(dev, filename, &fw->initvals); ++ err = do_request_fw(dev, filename, &fw->initvals, false); + if (err) + goto err_load; + } +@@ -1627,7 +1652,7 @@ static void b43legacy_request_firmware(struct work_struct *work) + default: + goto err_no_initvals; + } +- err = do_request_fw(dev, filename, &fw->initvals_band); ++ err = do_request_fw(dev, filename, &fw->initvals_band, false); + if (err) + goto err_load; + } +diff --git a/drivers/net/wireless/p54/p54usb.c 
b/drivers/net/wireless/p54/p54usb.c +index effb044..4191294 100644 +--- a/drivers/net/wireless/p54/p54usb.c ++++ b/drivers/net/wireless/p54/p54usb.c +@@ -47,6 +47,7 @@ static struct usb_device_id p54u_table[] = { + {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */ + {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ + {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ ++ {USB_DEVICE(0x0675, 0x0530)}, /* DrayTek Vigor 530 */ + {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */ + {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ + {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */ +@@ -82,6 +83,8 @@ static struct usb_device_id p54u_table[] = { + {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ + {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ + {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ ++ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ ++ {USB_DEVICE(0x083a, 0x4503)}, /* T-Com Sinus 154 data II */ + {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ + {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */ + {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */ +@@ -101,6 +104,7 @@ static struct usb_device_id p54u_table[] = { + {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */ + {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ + {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ ++ /* {USB_DEVICE(0x15a9, 0x0002)}, * Also SparkLAN WL-682 with 3887 */ + {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ + {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ + {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ +diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c +index e3ea4b3..46ccbf7 100644 +--- a/drivers/net/wireless/rtlwifi/usb.c ++++ b/drivers/net/wireless/rtlwifi/usb.c +@@ -210,17 +210,16 @@ static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data, + u16 index = REALTEK_USB_VENQT_CMD_IDX; + int pipe = usb_sndctrlpipe(udev, 0); /* write_out */ + u8 *buffer; +- dma_addr_t dma_addr; + +- wvalue = (u16)(addr&0x0000ffff); +- buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr); ++ wvalue = (u16)(addr & 0x0000ffff); ++ buffer = kmalloc(len, GFP_ATOMIC); + if (!buffer) + return; + memcpy(buffer, data, len); + usb_control_msg(udev, pipe, request, reqtype, wvalue, + index, buffer, len, 50); + +- usb_free_coherent(udev, (size_t)len, buffer, dma_addr); ++ kfree(buffer); + } + + static void _rtl_usb_io_handler_init(struct device *dev, +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c +index 6c94fc9..6c66c5b 100644 +--- a/drivers/pci/pci-driver.c ++++ b/drivers/pci/pci-driver.c +@@ -256,31 +256,26 @@ struct drv_dev_and_id { + static long local_pci_probe(void *_ddi) + { + struct drv_dev_and_id *ddi = _ddi; +- struct device *dev = &ddi->dev->dev; +- struct device *parent = dev->parent; ++ struct pci_dev *pci_dev = ddi->dev; ++ struct pci_driver *pci_drv = ddi->drv; ++ struct device *dev = &pci_dev->dev; + int rc; + +- /* The parent bridge must be in active state when probing */ +- if (parent) +- pm_runtime_get_sync(parent); +- /* Unbound PCI devices are always set to disabled and suspended. +- * During probe, the device is set to enabled and active and the +- * usage count is incremented. If the driver supports runtime PM, +- * it should call pm_runtime_put_noidle() in its probe routine and +- * pm_runtime_get_noresume() in its remove routine. 
++ /* ++ * Unbound PCI devices are always put in D0, regardless of ++ * runtime PM status. During probe, the device is set to ++ * active and the usage count is incremented. If the driver ++ * supports runtime PM, it should call pm_runtime_put_noidle() ++ * in its probe routine and pm_runtime_get_noresume() in its ++ * remove routine. + */ +- pm_runtime_get_noresume(dev); +- pm_runtime_set_active(dev); +- pm_runtime_enable(dev); +- +- rc = ddi->drv->probe(ddi->dev, ddi->id); ++ pm_runtime_get_sync(dev); ++ pci_dev->driver = pci_drv; ++ rc = pci_drv->probe(pci_dev, ddi->id); + if (rc) { +- pm_runtime_disable(dev); +- pm_runtime_set_suspended(dev); +- pm_runtime_put_noidle(dev); ++ pci_dev->driver = NULL; ++ pm_runtime_put_sync(dev); + } +- if (parent) +- pm_runtime_put(parent); + return rc; + } + +@@ -330,10 +325,8 @@ __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev) + id = pci_match_device(drv, pci_dev); + if (id) + error = pci_call_probe(drv, pci_dev, id); +- if (error >= 0) { +- pci_dev->driver = drv; ++ if (error >= 0) + error = 0; +- } + } + return error; + } +@@ -369,9 +362,7 @@ static int pci_device_remove(struct device * dev) + } + + /* Undo the runtime PM settings in local_pci_probe() */ +- pm_runtime_disable(dev); +- pm_runtime_set_suspended(dev); +- pm_runtime_put_noidle(dev); ++ pm_runtime_put_sync(dev); + + /* + * If the device is still on, set the power state as "unknown", +@@ -994,6 +985,13 @@ static int pci_pm_runtime_suspend(struct device *dev) + pci_power_t prev = pci_dev->current_state; + int error; + ++ /* ++ * If pci_dev->driver is not set (unbound), the device should ++ * always remain in D0 regardless of the runtime PM status ++ */ ++ if (!pci_dev->driver) ++ return 0; ++ + if (!pm || !pm->runtime_suspend) + return -ENOSYS; + +@@ -1029,6 +1027,13 @@ static int pci_pm_runtime_resume(struct device *dev) + struct pci_dev *pci_dev = to_pci_dev(dev); + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + ++ /* ++ * If pci_dev->driver is not set (unbound), the device should ++ * always remain in D0 regardless of the runtime PM status ++ */ ++ if (!pci_dev->driver) ++ return 0; ++ + if (!pm || !pm->runtime_resume) + return -ENOSYS; + +@@ -1046,8 +1051,16 @@ static int pci_pm_runtime_resume(struct device *dev) + + static int pci_pm_runtime_idle(struct device *dev) + { ++ struct pci_dev *pci_dev = to_pci_dev(dev); + const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; + ++ /* ++ * If pci_dev->driver is not set (unbound), the device should ++ * always remain in D0 regardless of the runtime PM status ++ */ ++ if (!pci_dev->driver) ++ goto out; ++ + if (!pm) + return -ENOSYS; + +@@ -1057,8 +1070,8 @@ static int pci_pm_runtime_idle(struct device *dev) + return ret; + } + ++out: + pm_runtime_suspend(dev); +- + return 0; + } + +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index aabf647..8e06adb 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -1900,6 +1900,8 @@ void pci_pm_init(struct pci_dev *dev) + u16 pmc; + + pm_runtime_forbid(&dev->dev); ++ pm_runtime_set_active(&dev->dev); ++ pm_runtime_enable(&dev->dev); + device_enable_async_suspend(&dev->dev); + dev->wakeup_prepared = false; + +diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c +index 0761d90..ebb3e5f 100644 +--- a/drivers/pci/pcie/portdrv_pci.c ++++ b/drivers/pci/pcie/portdrv_pci.c +@@ -134,10 +134,28 @@ static int pcie_port_runtime_resume(struct device *dev) + return 0; + } + ++static int pci_dev_pme_poll(struct pci_dev *pdev, void *data) ++{ ++ bool *pme_poll = data; ++ ++ if (pdev->pme_poll) ++ *pme_poll = true; ++ return 0; ++} ++ + static int pcie_port_runtime_idle(struct device *dev) + { ++ struct pci_dev *pdev = to_pci_dev(dev); ++ bool pme_poll = false; ++ ++ /* ++ * If any subordinate device needs pme poll, we should keep ++ * the port in D0, because we need port in D0 to poll it. ++ */ ++ pci_walk_bus(pdev->subordinate, pci_dev_pme_poll, &pme_poll); + /* Delay for a short while to prevent too frequent suspend/resume */ +- pm_schedule_suspend(dev, 10); ++ if (!pme_poll) ++ pm_schedule_suspend(dev, 10); + return -EBUSY; + } + #else +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 7a451ff..7eb4d16 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -2686,7 +2686,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) + if (PCI_FUNC(dev->devfn)) + return; + /* +- * RICOH 0xe823 SD/MMC card reader fails to recognize ++ * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize + * certain types of SD/MMC cards. Lowering the SD base + * clock frequency from 200Mhz to 50Mhz fixes this issue. 
+ * +@@ -2697,7 +2697,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) + * 0xf9 - Key register for 0x150 + * 0xfc - key register for 0xe1 + */ +- if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) { ++ if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 || ++ dev->device == PCI_DEVICE_ID_RICOH_R5CE823) { + pci_write_config_byte(dev, 0xf9, 0xfc); + pci_write_config_byte(dev, 0x150, 0x10); + pci_write_config_byte(dev, 0xf9, 0x00); +@@ -2724,6 +2725,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) + } + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); + DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); ++DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); + DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); + #endif /*CONFIG_MMC_RICOH_MMC*/ +diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c +index 26b5d4b..767f526 100644 +--- a/drivers/pnp/pnpacpi/core.c ++++ b/drivers/pnp/pnpacpi/core.c +@@ -58,7 +58,7 @@ static inline int __init is_exclusive_device(struct acpi_device *dev) + if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \ + return 0 + #define TEST_ALPHA(c) \ +- if (!('@' <= (c) || (c) <= 'Z')) \ ++ if (!('A' <= (c) && (c) <= 'Z')) \ + return 0 + static int __init ispnpidacpi(const char *id) + { +@@ -95,6 +95,9 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) + return -ENODEV; + } + ++ if (WARN_ON_ONCE(acpi_dev != dev->data)) ++ dev->data = acpi_dev; ++ + ret = pnpacpi_build_resource_template(dev, &buffer); + if (ret) + return ret; +diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c +index f8a0aab..5143629 100644 +--- a/drivers/rtc/class.c ++++ b/drivers/rtc/class.c +@@ -244,7 +244,6 @@ void rtc_device_unregister(struct rtc_device *rtc) + rtc_proc_del_device(rtc); + device_unregister(&rtc->dev); + rtc->ops = NULL; +- ida_simple_remove(&rtc_ida, rtc->id); + mutex_unlock(&rtc->ops_lock); + put_device(&rtc->dev); + } +diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c +index 07bf193..75485c4 100644 +--- a/drivers/rtc/rtc-vt8500.c ++++ b/drivers/rtc/rtc-vt8500.c +@@ -70,7 +70,7 @@ + | ALARM_SEC_BIT) + + #define VT8500_RTC_CR_ENABLE (1 << 0) /* Enable RTC */ +-#define VT8500_RTC_CR_24H (1 << 1) /* 24h time format */ ++#define VT8500_RTC_CR_12H (1 << 1) /* 12h time format */ + #define VT8500_RTC_CR_SM_ENABLE (1 << 2) /* Enable periodic irqs */ + #define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */ + #define VT8500_RTC_CR_CALIB (1 << 4) /* Enable calibration */ +@@ -119,7 +119,7 @@ static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm) + tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S); + tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S); + tm->tm_mday = bcd2bin(date & DATE_DAY_MASK); +- tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S); ++ tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S) - 1; + tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S) + + ((date >> DATE_CENTURY_S) & 1 ? 
200 : 100); + tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S; +@@ -138,8 +138,9 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm) + } + + writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S) +- | (bin2bcd(tm->tm_mon) << DATE_MONTH_S) +- | (bin2bcd(tm->tm_mday)), ++ | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S) ++ | (bin2bcd(tm->tm_mday)) ++ | ((tm->tm_year >= 200) << DATE_CENTURY_S), + vt8500_rtc->regbase + VT8500_RTC_DS); + writel((bin2bcd(tm->tm_wday) << TIME_DOW_S) + | (bin2bcd(tm->tm_hour) << TIME_HOUR_S) +@@ -249,7 +250,7 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev) + } + + /* Enable RTC and set it to 24-hour mode */ +- writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H, ++ writel(VT8500_RTC_CR_ENABLE, + vt8500_rtc->regbase + VT8500_RTC_CR); + + vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev, +diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c +index b05f5f7..f10ab70 100644 +--- a/drivers/staging/bcm/InterfaceInit.c ++++ b/drivers/staging/bcm/InterfaceInit.c +@@ -4,11 +4,12 @@ static struct usb_device_id InterfaceUsbtable[] = { + { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3) }, + { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3B) }, + { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3L) }, +- { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_SM250) }, ++ { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_SYM) }, + { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_226) }, + { USB_DEVICE(BCM_USB_VENDOR_ID_FOXCONN, BCM_USB_PRODUCT_ID_1901) }, + { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_TU25) }, + { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_226) }, ++ { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_326) }, + { } + }; + MODULE_DEVICE_TABLE(usb, InterfaceUsbtable); +diff --git a/drivers/staging/bcm/InterfaceInit.h b/drivers/staging/bcm/InterfaceInit.h +index 866924e..241bf97 100644 +--- a/drivers/staging/bcm/InterfaceInit.h ++++ b/drivers/staging/bcm/InterfaceInit.h +@@ -8,11 +8,11 @@ + #define BCM_USB_PRODUCT_ID_T3 0x0300 + #define BCM_USB_PRODUCT_ID_T3B 0x0210 + #define BCM_USB_PRODUCT_ID_T3L 0x0220 +-#define BCM_USB_PRODUCT_ID_SM250 0xbccd + #define BCM_USB_PRODUCT_ID_SYM 0x15E + #define BCM_USB_PRODUCT_ID_1901 0xe017 + #define BCM_USB_PRODUCT_ID_226 0x0132 /* not sure if this is valid */ + #define BCM_USB_PRODUCT_ID_ZTE_226 0x172 ++#define BCM_USB_PRODUCT_ID_ZTE_326 0x173 /* ZTE AX326 */ + #define BCM_USB_PRODUCT_ID_ZTE_TU25 0x0007 + + #define BCM_USB_MINOR_BASE 192 +diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c +index 732f2ad..5249223 100644 +--- a/drivers/staging/omapdrm/omap_crtc.c ++++ b/drivers/staging/omapdrm/omap_crtc.c +@@ -19,7 +19,7 @@ + + #include "omap_drv.h" + +-#include "drm_mode.h" ++#include <drm/drm_mode.h> + #include "drm_crtc.h" + #include "drm_crtc_helper.h" + +diff --git a/drivers/staging/telephony/ixj.c b/drivers/staging/telephony/ixj.c +index 1cfa0b0..cf6aa83 100644 +--- a/drivers/staging/telephony/ixj.c ++++ b/drivers/staging/telephony/ixj.c +@@ -3190,12 +3190,12 @@ static void ixj_write_cid(IXJ *j) + + ixj_fsk_alloc(j); + +- strcpy(sdmf1, j->cid_send.month); +- strcat(sdmf1, j->cid_send.day); +- strcat(sdmf1, j->cid_send.hour); +- strcat(sdmf1, j->cid_send.min); +- strcpy(sdmf2, j->cid_send.number); +- strcpy(sdmf3, j->cid_send.name); ++ strlcpy(sdmf1, j->cid_send.month, sizeof(sdmf1)); ++ strlcat(sdmf1, j->cid_send.day, sizeof(sdmf1)); ++ strlcat(sdmf1,
j->cid_send.hour, sizeof(sdmf1)); ++ strlcat(sdmf1, j->cid_send.min, sizeof(sdmf1)); ++ strlcpy(sdmf2, j->cid_send.number, sizeof(sdmf2)); ++ strlcpy(sdmf3, j->cid_send.name, sizeof(sdmf3)); + + len1 = strlen(sdmf1); + len2 = strlen(sdmf2); +@@ -3340,12 +3340,12 @@ static void ixj_write_cidcw(IXJ *j) + ixj_pre_cid(j); + } + j->flags.cidcw_ack = 0; +- strcpy(sdmf1, j->cid_send.month); +- strcat(sdmf1, j->cid_send.day); +- strcat(sdmf1, j->cid_send.hour); +- strcat(sdmf1, j->cid_send.min); +- strcpy(sdmf2, j->cid_send.number); +- strcpy(sdmf3, j->cid_send.name); ++ strlcpy(sdmf1, j->cid_send.month, sizeof(sdmf1)); ++ strlcat(sdmf1, j->cid_send.day, sizeof(sdmf1)); ++ strlcat(sdmf1, j->cid_send.hour, sizeof(sdmf1)); ++ strlcat(sdmf1, j->cid_send.min, sizeof(sdmf1)); ++ strlcpy(sdmf2, j->cid_send.number, sizeof(sdmf2)); ++ strlcpy(sdmf3, j->cid_send.name, sizeof(sdmf3)); + + len1 = strlen(sdmf1); + len2 = strlen(sdmf2); +diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c +index f69d029..b726c49 100644 +--- a/drivers/usb/chipidea/core.c ++++ b/drivers/usb/chipidea/core.c +@@ -385,8 +385,9 @@ EXPORT_SYMBOL_GPL(ci13xxx_add_device); + + void ci13xxx_remove_device(struct platform_device *pdev) + { ++ int id = pdev->id; + platform_device_unregister(pdev); +- ida_simple_remove(&ci_ida, pdev->id); ++ ida_simple_remove(&ci_ida, id); + } + EXPORT_SYMBOL_GPL(ci13xxx_remove_device); + +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 6e49ec6..8d809a8 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -787,6 +787,10 @@ static int get_serial_info(struct acm *acm, struct serial_struct __user *info) + tmp.flags = ASYNC_LOW_LATENCY; + tmp.xmit_fifo_size = acm->writesize; + tmp.baud_base = le32_to_cpu(acm->line.dwDTERate); ++ tmp.close_delay = acm->port.close_delay / 10; ++ tmp.closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? ++ ASYNC_CLOSING_WAIT_NONE : ++ acm->port.closing_wait / 10; + + if (copy_to_user(info, &tmp, sizeof(tmp))) + return -EFAULT; +@@ -794,6 +798,37 @@ static int get_serial_info(struct acm *acm, struct serial_struct __user *info) + return 0; + } + ++static int set_serial_info(struct acm *acm, ++ struct serial_struct __user *newinfo) ++{ ++ struct serial_struct new_serial; ++ unsigned int closing_wait, close_delay; ++ int retval = 0; ++ ++ if (copy_from_user(&new_serial, newinfo, sizeof(new_serial))) ++ return -EFAULT; ++ ++ close_delay = new_serial.close_delay * 10; ++ closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ? 
++ ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10; ++ ++ mutex_lock(&acm->port.mutex); ++ ++ if (!capable(CAP_SYS_ADMIN)) { ++ if ((close_delay != acm->port.close_delay) || ++ (closing_wait != acm->port.closing_wait)) ++ retval = -EPERM; ++ else ++ retval = -EOPNOTSUPP; ++ } else { ++ acm->port.close_delay = close_delay; ++ acm->port.closing_wait = closing_wait; ++ } ++ ++ mutex_unlock(&acm->port.mutex); ++ return retval; ++} ++ + static int acm_tty_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) + { +@@ -804,6 +839,9 @@ static int acm_tty_ioctl(struct tty_struct *tty, + case TIOCGSERIAL: /* gets serial port data */ + rv = get_serial_info(acm, (struct serial_struct __user *) arg); + break; ++ case TIOCSSERIAL: ++ rv = set_serial_info(acm, (struct serial_struct __user *) arg); ++ break; + } + + return rv; +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c +index 1ed5afd..a557658 100644 +--- a/drivers/usb/core/message.c ++++ b/drivers/usb/core/message.c +@@ -1806,29 +1806,8 @@ free_interfaces: + goto free_interfaces; + } + +- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), +- USB_REQ_SET_CONFIGURATION, 0, configuration, 0, +- NULL, 0, USB_CTRL_SET_TIMEOUT); +- if (ret < 0) { +- /* All the old state is gone, so what else can we do? +- * The device is probably useless now anyway. +- */ +- cp = NULL; +- } +- +- dev->actconfig = cp; +- if (!cp) { +- usb_set_device_state(dev, USB_STATE_ADDRESS); +- usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); +- /* Leave LPM disabled while the device is unconfigured. */ +- mutex_unlock(hcd->bandwidth_mutex); +- usb_autosuspend_device(dev); +- goto free_interfaces; +- } +- mutex_unlock(hcd->bandwidth_mutex); +- usb_set_device_state(dev, USB_STATE_CONFIGURED); +- +- /* Initialize the new interface structures and the ++ /* ++ * Initialize the new interface structures and the + * hc/hcd/usbcore interface/endpoint state. + */ + for (i = 0; i < nintf; ++i) { +@@ -1872,6 +1851,35 @@ free_interfaces: + } + kfree(new_interfaces); + ++ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ++ USB_REQ_SET_CONFIGURATION, 0, configuration, 0, ++ NULL, 0, USB_CTRL_SET_TIMEOUT); ++ if (ret < 0 && cp) { ++ /* ++ * All the old state is gone, so what else can we do? ++ * The device is probably useless now anyway. ++ */ ++ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); ++ for (i = 0; i < nintf; ++i) { ++ usb_disable_interface(dev, cp->interface[i], true); ++ put_device(&cp->interface[i]->dev); ++ cp->interface[i] = NULL; ++ } ++ cp = NULL; ++ } ++ ++ dev->actconfig = cp; ++ mutex_unlock(hcd->bandwidth_mutex); ++ ++ if (!cp) { ++ usb_set_device_state(dev, USB_STATE_ADDRESS); ++ ++ /* Leave LPM disabled while the device is unconfigured. 
*/ ++ usb_autosuspend_device(dev); ++ return ret; ++ } ++ usb_set_device_state(dev, USB_STATE_CONFIGURED); ++ + if (cp->string == NULL && + !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS)) + cp->string = usb_cache_string(dev, cp->desc.iConfiguration); +diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c +index 95bc94f..2dc0f07 100644 +--- a/drivers/usb/gadget/f_ecm.c ++++ b/drivers/usb/gadget/f_ecm.c +@@ -808,9 +808,9 @@ fail: + /* we might as well release our claims on endpoints */ + if (ecm->notify) + ecm->notify->driver_data = NULL; +- if (ecm->port.out_ep->desc) ++ if (ecm->port.out_ep) + ecm->port.out_ep->driver_data = NULL; +- if (ecm->port.in_ep->desc) ++ if (ecm->port.in_ep) + ecm->port.in_ep->driver_data = NULL; + + ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); +diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c +index 1a7b2dd..a9cf2052 100644 +--- a/drivers/usb/gadget/f_eem.c ++++ b/drivers/usb/gadget/f_eem.c +@@ -319,10 +319,9 @@ fail: + if (f->hs_descriptors) + usb_free_descriptors(f->hs_descriptors); + +- /* we might as well release our claims on endpoints */ +- if (eem->port.out_ep->desc) ++ if (eem->port.out_ep) + eem->port.out_ep->driver_data = NULL; +- if (eem->port.in_ep->desc) ++ if (eem->port.in_ep) + eem->port.in_ep->driver_data = NULL; + + ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); +diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c +index 8ed1259..b2accca 100644 +--- a/drivers/usb/gadget/f_midi.c ++++ b/drivers/usb/gadget/f_midi.c +@@ -415,6 +415,7 @@ static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f) + midi->id = NULL; + + usb_free_descriptors(f->descriptors); ++ usb_free_descriptors(f->hs_descriptors); + kfree(midi); + } + +diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c +index b651b52..5e04e93 100644 +--- a/drivers/usb/gadget/f_ncm.c ++++ b/drivers/usb/gadget/f_ncm.c +@@ -1259,9 +1259,9 @@ fail: + /* we might as well release our claims on endpoints */ + if (ncm->notify) + ncm->notify->driver_data = NULL; +- if (ncm->port.out_ep->desc) ++ if (ncm->port.out_ep) + ncm->port.out_ep->driver_data = NULL; +- if (ncm->port.in_ep->desc) ++ if (ncm->port.in_ep) + ncm->port.in_ep->driver_data = NULL; + + ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); +diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c +index 8ee9268..a6c19a4 100644 +--- a/drivers/usb/gadget/f_phonet.c ++++ b/drivers/usb/gadget/f_phonet.c +@@ -531,7 +531,7 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f) + + req = usb_ep_alloc_request(fp->out_ep, GFP_KERNEL); + if (!req) +- goto err; ++ goto err_req; + + req->complete = pn_rx_complete; + fp->out_reqv[i] = req; +@@ -540,14 +540,18 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f) + /* Outgoing USB requests */ + fp->in_req = usb_ep_alloc_request(fp->in_ep, GFP_KERNEL); + if (!fp->in_req) +- goto err; ++ goto err_req; + + INFO(cdev, "USB CDC Phonet function\n"); + INFO(cdev, "using %s, OUT %s, IN %s\n", cdev->gadget->name, + fp->out_ep->name, fp->in_ep->name); + return 0; + ++err_req: ++ for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++) ++ usb_ep_free_request(fp->out_ep, fp->out_reqv[i]); + err: ++ + if (fp->out_ep) + fp->out_ep->driver_data = NULL; + if (fp->in_ep) +diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c +index b1681e4..47953fe 100644 +--- a/drivers/usb/gadget/f_rndis.c ++++ b/drivers/usb/gadget/f_rndis.c +@@ -803,9 +803,9 
@@ fail: + /* we might as well release our claims on endpoints */ + if (rndis->notify) + rndis->notify->driver_data = NULL; +- if (rndis->port.out_ep->desc) ++ if (rndis->port.out_ep) + rndis->port.out_ep->driver_data = NULL; +- if (rndis->port.in_ep->desc) ++ if (rndis->port.in_ep) + rndis->port.in_ep->driver_data = NULL; + + ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); +diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c +index 4060c0b..deb437c 100644 +--- a/drivers/usb/gadget/f_subset.c ++++ b/drivers/usb/gadget/f_subset.c +@@ -370,9 +370,9 @@ fail: + usb_free_descriptors(f->hs_descriptors); + + /* we might as well release our claims on endpoints */ +- if (geth->port.out_ep->desc) ++ if (geth->port.out_ep) + geth->port.out_ep->driver_data = NULL; +- if (geth->port.in_ep->desc) ++ if (geth->port.in_ep) + geth->port.in_ep->driver_data = NULL; + + ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); +diff --git a/drivers/usb/gadget/f_uvc.c b/drivers/usb/gadget/f_uvc.c +index 2a8bf06..10f13c1 100644 +--- a/drivers/usb/gadget/f_uvc.c ++++ b/drivers/usb/gadget/f_uvc.c +@@ -417,7 +417,6 @@ uvc_register_video(struct uvc_device *uvc) + return -ENOMEM; + + video->parent = &cdev->gadget->dev; +- video->minor = -1; + video->fops = &uvc_v4l2_fops; + video->release = video_device_release; + strncpy(video->name, cdev->gadget->name, sizeof(video->name)); +@@ -577,23 +576,12 @@ uvc_function_unbind(struct usb_configuration *c, struct usb_function *f) + + INFO(cdev, "uvc_function_unbind\n"); + +- if (uvc->vdev) { +- if (uvc->vdev->minor == -1) +- video_device_release(uvc->vdev); +- else +- video_unregister_device(uvc->vdev); +- uvc->vdev = NULL; +- } +- +- if (uvc->control_ep) +- uvc->control_ep->driver_data = NULL; +- if (uvc->video.ep) +- uvc->video.ep->driver_data = NULL; ++ video_unregister_device(uvc->vdev); ++ uvc->control_ep->driver_data = NULL; ++ uvc->video.ep->driver_data = NULL; + +- if (uvc->control_req) { +- usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); +- kfree(uvc->control_buf); +- } ++ usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); ++ kfree(uvc->control_buf); + + kfree(f->descriptors); + kfree(f->hs_descriptors); +@@ -740,7 +728,22 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) + return 0; + + error: +- uvc_function_unbind(c, f); ++ if (uvc->vdev) ++ video_device_release(uvc->vdev); ++ ++ if (uvc->control_ep) ++ uvc->control_ep->driver_data = NULL; ++ if (uvc->video.ep) ++ uvc->video.ep->driver_data = NULL; ++ ++ if (uvc->control_req) { ++ usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); ++ kfree(uvc->control_buf); ++ } ++ ++ kfree(f->descriptors); ++ kfree(f->hs_descriptors); ++ kfree(f->ss_descriptors); + return ret; + } + +diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c +index 97e68b3..4959609 100644 +--- a/drivers/usb/gadget/tcm_usb_gadget.c ++++ b/drivers/usb/gadget/tcm_usb_gadget.c +@@ -2139,6 +2139,7 @@ static struct usb_descriptor_header *uasp_fs_function_desc[] = { + (struct usb_descriptor_header *) &uasp_status_pipe_desc, + (struct usb_descriptor_header *) &uasp_fs_cmd_desc, + (struct usb_descriptor_header *) &uasp_cmd_pipe_desc, ++ NULL, + }; + + static struct usb_descriptor_header *uasp_hs_function_desc[] = { +diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c +index 2cb7d37..f42b68e 100644 +--- a/drivers/usb/host/ehci-pci.c ++++ b/drivers/usb/host/ehci-pci.c +@@ -334,7 +334,8 @@ static bool 
usb_is_intel_switchable_ehci(struct pci_dev *pdev) + pdev->vendor == PCI_VENDOR_ID_INTEL && + (pdev->device == 0x1E26 || + pdev->device == 0x8C2D || +- pdev->device == 0x8C26); ++ pdev->device == 0x8C26 || ++ pdev->device == 0x9C26); + } + + static void ehci_enable_xhci_companion(void) +diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c +index 4b66374..3d98902 100644 +--- a/drivers/usb/host/ehci-q.c ++++ b/drivers/usb/host/ehci-q.c +@@ -264,15 +264,9 @@ ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status) + __releases(ehci->lock) + __acquires(ehci->lock) + { +- if (likely (urb->hcpriv != NULL)) { +- struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv; +- +- /* S-mask in a QH means it's an interrupt urb */ +- if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) { +- +- /* ... update hc-wide periodic stats (for usbfs) */ +- ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; +- } ++ if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { ++ /* ... update hc-wide periodic stats */ ++ ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; + } + + if (unlikely(urb->unlinked)) { +diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c +index 7cf3da7..528a540 100644 +--- a/drivers/usb/host/ehci-sched.c ++++ b/drivers/usb/host/ehci-sched.c +@@ -1646,7 +1646,7 @@ static void itd_link_urb( + + /* don't need that schedule data any more */ + iso_sched_free (stream, iso_sched); +- urb->hcpriv = NULL; ++ urb->hcpriv = stream; + + ++ehci->isoc_count; + enable_periodic(ehci); +@@ -2045,7 +2045,7 @@ static void sitd_link_urb( + + /* don't need that schedule data any more */ + iso_sched_free (stream, sched); +- urb->hcpriv = NULL; ++ urb->hcpriv = stream; + + ++ehci->isoc_count; + enable_periodic(ehci); +diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c +index c5a1ea9..9d00d47 100644 +--- a/drivers/usb/host/ohci-q.c ++++ b/drivers/usb/host/ohci-q.c +@@ -1128,6 +1128,25 @@ dl_done_list (struct ohci_hcd *ohci) + + while (td) { + struct td *td_next = td->next_dl_td; ++ struct ed *ed = td->ed; ++ ++ /* ++ * Some OHCI controllers (NVIDIA for sure, maybe others) ++ * occasionally forget to add TDs to the done queue. Since ++ * TDs for a given endpoint are always processed in order, ++ * if we find a TD on the donelist then all of its ++ * predecessors must be finished as well. 
++ */ ++ for (;;) { ++ struct td *td2; ++ ++ td2 = list_first_entry(&ed->td_list, struct td, ++ td_list); ++ if (td2 == td) ++ break; ++ takeback_td(ohci, td2); ++ } ++ + takeback_td(ohci, td); + td = td_next; + } +diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c +index 39f9e4a..eb5563a 100644 +--- a/drivers/usb/host/pci-quirks.c ++++ b/drivers/usb/host/pci-quirks.c +@@ -723,6 +723,7 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done, + } + + #define PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI 0x8C31 ++#define PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI 0x9C31 + + bool usb_is_intel_ppt_switchable_xhci(struct pci_dev *pdev) + { +@@ -736,7 +737,8 @@ bool usb_is_intel_lpt_switchable_xhci(struct pci_dev *pdev) + { + return pdev->class == PCI_CLASS_SERIAL_USB_XHCI && + pdev->vendor == PCI_VENDOR_ID_INTEL && +- pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI; ++ (pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI || ++ pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI); + } + + bool usb_is_intel_switchable_xhci(struct pci_dev *pdev) +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index 487bc08..fb51c70 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -205,7 +205,12 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, + + next = xhci_segment_alloc(xhci, cycle_state, flags); + if (!next) { +- xhci_free_segments_for_ring(xhci, *first); ++ prev = *first; ++ while (prev) { ++ next = prev->next; ++ xhci_segment_free(xhci, prev); ++ prev = next; ++ } + return -ENOMEM; + } + xhci_link_segments(xhci, prev, next, type); +@@ -258,7 +263,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, + return ring; + + fail: +- xhci_ring_free(xhci, ring); ++ kfree(ring); + return NULL; + } + +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index 8345d7c..dcb72f7 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -29,6 +29,7 @@ + /* Device for a quirk */ + #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 + #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 ++#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 + + #define PCI_VENDOR_ID_ETRON 0x1b6f + #define PCI_DEVICE_ID_ASROCK_P67 0x7023 +@@ -58,8 +59,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + + /* Look for vendor-specific quirks */ + if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && +- pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) { +- if (pdev->revision == 0x0) { ++ (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK || ++ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) { ++ if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK && ++ pdev->revision == 0x0) { + xhci->quirks |= XHCI_RESET_EP_QUIRK; + xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure" + " endpoint cmd after reset endpoint\n"); +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 4e1a894..1189cf3 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -3071,11 +3071,11 @@ static u32 xhci_td_remainder(unsigned int remainder) + } + + /* +- * For xHCI 1.0 host controllers, TD size is the number of packets remaining in +- * the TD (*not* including this TRB). ++ * For xHCI 1.0 host controllers, TD size is the number of max packet sized ++ * packets remaining in the TD (*not* including this TRB). 
+ * + * Total TD packet count = total_packet_count = +- * roundup(TD size in bytes / wMaxPacketSize) ++ * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize) + * + * Packets transferred up to and including this TRB = packets_transferred = + * rounddown(total bytes transferred including this TRB / wMaxPacketSize) +@@ -3083,15 +3083,16 @@ static u32 xhci_td_remainder(unsigned int remainder) + * TD size = total_packet_count - packets_transferred + * + * It must fit in bits 21:17, so it can't be bigger than 31. ++ * The last TRB in a TD must have the TD size set to zero. + */ +- + static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len, +- unsigned int total_packet_count, struct urb *urb) ++ unsigned int total_packet_count, struct urb *urb, ++ unsigned int num_trbs_left) + { + int packets_transferred; + + /* One TRB with a zero-length data packet. */ +- if (running_total == 0 && trb_buff_len == 0) ++ if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0)) + return 0; + + /* All the TRB queueing functions don't count the current TRB in +@@ -3100,7 +3101,9 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len, + packets_transferred = (running_total + trb_buff_len) / + usb_endpoint_maxp(&urb->ep->desc); + +- return xhci_td_remainder(total_packet_count - packets_transferred); ++ if ((total_packet_count - packets_transferred) > 31) ++ return 31 << 17; ++ return (total_packet_count - packets_transferred) << 17; + } + + static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, +@@ -3127,7 +3130,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + + num_trbs = count_sg_trbs_needed(xhci, urb); + num_sgs = urb->num_mapped_sgs; +- total_packet_count = roundup(urb->transfer_buffer_length, ++ total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length, + usb_endpoint_maxp(&urb->ep->desc)); + + trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], +@@ -3210,7 +3213,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + running_total); + } else { + remainder = xhci_v1_0_td_remainder(running_total, +- trb_buff_len, total_packet_count, urb); ++ trb_buff_len, total_packet_count, urb, ++ num_trbs - 1); + } + length_field = TRB_LEN(trb_buff_len) | + remainder | +@@ -3318,7 +3322,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + start_cycle = ep_ring->cycle_state; + + running_total = 0; +- total_packet_count = roundup(urb->transfer_buffer_length, ++ total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length, + usb_endpoint_maxp(&urb->ep->desc)); + /* How much data is in the first TRB? */ + addr = (u64) urb->transfer_dma; +@@ -3364,7 +3368,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + running_total); + } else { + remainder = xhci_v1_0_td_remainder(running_total, +- trb_buff_len, total_packet_count, urb); ++ trb_buff_len, total_packet_count, urb, ++ num_trbs - 1); + } + length_field = TRB_LEN(trb_buff_len) | + remainder | +@@ -3627,7 +3632,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + addr = start_addr + urb->iso_frame_desc[i].offset; + td_len = urb->iso_frame_desc[i].length; + td_remain_len = td_len; +- total_packet_count = roundup(td_len, ++ total_packet_count = DIV_ROUND_UP(td_len, + usb_endpoint_maxp(&urb->ep->desc)); + /* A zero-length transfer still involves at least one packet. 
*/ + if (total_packet_count == 0) +@@ -3706,7 +3711,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + } else { + remainder = xhci_v1_0_td_remainder( + running_total, trb_buff_len, +- total_packet_count, urb); ++ total_packet_count, urb, ++ (trbs_per_td - j - 1)); + } + length_field = TRB_LEN(trb_buff_len) | + remainder | +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index c9e419f..389829e 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -480,7 +480,7 @@ static bool compliance_mode_recovery_timer_quirk_check(void) + if (strstr(dmi_product_name, "Z420") || + strstr(dmi_product_name, "Z620") || + strstr(dmi_product_name, "Z820") || +- strstr(dmi_product_name, "Z1")) ++ strstr(dmi_product_name, "Z1 Workstation")) + return true; + + return false; +@@ -2254,7 +2254,7 @@ static bool xhci_is_async_ep(unsigned int ep_type) + + static bool xhci_is_sync_in_ep(unsigned int ep_type) + { +- return (ep_type == ISOC_IN_EP || ep_type != INT_IN_EP); ++ return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); + } + + static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) +diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig +index a8f0523..fecde69 100644 +--- a/drivers/usb/misc/Kconfig ++++ b/drivers/usb/misc/Kconfig +@@ -246,6 +246,7 @@ config USB_YUREX + + config USB_EZUSB_FX2 + tristate "Functions for loading firmware on EZUSB chips" ++ depends on USB + help + Say Y here if you need EZUSB device support. + (Cypress FX/FX2/FX2LP microcontrollers) +diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c +index e19da82..3a6c2fd 100644 +--- a/drivers/usb/musb/cppi_dma.c ++++ b/drivers/usb/musb/cppi_dma.c +@@ -1314,6 +1314,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) + + return IRQ_HANDLED; + } ++EXPORT_SYMBOL_GPL(cppi_interrupt); + + /* Instantiate a software object representing a DMA controller. 
*/ + struct dma_controller *__devinit +diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c +index ff5f112..72b9239 100644 +--- a/drivers/usb/musb/musb_dsps.c ++++ b/drivers/usb/musb/musb_dsps.c +@@ -377,7 +377,8 @@ static int dsps_musb_init(struct musb *musb) + /* mentor core register starts at offset of 0x400 from musb base */ + musb->mregs += wrp->musb_core_offset; + +- /* Get the NOP PHY */ ++ /* NOP driver needs change if supporting dual instance */ ++ usb_nop_xceiv_register(); + musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); + if (IS_ERR_OR_NULL(musb->xceiv)) + return -ENODEV; +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index eb033fc..402e597 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ + { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ + { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ ++ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ + { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ + { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index be84587..2641d36 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -192,6 +192,7 @@ static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, ++ { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, + { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, + { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, +@@ -1781,7 +1782,7 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial) + struct usb_device *udev = serial->dev; + + if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) || +- (udev->product && !strcmp(udev->product, "BeagleBone/XDS100"))) ++ (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2"))) + return ftdi_jtag_probe(serial); + + return 0; +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 57c12ef..049b6e7 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -752,6 +752,12 @@ + #define TTI_VID 0x103E /* Vendor Id */ + #define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */ + ++/* ++ * Newport Cooperation (www.newport.com) ++ */ ++#define NEWPORT_VID 0x104D ++#define NEWPORT_AGILIS_PID 0x3000 ++ + /* Interbiometrics USB I/O Board */ + /* Developed for Interbiometrics by Rudolf Gugler */ + #define INTERBIOMETRICS_VID 0x1209 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index edc64bb..da36dc7 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb); + #define OPTION_PRODUCT_GTM380_MODEM 0x7201 + + #define HUAWEI_VENDOR_ID 0x12D1 ++#define HUAWEI_PRODUCT_E173 0x140C + #define HUAWEI_PRODUCT_K4505 0x1464 + #define HUAWEI_PRODUCT_K3765 0x1465 + #define HUAWEI_PRODUCT_K4605 0x14C6 +@@ -553,6 +554,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) }, + { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, + { 
USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), ++ .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), +@@ -884,6 +887,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0137, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0139, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) }, +@@ -904,20 +911,34 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */ + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */ + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff), + .driver_info = 
(kernel_ulong_t)&net_intf4_blacklist }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, +@@ -1097,6 +1118,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1301, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1302, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff), +diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig +index 0ae7bb6..eab04a6 100644 +--- a/drivers/usb/storage/Kconfig ++++ b/drivers/usb/storage/Kconfig +@@ -203,7 +203,7 @@ config USB_STORAGE_ENE_UB6250 + + config USB_UAS + tristate "USB Attached SCSI" +- depends on USB && SCSI ++ depends on USB && SCSI && BROKEN + help + The USB Attached SCSI protocol is supported by some USB + storage devices. It permits higher performance by supporting +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c +index e639584..286c30c 100644 +--- a/drivers/virtio/virtio_ring.c ++++ b/drivers/virtio/virtio_ring.c +@@ -135,6 +135,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq, + unsigned head; + int i; + ++ /* ++ * We require lowmem mappings for the descriptors because ++ * otherwise virt_to_phys will give us bogus addresses in the ++ * virtqueue. ++ */ ++ gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH); ++ + desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp); + if (!desc) + return -ENOMEM; +diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c +index 790b3cd..772428d 100644 +--- a/fs/binfmt_misc.c ++++ b/fs/binfmt_misc.c +@@ -176,7 +176,10 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs) + goto _error; + bprm->argc ++; + +- bprm->interp = iname; /* for binfmt_script */ ++ /* Update interp in case binfmt_script needs it. */ ++ retval = bprm_change_interp(iname, bprm); ++ if (retval < 0) ++ goto _error; + + interp_file = open_exec (iname); + retval = PTR_ERR (interp_file); +diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c +index d3b8c1f..df49d48 100644 +--- a/fs/binfmt_script.c ++++ b/fs/binfmt_script.c +@@ -82,7 +82,9 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs) + retval = copy_strings_kernel(1, &i_name, bprm); + if (retval) return retval; + bprm->argc++; +- bprm->interp = interp; ++ retval = bprm_change_interp(interp, bprm); ++ if (retval < 0) ++ return retval; + + /* + * OK, now restart the process with the interpreter's dentry. 
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c +index 1c576e8..dd538b4 100644 +--- a/fs/cifs/readdir.c ++++ b/fs/cifs/readdir.c +@@ -66,18 +66,21 @@ static inline void dump_cifs_file_struct(struct file *file, char *label) + #endif /* DEBUG2 */ + + /* ++ * Attempt to preload the dcache with the results from the FIND_FIRST/NEXT ++ * + * Find the dentry that matches "name". If there isn't one, create one. If it's + * a negative dentry or the uniqueid changed, then drop it and recreate it. + */ +-static struct dentry * +-cifs_readdir_lookup(struct dentry *parent, struct qstr *name, ++static void ++cifs_prime_dcache(struct dentry *parent, struct qstr *name, + struct cifs_fattr *fattr) + { + struct dentry *dentry, *alias; + struct inode *inode; + struct super_block *sb = parent->d_inode->i_sb; ++ struct cifs_sb_info *cifs_sb = CIFS_SB(sb); + +- cFYI(1, "For %s", name->name); ++ cFYI(1, "%s: for %s", __func__, name->name); + + if (parent->d_op && parent->d_op->d_hash) + parent->d_op->d_hash(parent, parent->d_inode, name); +@@ -87,37 +90,42 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name, + dentry = d_lookup(parent, name); + if (dentry) { + int err; ++ + inode = dentry->d_inode; +- /* update inode in place if i_ino didn't change */ +- if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) { +- cifs_fattr_to_inode(inode, fattr); +- return dentry; ++ if (inode) { ++ /* ++ * If we're generating inode numbers, then we don't ++ * want to clobber the existing one with the one that ++ * the readdir code created. ++ */ ++ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) ++ fattr->cf_uniqueid = CIFS_I(inode)->uniqueid; ++ ++ /* update inode in place if i_ino didn't change */ ++ if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) { ++ cifs_fattr_to_inode(inode, fattr); ++ goto out; ++ } + } + err = d_invalidate(dentry); + dput(dentry); + if (err) +- return NULL; ++ return; + } + + dentry = d_alloc(parent, name); +- if (dentry == NULL) +- return NULL; ++ if (!dentry) ++ return; + + inode = cifs_iget(sb, fattr); +- if (!inode) { +- dput(dentry); +- return NULL; +- } ++ if (!inode) ++ goto out; + + alias = d_materialise_unique(dentry, inode); +- if (alias != NULL) { +- dput(dentry); +- if (IS_ERR(alias)) +- return NULL; +- dentry = alias; +- } +- +- return dentry; ++ if (alias && !IS_ERR(alias)) ++ dput(alias); ++out: ++ dput(dentry); + } + + static void +@@ -652,7 +660,6 @@ static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir, + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); + struct cifs_dirent de = { NULL, }; + struct cifs_fattr fattr; +- struct dentry *dentry; + struct qstr name; + int rc = 0; + ino_t ino; +@@ -723,13 +730,11 @@ static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir, + */ + fattr.cf_flags |= CIFS_FATTR_NEED_REVAL; + +- ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid); +- dentry = cifs_readdir_lookup(file->f_dentry, &name, &fattr); ++ cifs_prime_dcache(file->f_dentry, &name, &fattr); + ++ ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid); + rc = filldir(dirent, name.name, name.len, file->f_pos, ino, + fattr.cf_dtype); +- +- dput(dentry); + return rc; + } + +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c +index 34cea27..591bf19 100644 +--- a/fs/cifs/smb1ops.c ++++ b/fs/cifs/smb1ops.c +@@ -53,6 +53,13 @@ send_nt_cancel(struct TCP_Server_Info *server, void *buf, + mutex_unlock(&server->srv_mutex); + return rc; + } ++ ++ /* ++ * The response to this call was already factored into the sequence ++ * number when the 
call went out, so we must adjust it back downward ++ * after signing here. ++ */ ++ --server->sequence_number; + rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); + mutex_unlock(&server->srv_mutex); + +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index cf33622..e7f9dbc 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -425,7 +425,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) + } + + cFYI(1, "sec_flags 0x%x", sec_flags); +- if (sec_flags & CIFSSEC_MUST_SIGN) { ++ if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) { + cFYI(1, "Signing required"); + if (!(server->sec_mode & (SMB2_NEGOTIATE_SIGNING_REQUIRED | + SMB2_NEGOTIATE_SIGNING_ENABLED))) { +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c +index 76d974c..1a52868 100644 +--- a/fs/cifs/transport.c ++++ b/fs/cifs/transport.c +@@ -144,9 +144,6 @@ smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec, + + *sent = 0; + +- if (ssocket == NULL) +- return -ENOTSOCK; /* BB eventually add reconnect code here */ +- + smb_msg.msg_name = (struct sockaddr *) &server->dstaddr; + smb_msg.msg_namelen = sizeof(struct sockaddr); + smb_msg.msg_control = NULL; +@@ -291,6 +288,9 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) + struct socket *ssocket = server->ssocket; + int val = 1; + ++ if (ssocket == NULL) ++ return -ENOTSOCK; ++ + cFYI(1, "Sending smb: smb_len=%u", smb_buf_length); + dump_smb(iov[0].iov_base, iov[0].iov_len); + +diff --git a/fs/dcache.c b/fs/dcache.c +index 3a463d0..0d0adb6 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -1583,7 +1583,7 @@ EXPORT_SYMBOL(d_find_any_alias); + */ + struct dentry *d_obtain_alias(struct inode *inode) + { +- static const struct qstr anonstring = { .name = "" }; ++ static const struct qstr anonstring = QSTR_INIT("/", 1); + struct dentry *tmp; + struct dentry *res; + +diff --git a/fs/exec.c b/fs/exec.c +index 0039055..c6e6de4 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -1175,9 +1175,24 @@ void free_bprm(struct linux_binprm *bprm) + mutex_unlock(&current->signal->cred_guard_mutex); + abort_creds(bprm->cred); + } ++ /* If a binfmt changed the interp, free it. */ ++ if (bprm->interp != bprm->filename) ++ kfree(bprm->interp); + kfree(bprm); + } + ++int bprm_change_interp(char *interp, struct linux_binprm *bprm) ++{ ++ /* If a binfmt changed the interp, free it first. */ ++ if (bprm->interp != bprm->filename) ++ kfree(bprm->interp); ++ bprm->interp = kstrdup(interp, GFP_KERNEL); ++ if (!bprm->interp) ++ return -ENOMEM; ++ return 0; ++} ++EXPORT_SYMBOL(bprm_change_interp); ++ + /* + * install the new credentials for this executable + */ +diff --git a/fs/file_table.c b/fs/file_table.c +index a72bf9d..de9e965 100644 +--- a/fs/file_table.c ++++ b/fs/file_table.c +@@ -458,8 +458,8 @@ void mark_files_ro(struct super_block *sb) + spin_unlock(&f->f_lock); + if (file_check_writeable(f) != 0) + continue; ++ __mnt_drop_write(f->f_path.mnt); + file_release_write(f); +- mnt_drop_write_file(f); + } while_file_list_for_each_entry; + lg_global_unlock(&files_lglock); + } +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c +index 76b4a7a..36b2e7a 100644 +--- a/fs/nfs/callback_proc.c ++++ b/fs/nfs/callback_proc.c +@@ -205,7 +205,7 @@ static u32 initiate_bulk_draining(struct nfs_client *clp, + + list_for_each_entry(lo, &server->layouts, plh_layouts) { + ino = igrab(lo->plh_inode); +- if (ino) ++ if (!ino) + continue; + spin_lock(&ino->i_lock); + /* Is this layout in the process of being freed?
*/ +diff --git a/fs/nfs/client.c b/fs/nfs/client.c +index 8b39a42..5e8d24d 100644 +--- a/fs/nfs/client.c ++++ b/fs/nfs/client.c +@@ -615,8 +615,7 @@ EXPORT_SYMBOL_GPL(nfs_create_rpc_client); + */ + static void nfs_destroy_server(struct nfs_server *server) + { +- if (!(server->flags & NFS_MOUNT_LOCAL_FLOCK) || +- !(server->flags & NFS_MOUNT_LOCAL_FCNTL)) ++ if (server->nlm_host) + nlmclnt_done(server->nlm_host); + } + +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c +index b9e66b7..c99f07e 100644 +--- a/fs/nfs/dir.c ++++ b/fs/nfs/dir.c +@@ -1156,11 +1156,14 @@ static int nfs_dentry_delete(const struct dentry *dentry) + + } + ++/* Ensure that we revalidate inode->i_nlink */ + static void nfs_drop_nlink(struct inode *inode) + { + spin_lock(&inode->i_lock); +- if (inode->i_nlink > 0) +- drop_nlink(inode); ++ /* drop the inode if we're reasonably sure this is the last link */ ++ if (inode->i_nlink == 1) ++ clear_nlink(inode); ++ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; + spin_unlock(&inode->i_lock); + } + +@@ -1175,8 +1178,8 @@ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode) + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA; + + if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { +- drop_nlink(inode); + nfs_complete_unlink(dentry, inode); ++ nfs_drop_nlink(inode); + } + iput(inode); + } +@@ -1647,10 +1650,8 @@ static int nfs_safe_remove(struct dentry *dentry) + if (inode != NULL) { + NFS_PROTO(inode)->return_delegation(inode); + error = NFS_PROTO(dir)->remove(dir, &dentry->d_name); +- /* The VFS may want to delete this inode */ + if (error == 0) + nfs_drop_nlink(inode); +- nfs_mark_for_revalidate(inode); + } else + error = NFS_PROTO(dir)->remove(dir, &dentry->d_name); + if (error == -ENOENT) +@@ -2147,12 +2148,16 @@ static int nfs_open_permission_mask(int openflags) + { + int mask = 0; + +- if ((openflags & O_ACCMODE) != O_WRONLY) +- mask |= MAY_READ; +- if ((openflags & O_ACCMODE) != O_RDONLY) +- mask |= MAY_WRITE; +- if (openflags & __FMODE_EXEC) +- mask |= MAY_EXEC; ++ if (openflags & __FMODE_EXEC) { ++ /* ONLY check exec rights */ ++ mask = MAY_EXEC; ++ } else { ++ if ((openflags & O_ACCMODE) != O_WRONLY) ++ mask |= MAY_READ; ++ if ((openflags & O_ACCMODE) != O_RDONLY) ++ mask |= MAY_WRITE; ++ } ++ + return mask; + } + +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c +index cae26cb..594f4e7 100644 +--- a/fs/nfs/direct.c ++++ b/fs/nfs/direct.c +@@ -266,14 +266,6 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr) + struct nfs_page *req = nfs_list_entry(hdr->pages.next); + struct page *page = req->wb_page; + +- if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) { +- if (bytes > hdr->good_bytes) +- zero_user(page, 0, PAGE_SIZE); +- else if (hdr->good_bytes - bytes < PAGE_SIZE) +- zero_user_segment(page, +- hdr->good_bytes & ~PAGE_MASK, +- PAGE_SIZE); +- } + if (!PageCompound(page)) { + if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) { + if (bytes < hdr->good_bytes) +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 5eec442..4432b2f 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -1721,7 +1721,8 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data) + + static int nfs4_opendata_access(struct rpc_cred *cred, + struct nfs4_opendata *opendata, +- struct nfs4_state *state, fmode_t fmode) ++ struct nfs4_state *state, fmode_t fmode, ++ int openflags) + { + struct nfs_access_entry cache; + u32 mask; +@@ -1733,11 +1734,14 @@ static int nfs4_opendata_access(struct rpc_cred *cred, + + mask = 0; + /* don't check MAY_WRITE - a newly created 
file may not have +- * write mode bits, but POSIX allows the creating process to write */ +- if (fmode & FMODE_READ) +- mask |= MAY_READ; +- if (fmode & FMODE_EXEC) +- mask |= MAY_EXEC; ++ * write mode bits, but POSIX allows the creating process to write. ++ * use openflags to check for exec, because fmode won't ++ * always have FMODE_EXEC set when file open for exec. */ ++ if (openflags & __FMODE_EXEC) { ++ /* ONLY check for exec rights */ ++ mask = MAY_EXEC; ++ } else if (fmode & FMODE_READ) ++ mask = MAY_READ; + + cache.cred = cred; + cache.jiffies = jiffies; +@@ -2009,7 +2013,7 @@ static int _nfs4_do_open(struct inode *dir, + if (server->caps & NFS_CAP_POSIX_LOCK) + set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); + +- status = nfs4_opendata_access(cred, opendata, state, fmode); ++ status = nfs4_opendata_access(cred, opendata, state, fmode, flags); + if (status != 0) + goto err_opendata_put; + +@@ -3937,8 +3941,13 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu + goto out_free; + } + nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); +- if (buf) ++ if (buf) { ++ if (res.acl_len > buflen) { ++ ret = -ERANGE; ++ goto out_free; ++ } + _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); ++ } + out_ok: + ret = res.acl_len; + out_free: +@@ -6138,13 +6147,26 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data) + rpc_call_start(task); + } + ++static void nfs41_sequence_prepare_privileged(struct rpc_task *task, void *data) ++{ ++ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); ++ nfs41_sequence_prepare(task, data); ++} ++ + static const struct rpc_call_ops nfs41_sequence_ops = { + .rpc_call_done = nfs41_sequence_call_done, + .rpc_call_prepare = nfs41_sequence_prepare, + .rpc_release = nfs41_sequence_release, + }; + +-static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) ++static const struct rpc_call_ops nfs41_sequence_privileged_ops = { ++ .rpc_call_done = nfs41_sequence_call_done, ++ .rpc_call_prepare = nfs41_sequence_prepare_privileged, ++ .rpc_release = nfs41_sequence_release, ++}; ++ ++static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred, ++ const struct rpc_call_ops *seq_ops) + { + struct nfs4_sequence_data *calldata; + struct rpc_message msg = { +@@ -6154,7 +6176,7 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_ + struct rpc_task_setup task_setup_data = { + .rpc_client = clp->cl_rpcclient, + .rpc_message = &msg, +- .callback_ops = &nfs41_sequence_ops, ++ .callback_ops = seq_ops, + .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT, + }; + +@@ -6181,7 +6203,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr + + if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) + return 0; +- task = _nfs41_proc_sequence(clp, cred); ++ task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_ops); + if (IS_ERR(task)) + ret = PTR_ERR(task); + else +@@ -6195,7 +6217,7 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) + struct rpc_task *task; + int ret; + +- task = _nfs41_proc_sequence(clp, cred); ++ task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_privileged_ops); + if (IS_ERR(task)) { + ret = PTR_ERR(task); + goto out; +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index 2878f97..b7f7538 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -254,7 +254,7 @@ static void + pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit) + { + 
lo->plh_retry_timestamp = jiffies; +- if (test_and_set_bit(fail_bit, &lo->plh_flags)) ++ if (!test_and_set_bit(fail_bit, &lo->plh_flags)) + atomic_inc(&lo->plh_refcount); + } + +diff --git a/fs/nfs/read.c b/fs/nfs/read.c +index b6bdb18..a5e5d98 100644 +--- a/fs/nfs/read.c ++++ b/fs/nfs/read.c +@@ -91,12 +91,16 @@ void nfs_readdata_release(struct nfs_read_data *rdata) + put_nfs_open_context(rdata->args.context); + if (rdata->pages.pagevec != rdata->pages.page_array) + kfree(rdata->pages.pagevec); +- if (rdata != &read_header->rpc_data) +- kfree(rdata); +- else ++ if (rdata == &read_header->rpc_data) { + rdata->header = NULL; ++ rdata = NULL; ++ } + if (atomic_dec_and_test(&hdr->refcnt)) + hdr->completion_ops->completion(hdr); ++ /* Note: we only free the rpc_task after callbacks are done. ++ * See the comment in rpc_free_task() for why ++ */ ++ kfree(rdata); + } + EXPORT_SYMBOL_GPL(nfs_readdata_release); + +diff --git a/fs/nfs/super.c b/fs/nfs/super.c +index 652d3f7..00be08f 100644 +--- a/fs/nfs/super.c ++++ b/fs/nfs/super.c +@@ -1150,7 +1150,7 @@ static int nfs_get_option_str(substring_t args[], char **option) + { + kfree(*option); + *option = match_strdup(args); +- return !option; ++ return !*option; + } + + static int nfs_get_option_ul(substring_t args[], unsigned long *option) +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index 9347ab7..8b67dc3 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -126,12 +126,16 @@ void nfs_writedata_release(struct nfs_write_data *wdata) + put_nfs_open_context(wdata->args.context); + if (wdata->pages.pagevec != wdata->pages.page_array) + kfree(wdata->pages.pagevec); +- if (wdata != &write_header->rpc_data) +- kfree(wdata); +- else ++ if (wdata == &write_header->rpc_data) { + wdata->header = NULL; ++ wdata = NULL; ++ } + if (atomic_dec_and_test(&hdr->refcnt)) + hdr->completion_ops->completion(hdr); ++ /* Note: we only free the rpc_task after callbacks are done. 
++ * See the comment in rpc_free_task() for why ++ */ ++ kfree(wdata); + } + EXPORT_SYMBOL_GPL(nfs_writedata_release); + +@@ -202,7 +206,6 @@ out: + /* A writeback failed: mark the page as bad, and invalidate the page cache */ + static void nfs_set_pageerror(struct page *page) + { +- SetPageError(page); + nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page)); + } + +@@ -884,7 +887,7 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode) + { + if (nfs_have_delegated_attributes(inode)) + goto out; +- if (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE) ++ if (NFS_I(inode)->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE)) + return false; + out: + return PageUptodate(page) != 0; +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c +index 6c9a4b2..f75af01 100644 +--- a/fs/nfsd/nfs4proc.c ++++ b/fs/nfsd/nfs4proc.c +@@ -194,6 +194,7 @@ static __be32 + do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) + { + struct svc_fh *resfh; ++ int accmode; + __be32 status; + + resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL); +@@ -253,9 +254,10 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o + /* set reply cache */ + fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh, + &resfh->fh_handle); +- if (!open->op_created) +- status = do_open_permission(rqstp, resfh, open, +- NFSD_MAY_NOP); ++ accmode = NFSD_MAY_NOP; ++ if (open->op_created) ++ accmode |= NFSD_MAY_OWNER_OVERRIDE; ++ status = do_open_permission(rqstp, resfh, open, accmode); + set_change_info(&open->op_cinfo, current_fh); + fh_dup2(current_fh, resfh); + out: +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index d0237f8..503e15e 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -2340,7 +2340,7 @@ nfsd4_init_slabs(void) + if (openowner_slab == NULL) + goto out_nomem; + lockowner_slab = kmem_cache_create("nfsd4_lockowners", +- sizeof(struct nfs4_openowner), 0, 0, NULL); ++ sizeof(struct nfs4_lockowner), 0, 0, NULL); + if (lockowner_slab == NULL) + goto out_nomem; + file_slab = kmem_cache_create("nfsd4_files", +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index fd548d1..ac6850f 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -2946,11 +2946,16 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, + len = maxcount; + v = 0; + while (len > 0) { +- pn = resp->rqstp->rq_resused++; ++ pn = resp->rqstp->rq_resused; ++ if (!resp->rqstp->rq_respages[pn]) { /* ran out of pages */ ++ maxcount -= len; ++ break; ++ } + resp->rqstp->rq_vec[v].iov_base = + page_address(resp->rqstp->rq_respages[pn]); + resp->rqstp->rq_vec[v].iov_len = + len < PAGE_SIZE ? 
len : PAGE_SIZE; ++ resp->rqstp->rq_resused++; + v++; + len -= PAGE_SIZE; + } +@@ -2996,6 +3001,8 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd + return nfserr; + if (resp->xbuf->page_len) + return nfserr_resource; ++ if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused]) ++ return nfserr_resource; + + page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]); + +@@ -3045,6 +3052,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4 + return nfserr; + if (resp->xbuf->page_len) + return nfserr_resource; ++ if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused]) ++ return nfserr_resource; + + RESERVE_SPACE(NFS4_VERIFIER_SIZE); + savep = p; +diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c +index 2013aa00..30d3784 100644 +--- a/fs/nfsd/nfssvc.c ++++ b/fs/nfsd/nfssvc.c +@@ -640,7 +640,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) + } + + /* Store reply in cache. */ +- nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1); ++ nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1); + return 1; + } + +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c +index c120b48..f59169e 100644 +--- a/fs/nfsd/vfs.c ++++ b/fs/nfsd/vfs.c +@@ -1485,13 +1485,19 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, + case NFS3_CREATE_EXCLUSIVE: + if ( dchild->d_inode->i_mtime.tv_sec == v_mtime + && dchild->d_inode->i_atime.tv_sec == v_atime +- && dchild->d_inode->i_size == 0 ) ++ && dchild->d_inode->i_size == 0 ) { ++ if (created) ++ *created = 1; + break; ++ } + case NFS4_CREATE_EXCLUSIVE4_1: + if ( dchild->d_inode->i_mtime.tv_sec == v_mtime + && dchild->d_inode->i_atime.tv_sec == v_atime +- && dchild->d_inode->i_size == 0 ) ++ && dchild->d_inode->i_size == 0 ) { ++ if (created) ++ *created = 1; + goto set_attr; ++ } + /* fallthru */ + case NFS3_CREATE_GUARDED: + err = nfserr_exist; +diff --git a/fs/proc/array.c b/fs/proc/array.c +index c1c207c..bd31e02 100644 +--- a/fs/proc/array.c ++++ b/fs/proc/array.c +@@ -212,7 +212,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, + group_info = cred->group_info; + task_unlock(p); + +- for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++) ++ for (g = 0; g < group_info->ngroups; g++) + seq_printf(m, "%d ", + from_kgid_munged(user_ns, GROUP_AT(group_info, g))); + put_cred(cred); +diff --git a/fs/splice.c b/fs/splice.c +index 13e5b47..48c7bd1 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -696,8 +696,10 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe, + return -EINVAL; + + more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0; +- if (sd->len < sd->total_len) ++ ++ if (sd->len < sd->total_len && pipe->nrbufs > 1) + more |= MSG_SENDPAGE_NOTLAST; ++ + return file->f_op->sendpage(file, buf->page, buf->offset, + sd->len, &pos, more); + } +diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c +index e57e2da..e0e9ee0 100644 +--- a/fs/xfs/xfs_aops.c ++++ b/fs/xfs/xfs_aops.c +@@ -124,7 +124,7 @@ xfs_setfilesize_trans_alloc( + ioend->io_append_trans = tp; + + /* +- * We will pass freeze protection with a transaction. So tell lockdep ++ * We may pass freeze protection with a transaction. So tell lockdep + * we released it. + */ + rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1], +@@ -149,11 +149,13 @@ xfs_setfilesize( + xfs_fsize_t isize; + + /* +- * The transaction was allocated in the I/O submission thread, +- * thus we need to mark ourselves as beeing in a transaction +- * manually. 
++ * The transaction may have been allocated in the I/O submission thread, ++ * thus we need to mark ourselves as beeing in a transaction manually. ++ * Similarly for freeze protection. + */ + current_set_flags_nested(&tp->t_pflags, PF_FSTRANS); ++ rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1], ++ 0, 1, _THIS_IP_); + + xfs_ilock(ip, XFS_ILOCK_EXCL); + isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size); +@@ -187,7 +189,8 @@ xfs_finish_ioend( + + if (ioend->io_type == XFS_IO_UNWRITTEN) + queue_work(mp->m_unwritten_workqueue, &ioend->io_work); +- else if (ioend->io_append_trans) ++ else if (ioend->io_append_trans || ++ (ioend->io_isdirect && xfs_ioend_is_append(ioend))) + queue_work(mp->m_data_workqueue, &ioend->io_work); + else + xfs_destroy_ioend(ioend); +@@ -205,15 +208,6 @@ xfs_end_io( + struct xfs_inode *ip = XFS_I(ioend->io_inode); + int error = 0; + +- if (ioend->io_append_trans) { +- /* +- * We've got freeze protection passed with the transaction. +- * Tell lockdep about it. +- */ +- rwsem_acquire_read( +- &ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1], +- 0, 1, _THIS_IP_); +- } + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { + ioend->io_error = -EIO; + goto done; +@@ -226,35 +220,31 @@ xfs_end_io( + * range to normal written extens after the data I/O has finished. + */ + if (ioend->io_type == XFS_IO_UNWRITTEN) { ++ error = xfs_iomap_write_unwritten(ip, ioend->io_offset, ++ ioend->io_size); ++ } else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) { + /* +- * For buffered I/O we never preallocate a transaction when +- * doing the unwritten extent conversion, but for direct I/O +- * we do not know if we are converting an unwritten extent +- * or not at the point where we preallocate the transaction. ++ * For direct I/O we do not know if we need to allocate blocks ++ * or not so we can't preallocate an append transaction as that ++ * results in nested reservations and log space deadlocks. Hence ++ * allocate the transaction here. While this is sub-optimal and ++ * can block IO completion for some time, we're stuck with doing ++ * it this way until we can pass the ioend to the direct IO ++ * allocation callbacks and avoid nesting that way. + */ +- if (ioend->io_append_trans) { +- ASSERT(ioend->io_isdirect); +- +- current_set_flags_nested( +- &ioend->io_append_trans->t_pflags, PF_FSTRANS); +- xfs_trans_cancel(ioend->io_append_trans, 0); +- } +- +- error = xfs_iomap_write_unwritten(ip, ioend->io_offset, +- ioend->io_size); +- if (error) { +- ioend->io_error = -error; ++ error = xfs_setfilesize_trans_alloc(ioend); ++ if (error) + goto done; +- } ++ error = xfs_setfilesize(ioend); + } else if (ioend->io_append_trans) { + error = xfs_setfilesize(ioend); +- if (error) +- ioend->io_error = -error; + } else { + ASSERT(!xfs_ioend_is_append(ioend)); + } + + done: ++ if (error) ++ ioend->io_error = -error; + xfs_destroy_ioend(ioend); + } + +@@ -1432,25 +1422,21 @@ xfs_vm_direct_IO( + size_t size = iov_length(iov, nr_segs); + + /* +- * We need to preallocate a transaction for a size update +- * here. In the case that this write both updates the size +- * and converts at least on unwritten extent we will cancel +- * the still clean transaction after the I/O has finished. ++ * We cannot preallocate a size update transaction here as we ++ * don't know whether allocation is necessary or not. Hence we ++ * can only tell IO completion that one is necessary if we are ++ * not doing unwritten extent conversion. 
+ */ + iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT); +- if (offset + size > XFS_I(inode)->i_d.di_size) { +- ret = xfs_setfilesize_trans_alloc(ioend); +- if (ret) +- goto out_destroy_ioend; ++ if (offset + size > XFS_I(inode)->i_d.di_size) + ioend->io_isdirect = 1; +- } + + ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, + offset, nr_segs, + xfs_get_blocks_direct, + xfs_end_io_direct_write, NULL, 0); + if (ret != -EIOCBQUEUED && iocb->private) +- goto out_trans_cancel; ++ goto out_destroy_ioend; + } else { + ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, + offset, nr_segs, +@@ -1460,15 +1446,6 @@ xfs_vm_direct_IO( + + return ret; + +-out_trans_cancel: +- if (ioend->io_append_trans) { +- current_set_flags_nested(&ioend->io_append_trans->t_pflags, +- PF_FSTRANS); +- rwsem_acquire_read( +- &inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1], +- 0, 1, _THIS_IP_); +- xfs_trans_cancel(ioend->io_append_trans, 0); +- } + out_destroy_ioend: + xfs_destroy_ioend(ioend); + return ret; +diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c +index 4dad756..a54c292 100644 +--- a/fs/xfs/xfs_log.c ++++ b/fs/xfs/xfs_log.c +@@ -458,7 +458,8 @@ xfs_log_reserve( + tic->t_trans_type = t_type; + *ticp = tic; + +- xlog_grant_push_ail(log, tic->t_unit_res * tic->t_cnt); ++ xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt ++ : tic->t_unit_res); + + trace_xfs_log_reserve(log, tic); + +diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c +index 2e86fa0..335a340 100644 +--- a/fs/xfs/xfs_qm.c ++++ b/fs/xfs/xfs_qm.c +@@ -1453,7 +1453,7 @@ xfs_qm_dqreclaim_one( + int error; + + if (!xfs_dqlock_nowait(dqp)) +- goto out_busy; ++ goto out_move_tail; + + /* + * This dquot has acquired a reference in the meantime remove it from +@@ -1476,7 +1476,7 @@ xfs_qm_dqreclaim_one( + * getting flushed to disk, we don't want to reclaim it. + */ + if (!xfs_dqflock_nowait(dqp)) +- goto out_busy; ++ goto out_unlock_move_tail; + + if (XFS_DQ_IS_DIRTY(dqp)) { + struct xfs_buf *bp = NULL; +@@ -1487,7 +1487,7 @@ xfs_qm_dqreclaim_one( + if (error) { + xfs_warn(mp, "%s: dquot %p flush failed", + __func__, dqp); +- goto out_busy; ++ goto out_unlock_move_tail; + } + + xfs_buf_delwri_queue(bp, buffer_list); +@@ -1496,7 +1496,7 @@ xfs_qm_dqreclaim_one( + * Give the dquot another try on the freelist, as the + * flushing will take some time. + */ +- goto out_busy; ++ goto out_unlock_move_tail; + } + xfs_dqfunlock(dqp); + +@@ -1515,14 +1515,13 @@ xfs_qm_dqreclaim_one( + XFS_STATS_INC(xs_qm_dqreclaims); + return; + +-out_busy: +- xfs_dqunlock(dqp); +- + /* + * Move the dquot to the tail of the list so that we don't spin on it. + */ ++out_unlock_move_tail: ++ xfs_dqunlock(dqp); ++out_move_tail: + list_move_tail(&dqp->q_lru, &qi->qi_lru_list); +- + trace_xfs_dqreclaim_busy(dqp); + XFS_STATS_INC(xs_qm_dqreclaim_misses); + } +diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h +index ed6642a..25f01d0 100644 +--- a/include/asm-generic/tlb.h ++++ b/include/asm-generic/tlb.h +@@ -78,6 +78,14 @@ struct mmu_gather_batch { + #define MAX_GATHER_BATCH \ + ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *)) + ++/* ++ * Limit the maximum number of mmu_gather batches to reduce a risk of soft ++ * lockups for non-preemptible kernels on huge machines when a lot of memory ++ * is zapped during unmapping. ++ * 10K pages freed at once should be safe even without a preemption point. 
++ */ ++#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) ++ + /* struct mmu_gather is an opaque type used by the mm code for passing around + * any data needed by arch specific code for tlb_remove_page. + */ +@@ -96,6 +104,7 @@ struct mmu_gather { + struct mmu_gather_batch *active; + struct mmu_gather_batch local; + struct page *__pages[MMU_GATHER_BUNDLE]; ++ unsigned int batch_count; + }; + + #define HAVE_GENERIC_MMU_GATHER +diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h +index cfcc6bf..de0628e 100644 +--- a/include/linux/binfmts.h ++++ b/include/linux/binfmts.h +@@ -114,6 +114,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm, + unsigned long stack_top, + int executable_stack); + extern int bprm_mm_init(struct linux_binprm *bprm); ++extern int bprm_change_interp(char *interp, struct linux_binprm *bprm); + extern int copy_strings_kernel(int argc, const char *const *argv, + struct linux_binprm *bprm); + extern int prepare_bprm_creds(struct linux_binprm *bprm); +diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h +index f8a030c..4cd1d0f 100644 +--- a/include/linux/cgroup.h ++++ b/include/linux/cgroup.h +@@ -34,7 +34,6 @@ extern int cgroup_lock_is_held(void); + extern bool cgroup_lock_live_group(struct cgroup *cgrp); + extern void cgroup_unlock(void); + extern void cgroup_fork(struct task_struct *p); +-extern void cgroup_fork_callbacks(struct task_struct *p); + extern void cgroup_post_fork(struct task_struct *p); + extern void cgroup_exit(struct task_struct *p, int run_callbacks); + extern int cgroupstats_build(struct cgroupstats *stats, +diff --git a/include/linux/freezer.h b/include/linux/freezer.h +index d09af4b..ee89932 100644 +--- a/include/linux/freezer.h ++++ b/include/linux/freezer.h +@@ -75,28 +75,62 @@ static inline bool cgroup_freezing(struct task_struct *task) + */ + + +-/* Tell the freezer not to count the current task as freezable. */ ++/** ++ * freezer_do_not_count - tell freezer to ignore %current ++ * ++ * Tell freezers to ignore the current task when determining whether the ++ * target frozen state is reached. IOW, the current task will be ++ * considered frozen enough by freezers. ++ * ++ * The caller shouldn't do anything which isn't allowed for a frozen task ++ * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair ++ * wrap a scheduling operation and nothing much else. ++ */ + static inline void freezer_do_not_count(void) + { + current->flags |= PF_FREEZER_SKIP; + } + +-/* +- * Tell the freezer to count the current task as freezable again and try to +- * freeze it. ++/** ++ * freezer_count - tell freezer to stop ignoring %current ++ * ++ * Undo freezer_do_not_count(). It tells freezers that %current should be ++ * considered again and tries to freeze if freezing condition is already in ++ * effect. + */ + static inline void freezer_count(void) + { + current->flags &= ~PF_FREEZER_SKIP; ++ /* ++ * If freezing is in progress, the following paired with smp_mb() ++ * in freezer_should_skip() ensures that either we see %true ++ * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP. ++ */ ++ smp_mb(); + try_to_freeze(); + } + +-/* +- * Check if the task should be counted as freezable by the freezer ++/** ++ * freezer_should_skip - whether to skip a task when determining frozen ++ * state is reached ++ * @p: task in quesion ++ * ++ * This function is used by freezers after establishing %true freezing() to ++ * test whether a task should be skipped when determining the target frozen ++ * state is reached. 
IOW, if this function returns %true, @p is considered ++ * frozen enough. + */ +-static inline int freezer_should_skip(struct task_struct *p) ++static inline bool freezer_should_skip(struct task_struct *p) + { +- return !!(p->flags & PF_FREEZER_SKIP); ++ /* ++ * The following smp_mb() paired with the one in freezer_count() ++ * ensures that either freezer_count() sees %true freezing() or we ++ * see cleared %PF_FREEZER_SKIP and return %false. This makes it ++ * impossible for a task to slip frozen state testing after ++ * clearing %PF_FREEZER_SKIP. ++ */ ++ smp_mb(); ++ return p->flags & PF_FREEZER_SKIP; + } + + /* +diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h +index d73878c..ce8217f 100644 +--- a/include/linux/hugetlb_cgroup.h ++++ b/include/linux/hugetlb_cgroup.h +@@ -62,7 +62,7 @@ extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, + struct page *page); + extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, + struct hugetlb_cgroup *h_cg); +-extern int hugetlb_cgroup_file_init(int idx) __init; ++extern void hugetlb_cgroup_file_init(void) __init; + extern void hugetlb_cgroup_migrate(struct page *oldhpage, + struct page *newhpage); + +@@ -111,9 +111,8 @@ hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, + return; + } + +-static inline int __init hugetlb_cgroup_file_init(int idx) ++static inline void hugetlb_cgroup_file_init(void) + { +- return 0; + } + + static inline void hugetlb_cgroup_migrate(struct page *oldhpage, +diff --git a/include/linux/kernel.h b/include/linux/kernel.h +index 7d8dfc7..b35fd0d 100644 +--- a/include/linux/kernel.h ++++ b/include/linux/kernel.h +@@ -77,13 +77,15 @@ + + /* + * Divide positive or negative dividend by positive divisor and round +- * to closest integer. Result is undefined for negative divisors. ++ * to closest integer. Result is undefined for negative divisors and ++ * for negative dividends if the divisor variable type is unsigned. + */ + #define DIV_ROUND_CLOSEST(x, divisor)( \ + { \ + typeof(x) __x = x; \ + typeof(divisor) __d = divisor; \ +- (((typeof(x))-1) > 0 || (__x) > 0) ? \ ++ (((typeof(x))-1) > 0 || \ ++ ((typeof(divisor))-1) > 0 || (__x) > 0) ? \ + (((__x) + ((__d) / 2)) / (__d)) : \ + (((__x) - ((__d) / 2)) / (__d)); \ + } \ +diff --git a/include/linux/libata.h b/include/linux/libata.h +index 77eeeda..e931c9a 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -163,6 +163,7 @@ enum { + + ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */ + ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */ ++ ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */ + + ATA_DEV_UNKNOWN = 0, /* unknown device */ + ATA_DEV_ATA = 1, /* ATA device */ +diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h +index b5d1384..70473da 100644 +--- a/include/linux/page-flags.h ++++ b/include/linux/page-flags.h +@@ -362,7 +362,7 @@ static inline void ClearPageCompound(struct page *page) + * pages on the LRU and/or pagecache. 
+ */ + TESTPAGEFLAG(Compound, compound) +-__PAGEFLAG(Head, compound) ++__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound) + + /* + * PG_reclaim is used in combination with PG_compound to mark the +@@ -374,8 +374,14 @@ __PAGEFLAG(Head, compound) + * PG_compound & PG_reclaim => Tail page + * PG_compound & ~PG_reclaim => Head page + */ ++#define PG_head_mask ((1L << PG_compound)) + #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim)) + ++static inline int PageHead(struct page *page) ++{ ++ return ((page->flags & PG_head_tail_mask) == PG_head_mask); ++} ++ + static inline int PageTail(struct page *page) + { + return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask); +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h +index 9d36b82..d16ad56 100644 +--- a/include/linux/pci_ids.h ++++ b/include/linux/pci_ids.h +@@ -1568,6 +1568,7 @@ + #define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 + #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 + #define PCI_DEVICE_ID_RICOH_R5C822 0x0822 ++#define PCI_DEVICE_ID_RICOH_R5CE822 0xe822 + #define PCI_DEVICE_ID_RICOH_R5CE823 0xe823 + #define PCI_DEVICE_ID_RICOH_R5C832 0x0832 + #define PCI_DEVICE_ID_RICOH_R5C843 0x0843 +diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h +index 4fd8a4b..e5062c9 100644 +--- a/include/net/gro_cells.h ++++ b/include/net/gro_cells.h +@@ -17,7 +17,6 @@ struct gro_cells { + + static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb) + { +- unsigned long flags; + struct gro_cell *cell = gcells->cells; + struct net_device *dev = skb->dev; + +@@ -35,32 +34,37 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s + return; + } + +- spin_lock_irqsave(&cell->napi_skbs.lock, flags); ++ /* We run in BH context */ ++ spin_lock(&cell->napi_skbs.lock); + + __skb_queue_tail(&cell->napi_skbs, skb); + if (skb_queue_len(&cell->napi_skbs) == 1) + napi_schedule(&cell->napi); + +- spin_unlock_irqrestore(&cell->napi_skbs.lock, flags); ++ spin_unlock(&cell->napi_skbs.lock); + } + ++/* called unser BH context */ + static inline int gro_cell_poll(struct napi_struct *napi, int budget) + { + struct gro_cell *cell = container_of(napi, struct gro_cell, napi); + struct sk_buff *skb; + int work_done = 0; + ++ spin_lock(&cell->napi_skbs.lock); + while (work_done < budget) { +- skb = skb_dequeue(&cell->napi_skbs); ++ skb = __skb_dequeue(&cell->napi_skbs); + if (!skb) + break; +- ++ spin_unlock(&cell->napi_skbs.lock); + napi_gro_receive(napi, skb); + work_done++; ++ spin_lock(&cell->napi_skbs.lock); + } + + if (work_done < budget) + napi_complete(napi); ++ spin_unlock(&cell->napi_skbs.lock); + return work_done; + } + +diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h +index ba1d361..1832927 100644 +--- a/include/net/inet_connection_sock.h ++++ b/include/net/inet_connection_sock.h +@@ -318,6 +318,7 @@ extern void inet_csk_reqsk_queue_prune(struct sock *parent, + const unsigned long max_rto); + + extern void inet_csk_destroy_sock(struct sock *sk); ++extern void inet_csk_prepare_forced_close(struct sock *sk); + + /* + * LISTEN is a special case for poll.. 
+diff --git a/include/net/ndisc.h b/include/net/ndisc.h +index 980d263..6b305d7 100644 +--- a/include/net/ndisc.h ++++ b/include/net/ndisc.h +@@ -78,6 +78,13 @@ struct ra_msg { + __be32 retrans_timer; + }; + ++struct rd_msg { ++ struct icmp6hdr icmph; ++ struct in6_addr target; ++ struct in6_addr dest; ++ __u8 opt[0]; ++}; ++ + struct nd_opt_hdr { + __u8 nd_opt_type; + __u8 nd_opt_len; +diff --git a/kernel/cgroup.c b/kernel/cgroup.c +index f24f724..ad99830 100644 +--- a/kernel/cgroup.c ++++ b/kernel/cgroup.c +@@ -2744,9 +2744,7 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry, + dentry->d_fsdata = cgrp; + inc_nlink(parent->d_inode); + rcu_assign_pointer(cgrp->dentry, dentry); +- dget(dentry); + } +- dput(dentry); + + return error; + } +@@ -2791,12 +2789,6 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys, + + simple_xattrs_init(&cft->xattrs); + +- /* does @cft->flags tell us to skip creation on @cgrp? */ +- if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent) +- return 0; +- if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent) +- return 0; +- + if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) { + strcpy(name, subsys->name); + strcat(name, "."); +@@ -2837,6 +2829,12 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys, + int err, ret = 0; + + for (cft = cfts; cft->name[0] != '\0'; cft++) { ++ /* does cft->flags tell us to skip this file on @cgrp? */ ++ if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent) ++ continue; ++ if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent) ++ continue; ++ + if (is_add) + err = cgroup_add_file(cgrp, subsys, cft); + else +@@ -4832,44 +4830,19 @@ void cgroup_fork(struct task_struct *child) + } + + /** +- * cgroup_fork_callbacks - run fork callbacks +- * @child: the new task +- * +- * Called on a new task very soon before adding it to the +- * tasklist. No need to take any locks since no-one can +- * be operating on this task. +- */ +-void cgroup_fork_callbacks(struct task_struct *child) +-{ +- if (need_forkexit_callback) { +- int i; +- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { +- struct cgroup_subsys *ss = subsys[i]; +- +- /* +- * forkexit callbacks are only supported for +- * builtin subsystems. +- */ +- if (!ss || ss->module) +- continue; +- +- if (ss->fork) +- ss->fork(child); +- } +- } +-} +- +-/** + * cgroup_post_fork - called on a new task after adding it to the task list + * @child: the task in question + * +- * Adds the task to the list running through its css_set if necessary. +- * Has to be after the task is visible on the task list in case we race +- * with the first call to cgroup_iter_start() - to guarantee that the +- * new task ends up on its list. ++ * Adds the task to the list running through its css_set if necessary and ++ * call the subsystem fork() callbacks. Has to be after the task is ++ * visible on the task list in case we race with the first call to ++ * cgroup_iter_start() - to guarantee that the new task ends up on its ++ * list. + */ + void cgroup_post_fork(struct task_struct *child) + { ++ int i; ++ + /* + * use_task_css_set_links is set to 1 before we walk the tasklist + * under the tasklist_lock and we read it here after we added the child +@@ -4889,7 +4862,30 @@ void cgroup_post_fork(struct task_struct *child) + task_unlock(child); + write_unlock(&css_set_lock); + } ++ ++ /* ++ * Call ss->fork(). 
This must happen after @child is linked on ++ * css_set; otherwise, @child might change state between ->fork() ++ * and addition to css_set. ++ */ ++ if (need_forkexit_callback) { ++ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { ++ struct cgroup_subsys *ss = subsys[i]; ++ ++ /* ++ * fork/exit callbacks are supported only for ++ * builtin subsystems and we don't need further ++ * synchronization as they never go away. ++ */ ++ if (!ss || ss->module) ++ continue; ++ ++ if (ss->fork) ++ ss->fork(child); ++ } ++ } + } ++ + /** + * cgroup_exit - detach cgroup from exiting task + * @tsk: pointer to task_struct of exiting process +diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c +index b1724ce..12bfedb 100644 +--- a/kernel/cgroup_freezer.c ++++ b/kernel/cgroup_freezer.c +@@ -186,23 +186,15 @@ static void freezer_fork(struct task_struct *task) + { + struct freezer *freezer; + +- /* +- * No lock is needed, since the task isn't on tasklist yet, +- * so it can't be moved to another cgroup, which means the +- * freezer won't be removed and will be valid during this +- * function call. Nevertheless, apply RCU read-side critical +- * section to suppress RCU lockdep false positives. +- */ + rcu_read_lock(); + freezer = task_freezer(task); +- rcu_read_unlock(); + + /* + * The root cgroup is non-freezable, so we can skip the + * following check. + */ + if (!freezer->css.cgroup->parent) +- return; ++ goto out; + + spin_lock_irq(&freezer->lock); + BUG_ON(freezer->state == CGROUP_FROZEN); +@@ -210,7 +202,10 @@ static void freezer_fork(struct task_struct *task) + /* Locking avoids race with FREEZING -> THAWED transitions. */ + if (freezer->state == CGROUP_FREEZING) + freeze_task(task); ++ + spin_unlock_irq(&freezer->lock); ++out: ++ rcu_read_unlock(); + } + + /* +diff --git a/kernel/fork.c b/kernel/fork.c +index 8b20ab7..acc4cb6 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1135,7 +1135,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, + { + int retval; + struct task_struct *p; +- int cgroup_callbacks_done = 0; + + if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) + return ERR_PTR(-EINVAL); +@@ -1393,12 +1392,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, + INIT_LIST_HEAD(&p->thread_group); + p->task_works = NULL; + +- /* Now that the task is set up, run cgroup callbacks if +- * necessary. We need to run them before the task is visible +- * on the tasklist. */ +- cgroup_fork_callbacks(p); +- cgroup_callbacks_done = 1; +- + /* Need tasklist lock for parent etc handling! */ + write_lock_irq(&tasklist_lock); + +@@ -1503,7 +1496,7 @@ bad_fork_cleanup_cgroup: + #endif + if (clone_flags & CLONE_THREAD) + threadgroup_change_end(current); +- cgroup_exit(p, cgroup_callbacks_done); ++ cgroup_exit(p, 0); + delayacct_tsk_free(p); + module_put(task_thread_info(p)->exec_domain->module); + bad_fork_cleanup_count: +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index 4c69326..e48caf8 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -716,6 +716,7 @@ static void + irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) + { + cpumask_var_t mask; ++ bool valid = true; + + if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) + return; +@@ -730,10 +731,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) + } + + raw_spin_lock_irq(&desc->lock); +- cpumask_copy(mask, desc->irq_data.affinity); ++ /* ++ * This code is triggered unconditionally. 
Check the affinity ++ * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. ++ */ ++ if (desc->irq_data.affinity) ++ cpumask_copy(mask, desc->irq_data.affinity); ++ else ++ valid = false; + raw_spin_unlock_irq(&desc->lock); + +- set_cpus_allowed_ptr(current, mask); ++ if (valid) ++ set_cpus_allowed_ptr(current, mask); + free_cpumask_var(mask); + } + #else +@@ -936,6 +945,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) + */ + get_task_struct(t); + new->thread = t; ++ /* ++ * Tell the thread to set its affinity. This is ++ * important for shared interrupt handlers as we do ++ * not invoke setup_affinity() for the secondary ++ * handlers as everything is already set up. Even for ++ * interrupts marked with IRQF_NO_BALANCE this is ++ * correct as we want the thread to move to the cpu(s) ++ * on which the requesting code placed the interrupt. ++ */ ++ set_bit(IRQTF_AFFINITY, &new->thread_flags); + } + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { +diff --git a/kernel/printk.c b/kernel/printk.c +index 2d607f4..f8e0b5a 100644 +--- a/kernel/printk.c ++++ b/kernel/printk.c +@@ -847,10 +847,11 @@ static size_t print_time(u64 ts, char *buf) + if (!printk_time) + return 0; + ++ rem_nsec = do_div(ts, 1000000000); ++ + if (!buf) +- return 15; ++ return snprintf(NULL, 0, "[%5lu.000000] ", (unsigned long)ts); + +- rem_nsec = do_div(ts, 1000000000); + return sprintf(buf, "[%5lu.%06lu] ", + (unsigned long)ts, rem_nsec / 1000); + } +diff --git a/kernel/rcutree.c b/kernel/rcutree.c +index 74df86b..2682295 100644 +--- a/kernel/rcutree.c ++++ b/kernel/rcutree.c +@@ -212,13 +212,13 @@ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { + #endif + }; + +-static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */ +-static int qhimark = 10000; /* If this many pending, ignore blimit. */ +-static int qlowmark = 100; /* Once only this many pending, use blimit. */ ++static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ ++static long qhimark = 10000; /* If this many pending, ignore blimit. */ ++static long qlowmark = 100; /* Once only this many pending, use blimit. */ + +-module_param(blimit, int, 0444); +-module_param(qhimark, int, 0444); +-module_param(qlowmark, int, 0444); ++module_param(blimit, long, 0444); ++module_param(qhimark, long, 0444); ++module_param(qlowmark, long, 0444); + + int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. 
*/ + int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; +@@ -1769,7 +1769,8 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) + { + unsigned long flags; + struct rcu_head *next, *list, **tail; +- int bl, count, count_lazy, i; ++ long bl, count, count_lazy; ++ int i; + + /* If no callbacks are ready, just return.*/ + if (!cpu_has_callbacks_ready_to_invoke(rdp)) { +diff --git a/kernel/signal.c b/kernel/signal.c +index 0af8868..e4d4014 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -3221,6 +3221,7 @@ SYSCALL_DEFINE1(ssetmask, int, newmask) + int old = current->blocked.sig[0]; + sigset_t newset; + ++ siginitset(&newset, newmask); + set_current_blocked(&newset); + + return old; +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index 9dcf15d..51b7159 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -2437,7 +2437,7 @@ static void reset_iter_read(struct ftrace_iterator *iter) + { + iter->pos = 0; + iter->func_pos = 0; +- iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH); ++ iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH); + } + + static void *t_start(struct seq_file *m, loff_t *pos) +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index b979426..4cb5e51 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -1396,6 +1396,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) + struct list_head *head_page_with_bit; + + head_page = &rb_set_head_page(cpu_buffer)->list; ++ if (!head_page) ++ break; + prev_page = head_page->prev; + + first_page = pages->next; +@@ -2934,7 +2936,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) + unsigned long flags; + struct ring_buffer_per_cpu *cpu_buffer; + struct buffer_page *bpage; +- unsigned long ret; ++ unsigned long ret = 0; + + if (!cpumask_test_cpu(cpu, buffer->cpumask)) + return 0; +@@ -2949,7 +2951,8 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) + bpage = cpu_buffer->reader_page; + else + bpage = rb_set_head_page(cpu_buffer); +- ret = bpage->page->time_stamp; ++ if (bpage) ++ ret = bpage->page->time_stamp; + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); + + return ret; +@@ -3260,6 +3263,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) + * Splice the empty reader page into the list around the head. + */ + reader = rb_set_head_page(cpu_buffer); ++ if (!reader) ++ goto out; + cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); + cpu_buffer->reader_page->list.prev = reader->list.prev; + +@@ -3778,12 +3783,17 @@ void + ring_buffer_read_finish(struct ring_buffer_iter *iter) + { + struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; ++ unsigned long flags; + + /* + * Ring buffer is disabled from recording, here's a good place +- * to check the integrity of the ring buffer. ++ * to check the integrity of the ring buffer. ++ * Must prevent readers from trying to read, as the check ++ * clears the HEAD page and readers require it. 
+ */ ++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); + rb_check_pages(cpu_buffer); ++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); + + atomic_dec(&cpu_buffer->record_disabled); + atomic_dec(&cpu_buffer->buffer->resize_disabled); +diff --git a/lib/atomic64.c b/lib/atomic64.c +index 9785378..08a4f06 100644 +--- a/lib/atomic64.c ++++ b/lib/atomic64.c +@@ -31,7 +31,11 @@ + static union { + raw_spinlock_t lock; + char pad[L1_CACHE_BYTES]; +-} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp; ++} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = { ++ [0 ... (NR_LOCKS - 1)] = { ++ .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock), ++ }, ++}; + + static inline raw_spinlock_t *lock_addr(const atomic64_t *v) + { +@@ -173,14 +177,3 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u) + return ret; + } + EXPORT_SYMBOL(atomic64_add_unless); +- +-static int init_atomic64_lock(void) +-{ +- int i; +- +- for (i = 0; i < NR_LOCKS; ++i) +- raw_spin_lock_init(&atomic64_lock[i].lock); +- return 0; +-} +- +-pure_initcall(init_atomic64_lock); +diff --git a/mm/dmapool.c b/mm/dmapool.c +index c5ab33b..da1b0f0 100644 +--- a/mm/dmapool.c ++++ b/mm/dmapool.c +@@ -50,7 +50,6 @@ struct dma_pool { /* the pool */ + size_t allocation; + size_t boundary; + char name[32]; +- wait_queue_head_t waitq; + struct list_head pools; + }; + +@@ -62,8 +61,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */ + unsigned int offset; + }; + +-#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000) +- + static DEFINE_MUTEX(pools_lock); + + static ssize_t +@@ -172,7 +169,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, + retval->size = size; + retval->boundary = boundary; + retval->allocation = allocation; +- init_waitqueue_head(&retval->waitq); + + if (dev) { + int ret; +@@ -227,7 +223,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) + memset(page->vaddr, POOL_POISON_FREED, pool->allocation); + #endif + pool_initialise_page(pool, page); +- list_add(&page->page_list, &pool->page_list); + page->in_use = 0; + page->offset = 0; + } else { +@@ -315,30 +310,21 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, + might_sleep_if(mem_flags & __GFP_WAIT); + + spin_lock_irqsave(&pool->lock, flags); +- restart: + list_for_each_entry(page, &pool->page_list, page_list) { + if (page->offset < pool->allocation) + goto ready; + } +- page = pool_alloc_page(pool, GFP_ATOMIC); +- if (!page) { +- if (mem_flags & __GFP_WAIT) { +- DECLARE_WAITQUEUE(wait, current); + +- __set_current_state(TASK_UNINTERRUPTIBLE); +- __add_wait_queue(&pool->waitq, &wait); +- spin_unlock_irqrestore(&pool->lock, flags); ++ /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */ ++ spin_unlock_irqrestore(&pool->lock, flags); + +- schedule_timeout(POOL_TIMEOUT_JIFFIES); ++ page = pool_alloc_page(pool, mem_flags); ++ if (!page) ++ return NULL; + +- spin_lock_irqsave(&pool->lock, flags); +- __remove_wait_queue(&pool->waitq, &wait); +- goto restart; +- } +- retval = NULL; +- goto done; +- } ++ spin_lock_irqsave(&pool->lock, flags); + ++ list_add(&page->page_list, &pool->page_list); + ready: + page->in_use++; + offset = page->offset; +@@ -348,7 +334,6 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, + #ifdef DMAPOOL_DEBUG + memset(retval, POOL_POISON_ALLOCATED, pool->size); + #endif +- done: + spin_unlock_irqrestore(&pool->lock, flags); + return retval; + } +@@ -435,8 +420,6 @@ void dma_pool_free(struct dma_pool 
*pool, void *vaddr, dma_addr_t dma) + page->in_use--; + *(int *)vaddr = page->offset; + page->offset = offset; +- if (waitqueue_active(&pool->waitq)) +- wake_up_locked(&pool->waitq); + /* + * Resist a temptation to do + * if (!is_page_busy(page)) pool_free_page(pool, page); +diff --git a/mm/highmem.c b/mm/highmem.c +index 2da13a5..09fc744 100644 +--- a/mm/highmem.c ++++ b/mm/highmem.c +@@ -105,6 +105,7 @@ struct page *kmap_to_page(void *vaddr) + + return virt_to_page(addr); + } ++EXPORT_SYMBOL(kmap_to_page); + + static void flush_all_zero_pkmaps(void) + { +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 59a0059..f198aca 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -1906,14 +1906,12 @@ static int __init hugetlb_init(void) + default_hstate.max_huge_pages = default_hstate_max_huge_pages; + + hugetlb_init_hstates(); +- + gather_bootmem_prealloc(); +- + report_hugepages(); + + hugetlb_sysfs_init(); +- + hugetlb_register_all_nodes(); ++ hugetlb_cgroup_file_init(); + + return 0; + } +@@ -1943,13 +1941,6 @@ void __init hugetlb_add_hstate(unsigned order) + h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]); + snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", + huge_page_size(h)/1024); +- /* +- * Add cgroup control files only if the huge page consists +- * of more than two normal pages. This is because we use +- * page[2].lru.next for storing cgoup details. +- */ +- if (order >= HUGETLB_CGROUP_MIN_ORDER) +- hugetlb_cgroup_file_init(hugetlb_max_hstate - 1); + + parsed_hstate = h; + } +diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c +index a3f358f..a0143e5 100644 +--- a/mm/hugetlb_cgroup.c ++++ b/mm/hugetlb_cgroup.c +@@ -340,7 +340,7 @@ static char *mem_fmt(char *buf, int size, unsigned long hsize) + return buf; + } + +-int __init hugetlb_cgroup_file_init(int idx) ++static void __init __hugetlb_cgroup_file_init(int idx) + { + char buf[32]; + struct cftype *cft; +@@ -382,7 +382,22 @@ int __init hugetlb_cgroup_file_init(int idx) + + WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files)); + +- return 0; ++ return; ++} ++ ++void __init hugetlb_cgroup_file_init(void) ++{ ++ struct hstate *h; ++ ++ for_each_hstate(h) { ++ /* ++ * Add cgroup control files only if the huge page consists ++ * of more than two normal pages. This is because we use ++ * page[2].lru.next for storing cgroup details. 
++ */ ++ if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER) ++ __hugetlb_cgroup_file_init(hstate_index(h)); ++ } + } + + /* +diff --git a/mm/memory.c b/mm/memory.c +index 221fc9f..f2973b2 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -182,10 +182,14 @@ static int tlb_next_batch(struct mmu_gather *tlb) + return 1; + } + ++ if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) ++ return 0; ++ + batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); + if (!batch) + return 0; + ++ tlb->batch_count++; + batch->next = NULL; + batch->nr = 0; + batch->max = MAX_GATHER_BATCH; +@@ -214,6 +218,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) + tlb->local.nr = 0; + tlb->local.max = ARRAY_SIZE(tlb->__pages); + tlb->active = &tlb->local; ++ tlb->batch_count = 0; + + #ifdef CONFIG_HAVE_RCU_TABLE_FREE + tlb->batch = NULL; +diff --git a/mm/mempolicy.c b/mm/mempolicy.c +index 4ea600d..002c281 100644 +--- a/mm/mempolicy.c ++++ b/mm/mempolicy.c +@@ -2372,8 +2372,7 @@ void numa_default_policy(void) + */ + + /* +- * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag +- * Used only for mpol_parse_str() and mpol_to_str() ++ * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag. + */ + #define MPOL_LOCAL MPOL_MAX + static const char * const policy_modes[] = +@@ -2388,28 +2387,21 @@ static const char * const policy_modes[] = + + #ifdef CONFIG_TMPFS + /** +- * mpol_parse_str - parse string to mempolicy ++ * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. + * @str: string containing mempolicy to parse + * @mpol: pointer to struct mempolicy pointer, returned on success. +- * @no_context: flag whether to "contextualize" the mempolicy ++ * @unused: redundant argument, to be removed later. + * + * Format of input: + * [=][:] + * +- * if @no_context is true, save the input nodemask in w.user_nodemask in +- * the returned mempolicy. This will be used to "clone" the mempolicy in +- * a specific context [cpuset] at a later time. Used to parse tmpfs mpol +- * mount option. Note that if 'static' or 'relative' mode flags were +- * specified, the input nodemask will already have been saved. Saving +- * it again is redundant, but safe. +- * + * On success, returns 0, else 1 + */ +-int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) ++int mpol_parse_str(char *str, struct mempolicy **mpol, int unused) + { + struct mempolicy *new = NULL; + unsigned short mode; +- unsigned short uninitialized_var(mode_flags); ++ unsigned short mode_flags; + nodemask_t nodes; + char *nodelist = strchr(str, ':'); + char *flags = strchr(str, '='); +@@ -2497,24 +2489,23 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) + if (IS_ERR(new)) + goto out; + +- if (no_context) { +- /* save for contextualization */ +- new->w.user_nodemask = nodes; +- } else { +- int ret; +- NODEMASK_SCRATCH(scratch); +- if (scratch) { +- task_lock(current); +- ret = mpol_set_nodemask(new, &nodes, scratch); +- task_unlock(current); +- } else +- ret = -ENOMEM; +- NODEMASK_SCRATCH_FREE(scratch); +- if (ret) { +- mpol_put(new); +- goto out; +- } +- } ++ /* ++ * Save nodes for mpol_to_str() to show the tmpfs mount options ++ * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. 
++ */ ++ if (mode != MPOL_PREFERRED) ++ new->v.nodes = nodes; ++ else if (nodelist) ++ new->v.preferred_node = first_node(nodes); ++ else ++ new->flags |= MPOL_F_LOCAL; ++ ++ /* ++ * Save nodes for contextualization: this will be used to "clone" ++ * the mempolicy in a specific context [cpuset] at a later time. ++ */ ++ new->w.user_nodemask = nodes; ++ + err = 0; + + out: +@@ -2534,13 +2525,13 @@ out: + * @buffer: to contain formatted mempolicy string + * @maxlen: length of @buffer + * @pol: pointer to mempolicy to be formatted +- * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask ++ * @unused: redundant argument, to be removed later. + * + * Convert a mempolicy into a string. + * Returns the number of characters in buffer (if positive) + * or an error (negative) + */ +-int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) ++int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int unused) + { + char *p = buffer; + int l; +@@ -2566,7 +2557,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) + case MPOL_PREFERRED: + nodes_clear(nodes); + if (flags & MPOL_F_LOCAL) +- mode = MPOL_LOCAL; /* pseudo-policy */ ++ mode = MPOL_LOCAL; + else + node_set(pol->v.preferred_node, nodes); + break; +@@ -2574,10 +2565,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) + case MPOL_BIND: + /* Fall through */ + case MPOL_INTERLEAVE: +- if (no_context) +- nodes = pol->w.user_nodemask; +- else +- nodes = pol->v.nodes; ++ nodes = pol->v.nodes; + break; + + default: +diff --git a/mm/page-writeback.c b/mm/page-writeback.c +index 830893b..c0fa8bd 100644 +--- a/mm/page-writeback.c ++++ b/mm/page-writeback.c +@@ -201,6 +201,18 @@ static unsigned long highmem_dirtyable_memory(unsigned long total) + zone_reclaimable_pages(z) - z->dirty_balance_reserve; + } + /* ++ * Unreclaimable memory (kernel memory or anonymous memory ++ * without swap) can bring down the dirtyable pages below ++ * the zone's dirty balance reserve and the above calculation ++ * will underflow. However we still want to add in nodes ++ * which are below threshold (negative values) to get a more ++ * accurate calculation but make sure that the total never ++ * underflows. ++ */ ++ if ((long)x < 0) ++ x = 0; ++ ++ /* + * Make sure that the number of highmem pages is never larger + * than the number of the total dirtyable memory. This can only + * occur in very strange VM situations but we want to make sure +@@ -222,8 +234,8 @@ static unsigned long global_dirtyable_memory(void) + { + unsigned long x; + +- x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() - +- dirty_balance_reserve; ++ x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages(); ++ x -= min(x, dirty_balance_reserve); + + if (!vm_highmem_is_dirtyable) + x -= highmem_dirtyable_memory(x); +@@ -290,9 +302,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone) + * highmem zone can hold its share of dirty pages, so we don't + * care about vm_highmem_is_dirtyable here. 
+ */ +- return zone_page_state(zone, NR_FREE_PAGES) + +- zone_reclaimable_pages(zone) - +- zone->dirty_balance_reserve; ++ unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) + ++ zone_reclaimable_pages(zone); ++ ++ /* don't allow this to underflow */ ++ nr_pages -= min(nr_pages, zone->dirty_balance_reserve); ++ return nr_pages; + } + + /** +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c +index 35b8911..fd05c81 100644 +--- a/net/9p/trans_virtio.c ++++ b/net/9p/trans_virtio.c +@@ -39,6 +39,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -325,7 +326,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan, + int count = nr_pages; + while (nr_pages) { + s = rest_of_page(data); +- pages[index++] = virt_to_page(data); ++ pages[index++] = kmap_to_page(data); + data += s; + nr_pages--; + } +diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c +index b02b75d..c6fcc76 100644 +--- a/net/batman-adv/bat_iv_ogm.c ++++ b/net/batman-adv/bat_iv_ogm.c +@@ -119,7 +119,7 @@ batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv) + unsigned int msecs; + + msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER; +- msecs += (random32() % 2 * BATADV_JITTER); ++ msecs += random32() % (2 * BATADV_JITTER); + + return jiffies + msecs_to_jiffies(msecs); + } +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c +index a0a2f97..e48bfdb 100644 +--- a/net/bluetooth/hci_core.c ++++ b/net/bluetooth/hci_core.c +@@ -1793,6 +1793,8 @@ void hci_unregister_dev(struct hci_dev *hdev) + for (i = 0; i < NUM_REASSEMBLY; i++) + kfree_skb(hdev->reassembly[i]); + ++ cancel_work_sync(&hdev->power_on); ++ + if (!test_bit(HCI_INIT, &hdev->flags) && + !test_bit(HCI_SETUP, &hdev->dev_flags)) { + hci_dev_lock(hdev); +diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c +index b3226f3..868a909 100644 +--- a/net/bluetooth/rfcomm/sock.c ++++ b/net/bluetooth/rfcomm/sock.c +@@ -467,7 +467,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f + long timeo; + int err = 0; + +- lock_sock(sk); ++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + + if (sk->sk_type != SOCK_STREAM) { + err = -EINVAL; +@@ -504,7 +504,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f + + release_sock(sk); + timeo = schedule_timeout(timeo); +- lock_sock(sk); ++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + } + __set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c +index dc42b91..caa109d 100644 +--- a/net/bluetooth/sco.c ++++ b/net/bluetooth/sco.c +@@ -131,15 +131,6 @@ static int sco_conn_del(struct hci_conn *hcon, int err) + sco_sock_clear_timer(sk); + sco_chan_del(sk, err); + bh_unlock_sock(sk); +- +- sco_conn_lock(conn); +- conn->sk = NULL; +- sco_pi(sk)->conn = NULL; +- sco_conn_unlock(conn); +- +- if (conn->hcon) +- hci_conn_put(conn->hcon); +- + sco_sock_kill(sk); + } + +@@ -830,6 +821,16 @@ static void sco_chan_del(struct sock *sk, int err) + + BT_DBG("sk %p, conn %p, err %d", sk, conn, err); + ++ if (conn) { ++ sco_conn_lock(conn); ++ conn->sk = NULL; ++ sco_pi(sk)->conn = NULL; ++ sco_conn_unlock(conn); ++ ++ if (conn->hcon) ++ hci_conn_put(conn->hcon); ++ } ++ + sk->sk_state = BT_CLOSED; + sk->sk_err = err; + sk->sk_state_change(sk); +diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c +index 176ecdb..4f9f5eb 100644 +--- a/net/dccp/ipv4.c ++++ b/net/dccp/ipv4.c +@@ -439,8 +439,8 @@ exit: + 
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); + return NULL; + put_and_exit: +- bh_unlock_sock(newsk); +- sock_put(newsk); ++ inet_csk_prepare_forced_close(newsk); ++ dccp_done(newsk); + goto exit; + } + +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c +index 56840b2..6e05981 100644 +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -585,7 +585,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, + newinet->inet_rcv_saddr = LOOPBACK4_IPV6; + + if (__inet_inherit_port(sk, newsk) < 0) { +- sock_put(newsk); ++ inet_csk_prepare_forced_close(newsk); ++ dccp_done(newsk); + goto out; + } + __inet6_hash(newsk, NULL); +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c +index d34ce29..13a13e8 100644 +--- a/net/ipv4/inet_connection_sock.c ++++ b/net/ipv4/inet_connection_sock.c +@@ -699,6 +699,22 @@ void inet_csk_destroy_sock(struct sock *sk) + } + EXPORT_SYMBOL(inet_csk_destroy_sock); + ++/* This function allows to force a closure of a socket after the call to ++ * tcp/dccp_create_openreq_child(). ++ */ ++void inet_csk_prepare_forced_close(struct sock *sk) ++{ ++ /* sk_clone_lock locked the socket and set refcnt to 2 */ ++ bh_unlock_sock(sk); ++ sock_put(sk); ++ ++ /* The below has to be done to allow calling inet_csk_destroy_sock */ ++ sock_set_flag(sk, SOCK_DEAD); ++ percpu_counter_inc(sk->sk_prot->orphan_count); ++ inet_sk(sk)->inet_num = 0; ++} ++EXPORT_SYMBOL(inet_csk_prepare_forced_close); ++ + int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) + { + struct inet_sock *inet = inet_sk(sk); +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index 0c4a643..bc3cb46 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -1774,10 +1774,8 @@ exit: + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); + return NULL; + put_and_exit: +- tcp_clear_xmit_timers(newsk); +- tcp_cleanup_congestion_control(newsk); +- bh_unlock_sock(newsk); +- sock_put(newsk); ++ inet_csk_prepare_forced_close(newsk); ++ tcp_done(newsk); + goto exit; + } + EXPORT_SYMBOL(tcp_v4_syn_recv_sock); +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c +index 2edce30..89dab79 100644 +--- a/net/ipv6/ndisc.c ++++ b/net/ipv6/ndisc.c +@@ -1333,6 +1333,12 @@ out: + + static void ndisc_redirect_rcv(struct sk_buff *skb) + { ++ u8 *hdr; ++ struct ndisc_options ndopts; ++ struct rd_msg *msg = (struct rd_msg *)skb_transport_header(skb); ++ u32 ndoptlen = skb->tail - (skb->transport_header + ++ offsetof(struct rd_msg, opt)); ++ + #ifdef CONFIG_IPV6_NDISC_NODETYPE + switch (skb->ndisc_nodetype) { + case NDISC_NODETYPE_HOST: +@@ -1349,6 +1355,17 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) + return; + } + ++ if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) ++ return; ++ ++ if (!ndopts.nd_opts_rh) ++ return; ++ ++ hdr = (u8 *)ndopts.nd_opts_rh; ++ hdr += 8; ++ if (!pskb_pull(skb, hdr - skb_transport_header(skb))) ++ return; ++ + icmpv6_notify(skb, NDISC_REDIRECT, 0, 0); + } + +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 26175bf..73f2a6b 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -1384,7 +1384,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, + #endif + + if (__inet_inherit_port(sk, newsk) < 0) { +- sock_put(newsk); ++ inet_csk_prepare_forced_close(newsk); ++ tcp_done(newsk); + goto out; + } + __inet6_hash(newsk, NULL); +diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c +index f30f6d4..c5e24c8 100644 +--- a/net/mac802154/wpan.c ++++ b/net/mac802154/wpan.c +@@ -387,7 
+387,7 @@ void mac802154_wpan_setup(struct net_device *dev) + + static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb) + { +- return netif_rx(skb); ++ return netif_rx_ni(skb); + } + + static int +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c +index 9d75b77..e9ea2f3 100644 +--- a/net/sched/sch_htb.c ++++ b/net/sched/sch_htb.c +@@ -874,7 +874,7 @@ ok: + q->now = psched_get_time(); + start_at = jiffies; + +- next_event = q->now + 5 * PSCHED_TICKS_PER_SEC; ++ next_event = q->now + 5LLU * PSCHED_TICKS_PER_SEC; + + for (level = 0; level < TC_HTB_MAXDEPTH; level++) { + /* common case optimization - skip event handler quickly */ +diff --git a/net/sctp/probe.c b/net/sctp/probe.c +index bc6cd75..5f7518d 100644 +--- a/net/sctp/probe.c ++++ b/net/sctp/probe.c +@@ -122,7 +122,8 @@ static const struct file_operations sctpprobe_fops = { + .llseek = noop_llseek, + }; + +-sctp_disposition_t jsctp_sf_eat_sack(const struct sctp_endpoint *ep, ++sctp_disposition_t jsctp_sf_eat_sack(struct net *net, ++ const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index f9d870e..a9652d6 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -2557,10 +2557,6 @@ static int azx_runtime_suspend(struct device *dev) + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip = card->private_data; + +- if (!power_save_controller || +- !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) +- return -EAGAIN; +- + azx_stop_chip(chip); + azx_clear_irq_pending(chip); + return 0; +@@ -2575,12 +2571,25 @@ static int azx_runtime_resume(struct device *dev) + azx_init_chip(chip, 1); + return 0; + } ++ ++static int azx_runtime_idle(struct device *dev) ++{ ++ struct snd_card *card = dev_get_drvdata(dev); ++ struct azx *chip = card->private_data; ++ ++ if (!power_save_controller || ++ !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) ++ return -EBUSY; ++ ++ return 0; ++} ++ + #endif /* CONFIG_PM_RUNTIME */ + + #ifdef CONFIG_PM + static const struct dev_pm_ops azx_pm = { + SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume) +- SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, NULL) ++ SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle) + }; + + #define AZX_PM_OPS &azx_pm +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index 03b1dc3..a7b522a 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -4453,6 +4453,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = { + }; + + static const struct snd_pci_quirk cxt5066_fixups[] = { ++ SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), + SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410), + SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410), +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index 71555cc..4642c68 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -431,9 +431,11 @@ static void hdmi_init_pin(struct hda_codec *codec, hda_nid_t pin_nid) + if (get_wcaps(codec, pin_nid) & AC_WCAP_OUT_AMP) + snd_hda_codec_write(codec, pin_nid, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); +- /* Disable pin out until stream is active*/ ++ /* Enable pin out: some machines with GM965 gets broken output when ++ * the pin is disabled or changed while 
using with HDMI ++ */ + snd_hda_codec_write(codec, pin_nid, 0, +- AC_VERB_SET_PIN_WIDGET_CONTROL, 0); ++ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); + } + + static int hdmi_get_channel_count(struct hda_codec *codec, hda_nid_t cvt_nid) +@@ -1338,7 +1340,6 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, + struct hdmi_spec *spec = codec->spec; + int pin_idx = hinfo_to_pin_index(spec, hinfo); + hda_nid_t pin_nid = spec->pins[pin_idx].pin_nid; +- int pinctl; + bool non_pcm; + + non_pcm = check_non_pcm_per_cvt(codec, cvt_nid); +@@ -1347,11 +1348,6 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, + + hdmi_setup_audio_infoframe(codec, pin_idx, non_pcm, substream); + +- pinctl = snd_hda_codec_read(codec, pin_nid, 0, +- AC_VERB_GET_PIN_WIDGET_CONTROL, 0); +- snd_hda_codec_write(codec, pin_nid, 0, +- AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl | PIN_OUT); +- + return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format); + } + +@@ -1371,7 +1367,6 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo, + int cvt_idx, pin_idx; + struct hdmi_spec_per_cvt *per_cvt; + struct hdmi_spec_per_pin *per_pin; +- int pinctl; + + if (hinfo->nid) { + cvt_idx = cvt_nid_to_cvt_index(spec, hinfo->nid); +@@ -1388,11 +1383,6 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo, + return -EINVAL; + per_pin = &spec->pins[pin_idx]; + +- pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0, +- AC_VERB_GET_PIN_WIDGET_CONTROL, 0); +- snd_hda_codec_write(codec, per_pin->pin_nid, 0, +- AC_VERB_SET_PIN_WIDGET_CONTROL, +- pinctl & ~PIN_OUT); + snd_hda_spdif_ctls_unassign(codec, pin_idx); + per_pin->chmap_set = false; + memset(per_pin->chmap, 0, sizeof(per_pin->chmap)); +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index ad68d22..4bbabdc 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -5986,6 +5986,30 @@ static void alc269_fixup_quanta_mute(struct hda_codec *codec, + spec->automute_hook = alc269_quanta_automute; + } + ++/* update mute-LED according to the speaker mute state via mic1 VREF pin */ ++static void alc269_fixup_mic1_mute_hook(void *private_data, int enabled) ++{ ++ struct hda_codec *codec = private_data; ++ unsigned int pinval = AC_PINCTL_IN_EN + (enabled ? 
++ AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80); ++ snd_hda_set_pin_ctl_cache(codec, 0x18, pinval); ++} ++ ++static void alc269_fixup_mic1_mute(struct hda_codec *codec, ++ const struct alc_fixup *fix, int action) ++{ ++ struct alc_spec *spec = codec->spec; ++ switch (action) { ++ case ALC_FIXUP_ACT_BUILD: ++ spec->vmaster_mute.hook = alc269_fixup_mic1_mute_hook; ++ snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute, true); ++ /* fallthru */ ++ case ALC_FIXUP_ACT_INIT: ++ snd_hda_sync_vmaster_hook(&spec->vmaster_mute); ++ break; ++ } ++} ++ + /* update mute-LED according to the speaker mute state via mic2 VREF pin */ + static void alc269_fixup_mic2_mute_hook(void *private_data, int enabled) + { +@@ -6027,6 +6051,7 @@ enum { + ALC269_FIXUP_DMIC, + ALC269VB_FIXUP_AMIC, + ALC269VB_FIXUP_DMIC, ++ ALC269_FIXUP_MIC1_MUTE_LED, + ALC269_FIXUP_MIC2_MUTE_LED, + ALC269_FIXUP_INV_DMIC, + ALC269_FIXUP_LENOVO_DOCK, +@@ -6153,6 +6178,10 @@ static const struct alc_fixup alc269_fixups[] = { + { } + }, + }, ++ [ALC269_FIXUP_MIC1_MUTE_LED] = { ++ .type = ALC_FIXUP_FUNC, ++ .v.func = alc269_fixup_mic1_mute, ++ }, + [ALC269_FIXUP_MIC2_MUTE_LED] = { + .type = ALC_FIXUP_FUNC, + .v.func = alc269_fixup_mic2_mute, +@@ -6181,6 +6210,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC), + SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC), + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED), ++ SND_PCI_QUIRK(0x103c, 0x1972, "HP Pavilion 17", ALC269_FIXUP_MIC1_MUTE_LED), + SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC), + SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_DMIC), + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), +@@ -6535,8 +6565,8 @@ static void alc861vd_fixup_dallas(struct hda_codec *codec, + const struct alc_fixup *fix, int action) + { + if (action == ALC_FIXUP_ACT_PRE_PROBE) { +- snd_hda_override_pin_caps(codec, 0x18, 0x00001714); +- snd_hda_override_pin_caps(codec, 0x19, 0x0000171c); ++ snd_hda_override_pin_caps(codec, 0x18, 0x00000734); ++ snd_hda_override_pin_caps(codec, 0x19, 0x0000073c); + } + } + +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index 9ba8af0..f3bae20 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -1724,7 +1724,7 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = { + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1658, + "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1659, +- "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), ++ "HP Pavilion dv7", STAC_HP_DV7_4000), + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165A, + "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD), + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165B, +diff --git a/sound/usb/midi.c b/sound/usb/midi.c +index eeefbce..34b9bb7 100644 +--- a/sound/usb/midi.c ++++ b/sound/usb/midi.c +@@ -116,6 +116,7 @@ struct snd_usb_midi { + struct list_head list; + struct timer_list error_timer; + spinlock_t disc_lock; ++ struct rw_semaphore disc_rwsem; + struct mutex mutex; + u32 usb_id; + int next_midi_device; +@@ -125,8 +126,10 @@ struct snd_usb_midi { + struct snd_usb_midi_in_endpoint *in; + } endpoints[MIDI_MAX_ENDPOINTS]; + unsigned long input_triggered; +- unsigned int opened; ++ bool autopm_reference; ++ unsigned int opened[2]; + unsigned char disconnected; ++ unsigned char input_running; + + struct snd_kcontrol *roland_load_ctl; + }; +@@ -148,7 +151,6 @@ struct snd_usb_midi_out_endpoint { + struct 
snd_usb_midi_out_endpoint* ep; + struct snd_rawmidi_substream *substream; + int active; +- bool autopm_reference; + uint8_t cable; /* cable number << 4 */ + uint8_t state; + #define STATE_UNKNOWN 0 +@@ -1033,29 +1035,58 @@ static void update_roland_altsetting(struct snd_usb_midi* umidi) + snd_usbmidi_input_start(&umidi->list); + } + +-static void substream_open(struct snd_rawmidi_substream *substream, int open) ++static int substream_open(struct snd_rawmidi_substream *substream, int dir, ++ int open) + { + struct snd_usb_midi* umidi = substream->rmidi->private_data; + struct snd_kcontrol *ctl; ++ int err; ++ ++ down_read(&umidi->disc_rwsem); ++ if (umidi->disconnected) { ++ up_read(&umidi->disc_rwsem); ++ return open ? -ENODEV : 0; ++ } + + mutex_lock(&umidi->mutex); + if (open) { +- if (umidi->opened++ == 0 && umidi->roland_load_ctl) { +- ctl = umidi->roland_load_ctl; +- ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; +- snd_ctl_notify(umidi->card, ++ if (!umidi->opened[0] && !umidi->opened[1]) { ++ err = usb_autopm_get_interface(umidi->iface); ++ umidi->autopm_reference = err >= 0; ++ if (err < 0 && err != -EACCES) { ++ mutex_unlock(&umidi->mutex); ++ up_read(&umidi->disc_rwsem); ++ return -EIO; ++ } ++ if (umidi->roland_load_ctl) { ++ ctl = umidi->roland_load_ctl; ++ ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; ++ snd_ctl_notify(umidi->card, + SNDRV_CTL_EVENT_MASK_INFO, &ctl->id); +- update_roland_altsetting(umidi); ++ update_roland_altsetting(umidi); ++ } + } ++ umidi->opened[dir]++; ++ if (umidi->opened[1]) ++ snd_usbmidi_input_start(&umidi->list); + } else { +- if (--umidi->opened == 0 && umidi->roland_load_ctl) { +- ctl = umidi->roland_load_ctl; +- ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; +- snd_ctl_notify(umidi->card, ++ umidi->opened[dir]--; ++ if (!umidi->opened[1]) ++ snd_usbmidi_input_stop(&umidi->list); ++ if (!umidi->opened[0] && !umidi->opened[1]) { ++ if (umidi->roland_load_ctl) { ++ ctl = umidi->roland_load_ctl; ++ ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; ++ snd_ctl_notify(umidi->card, + SNDRV_CTL_EVENT_MASK_INFO, &ctl->id); ++ } ++ if (umidi->autopm_reference) ++ usb_autopm_put_interface(umidi->iface); + } + } + mutex_unlock(&umidi->mutex); ++ up_read(&umidi->disc_rwsem); ++ return 0; + } + + static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) +@@ -1063,7 +1094,6 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) + struct snd_usb_midi* umidi = substream->rmidi->private_data; + struct usbmidi_out_port* port = NULL; + int i, j; +- int err; + + for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) + if (umidi->endpoints[i].out) +@@ -1076,25 +1106,15 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) + snd_BUG(); + return -ENXIO; + } +- err = usb_autopm_get_interface(umidi->iface); +- port->autopm_reference = err >= 0; +- if (err < 0 && err != -EACCES) +- return -EIO; ++ + substream->runtime->private_data = port; + port->state = STATE_UNKNOWN; +- substream_open(substream, 1); +- return 0; ++ return substream_open(substream, 0, 1); + } + + static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream) + { +- struct snd_usb_midi* umidi = substream->rmidi->private_data; +- struct usbmidi_out_port *port = substream->runtime->private_data; +- +- substream_open(substream, 0); +- if (port->autopm_reference) +- usb_autopm_put_interface(umidi->iface); +- return 0; ++ return substream_open(substream, 0, 0); + } + + static void snd_usbmidi_output_trigger(struct 
snd_rawmidi_substream *substream, int up) +@@ -1147,14 +1167,12 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream) + + static int snd_usbmidi_input_open(struct snd_rawmidi_substream *substream) + { +- substream_open(substream, 1); +- return 0; ++ return substream_open(substream, 1, 1); + } + + static int snd_usbmidi_input_close(struct snd_rawmidi_substream *substream) + { +- substream_open(substream, 0); +- return 0; ++ return substream_open(substream, 1, 0); + } + + static void snd_usbmidi_input_trigger(struct snd_rawmidi_substream *substream, int up) +@@ -1403,9 +1421,12 @@ void snd_usbmidi_disconnect(struct list_head* p) + * a timer may submit an URB. To reliably break the cycle + * a flag under lock must be used + */ ++ down_write(&umidi->disc_rwsem); + spin_lock_irq(&umidi->disc_lock); + umidi->disconnected = 1; + spin_unlock_irq(&umidi->disc_lock); ++ up_write(&umidi->disc_rwsem); ++ + for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { + struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i]; + if (ep->out) +@@ -2060,12 +2081,15 @@ void snd_usbmidi_input_stop(struct list_head* p) + unsigned int i, j; + + umidi = list_entry(p, struct snd_usb_midi, list); ++ if (!umidi->input_running) ++ return; + for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { + struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i]; + if (ep->in) + for (j = 0; j < INPUT_URBS; ++j) + usb_kill_urb(ep->in->urbs[j]); + } ++ umidi->input_running = 0; + } + + static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint* ep) +@@ -2090,8 +2114,11 @@ void snd_usbmidi_input_start(struct list_head* p) + int i; + + umidi = list_entry(p, struct snd_usb_midi, list); ++ if (umidi->input_running || !umidi->opened[1]) ++ return; + for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) + snd_usbmidi_input_start_ep(umidi->endpoints[i].in); ++ umidi->input_running = 1; + } + + /* +@@ -2117,6 +2144,7 @@ int snd_usbmidi_create(struct snd_card *card, + umidi->usb_protocol_ops = &snd_usbmidi_standard_ops; + init_timer(&umidi->error_timer); + spin_lock_init(&umidi->disc_lock); ++ init_rwsem(&umidi->disc_rwsem); + mutex_init(&umidi->mutex); + umidi->usb_id = USB_ID(le16_to_cpu(umidi->dev->descriptor.idVendor), + le16_to_cpu(umidi->dev->descriptor.idProduct)); +@@ -2229,9 +2257,6 @@ int snd_usbmidi_create(struct snd_card *card, + } + + list_add_tail(&umidi->list, midi_list); +- +- for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) +- snd_usbmidi_input_start_ep(umidi->endpoints[i].in); + return 0; + } + +diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c +index 5acd6e8..20b0e9e 100644 +--- a/tools/perf/builtin-test.c ++++ b/tools/perf/builtin-test.c +@@ -604,19 +604,13 @@ out_free_threads: + #undef nsyscalls + } + +-static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp, +- size_t *sizep) ++static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp) + { +- cpu_set_t *mask; +- size_t size; + int i, cpu = -1, nrcpus = 1024; + realloc: +- mask = CPU_ALLOC(nrcpus); +- size = CPU_ALLOC_SIZE(nrcpus); +- CPU_ZERO_S(size, mask); ++ CPU_ZERO(maskp); + +- if (sched_getaffinity(pid, size, mask) == -1) { +- CPU_FREE(mask); ++ if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) { + if (errno == EINVAL && nrcpus < (1024 << 8)) { + nrcpus = nrcpus << 2; + goto realloc; +@@ -626,19 +620,14 @@ realloc: + } + + for (i = 0; i < nrcpus; i++) { +- if (CPU_ISSET_S(i, size, mask)) { +- if (cpu == -1) { ++ if (CPU_ISSET(i, maskp)) { ++ if (cpu == -1) + cpu = i; +- *maskp = mask; +- *sizep = size; +- } else +- 
CPU_CLR_S(i, size, mask); ++ else ++ CPU_CLR(i, maskp); + } + } + +- if (cpu == -1) +- CPU_FREE(mask); +- + return cpu; + } + +@@ -653,8 +642,8 @@ static int test__PERF_RECORD(void) + .freq = 10, + .mmap_pages = 256, + }; +- cpu_set_t *cpu_mask = NULL; +- size_t cpu_mask_size = 0; ++ cpu_set_t cpu_mask; ++ size_t cpu_mask_size = sizeof(cpu_mask); + struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); + struct perf_evsel *evsel; + struct perf_sample sample; +@@ -718,8 +707,7 @@ static int test__PERF_RECORD(void) + evsel->attr.sample_type |= PERF_SAMPLE_TIME; + perf_evlist__config_attrs(evlist, &opts); + +- err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask, +- &cpu_mask_size); ++ err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask); + if (err < 0) { + pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno)); + goto out_delete_evlist; +@@ -730,9 +718,9 @@ static int test__PERF_RECORD(void) + /* + * So that we can check perf_sample.cpu on all the samples. + */ +- if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) { ++ if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) { + pr_debug("sched_setaffinity: %s\n", strerror(errno)); +- goto out_free_cpu_mask; ++ goto out_delete_evlist; + } + + /* +@@ -916,8 +904,6 @@ found_exit: + } + out_err: + perf_evlist__munmap(evlist); +-out_free_cpu_mask: +- CPU_FREE(cpu_mask); + out_delete_evlist: + perf_evlist__delete(evlist); + out: