Commit b73a4c8
Merge tag 'v5.11.4' into 5.11
This is the 5.11.4 stable release
xanmod committed Mar 7, 2021
2 parents f82c7cd + 79a7af2 commit b73a4c8
Showing 132 changed files with 1,513 additions and 585 deletions.
2 changes: 1 addition & 1 deletion Documentation/devicetree/bindings/net/btusb.txt
@@ -38,7 +38,7 @@ Following example uses irq pin number 3 of gpio0 for out of band wake-on-bt:
compatible = "usb1286,204e";
reg = <1>;
interrupt-parent = <&gpio0>;
interrupt-name = "wakeup";
interrupt-names = "wakeup";
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
};
};
@@ -205,6 +205,11 @@ properties:
Indicates that full-duplex is used. When absent, half
duplex is assumed.

pause:
$ref: /schemas/types.yaml#/definitions/flag
description:
Indicates that pause should be enabled.

asym-pause:
$ref: /schemas/types.yaml#/definitions/flag
description:
7 changes: 3 additions & 4 deletions Documentation/networking/ip-sysctl.rst
@@ -630,16 +630,15 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max

default: initial size of receive buffer used by TCP sockets.
This value overrides net.core.rmem_default used by other protocols.
Default: 87380 bytes. This value results in window of 65535 with
default setting of tcp_adv_win_scale and tcp_app_win:0 and a bit
less for default tcp_app_win. See below about these variables.
Default: 131072 bytes.
This value results in initial window of 65535.

max: maximal size of receive buffer allowed for automatically
selected receiver buffers for TCP socket. This value does not override
net.core.rmem_max. Calling setsockopt() with SO_RCVBUF disables
automatic tuning of that socket's receive buffer size, in which
case this value is ignored.
Default: between 87380B and 6MB, depending on RAM size.
Default: between 131072 and 6MB, depending on RAM size.

tcp_sack - BOOLEAN
Enable select acknowledgments (SACKS).
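
The SO_RCVBUF interaction described under tcp_rmem is easy to get wrong from userspace. A minimal sketch of a socket that opts out of receive-buffer autotuning (values invented, plain C, not part of this patch):

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int bytes = 1 << 20;	/* ask for ~1 MiB; an arbitrary figure */

	if (fd < 0)
		return 1;
	/*
	 * After this call the kernel stops autotuning this socket's
	 * receive buffer, so tcp_rmem's max no longer applies; the
	 * request is clamped to net.core.rmem_max instead (and doubled
	 * internally to account for bookkeeping overhead).
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bytes, sizeof(bytes)))
		perror("setsockopt");
	return 0;
}
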
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 11
SUBLEVEL = 3
SUBLEVEL = 4
EXTRAVERSION =
NAME = 💕 Valentine's Day Edition 💕

35 changes: 31 additions & 4 deletions arch/arm/xen/p2m.c
@@ -93,12 +93,39 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
int i;

for (i = 0; i < count; i++) {
struct gnttab_unmap_grant_ref unmap;
int rc;

if (map_ops[i].status)
continue;
if (unlikely(!set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) {
return -ENOMEM;
}
if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT)))
continue;

/*
* Signal an error for this slot. This in turn requires
* immediate unmapping.
*/
map_ops[i].status = GNTST_general_error;
unmap.host_addr = map_ops[i].host_addr,
unmap.handle = map_ops[i].handle;
map_ops[i].handle = ~0;
if (map_ops[i].flags & GNTMAP_device_map)
unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
else
unmap.dev_bus_addr = 0;

/*
* Pre-populate the status field, to be recognizable in
* the log message below.
*/
unmap.status = 1;

rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
&unmap, 1);
if (rc || unmap.status != GNTST_okay)
pr_err_once("gnttab unmap failed: rc=%d st=%d\n",
rc, unmap.status);
}

return 0;
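
The hunk above follows a pattern that recurs throughout the grant-table code: the hypercall return value only reports whether the batch was processed, while every element carries a per-slot status. A hedged sketch of a caller checking both levels (built on the real GNTTABOP interface, but illustrative rather than code from this tree):

#include <linux/printk.h>
#include <xen/grant_table.h>
#include <asm/xen/hypercall.h>

static int map_batch_checked(struct gnttab_map_grant_ref *ops,
			     unsigned int count)
{
	unsigned int i;
	int rc = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, ops, count);

	if (rc)
		return rc;	/* whole batch rejected; statuses not valid */

	for (i = 0; i < count; i++)
		if (ops[i].status != GNTST_okay)
			pr_warn_once("slot %u failed: %d\n", i, ops[i].status);
	return 0;
}
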
4 changes: 4 additions & 0 deletions arch/parisc/kernel/irq.c
@@ -373,7 +373,11 @@ static inline int eirr_to_irq(unsigned long eirr)
/*
* IRQ STACK - used for irq handler
*/
#ifdef CONFIG_64BIT
#define IRQ_STACK_SIZE (4096 << 4) /* 64k irq stack size */
#else
#define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */
#endif

union irq_stack_union {
unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
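
The shift arithmetic is worth making concrete: 4096 << 4 is 65536 bytes and 4096 << 3 is 32768, so the patch doubles the IRQ stack only for 64-bit builds, presumably because their larger stack frames overflowed the old 32k stack. A throwaway check in plain C:

#include <stdio.h>

int main(void)
{
	printf("%d\n", 4096 << 4);	/* 65536 -> 64k with CONFIG_64BIT */
	printf("%d\n", 4096 << 3);	/* 32768 -> 32k on 32-bit builds */
	return 0;
}
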
21 changes: 5 additions & 16 deletions arch/riscv/mm/init.c
@@ -226,8 +226,6 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

#define MAX_EARLY_MAPPING_SIZE SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
@@ -302,13 +300,7 @@ static void __init create_pte_mapping(pte_t *ptep,

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS 1UL
#else
#define NUM_EARLY_PMDS (1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
@@ -330,11 +322,9 @@ static pmd_t *get_pmd_virt_late(phys_addr_t pa)

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
uintptr_t pmd_num;
BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT);

pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
BUG_ON(pmd_num >= NUM_EARLY_PMDS);
return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
return (uintptr_t)early_pmd;
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
@@ -452,7 +442,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
uintptr_t va, pa, end_va;
uintptr_t load_pa = (uintptr_t)(&_start);
uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
uintptr_t map_size;
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif
@@ -464,12 +454,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
* Enforce boot alignment requirements of RV32 and
* RV64 by only allowing PMD or PGD mappings.
*/
BUG_ON(map_size == PAGE_SIZE);
map_size = PMD_SIZE;

/* Sanity check alignment and size */
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
BUG_ON((load_pa % map_size) != 0);
BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

pt_ops.alloc_pte = alloc_pte_early;
pt_ops.get_pte_virt = get_pte_virt_early;
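
The net effect of the init.c hunks is that a single page of early PMDs remains, so alloc_pmd_early() can only serve virtual addresses in the first PGD slot above PAGE_OFFSET, which is exactly what the new BUG_ON asserts. A userspace model of that invariant, with RV64 Sv39 constants assumed (PAGE_OFFSET = 0xffffffe000000000, PGDIR_SHIFT = 30):

#include <assert.h>
#include <stdint.h>

#define PAGE_OFFSET	0xffffffe000000000ULL	/* assumed Sv39 value */
#define PGDIR_SHIFT	30

int main(void)
{
	uint64_t va = PAGE_OFFSET + (1ULL << 29);	/* inside slot 0 */

	/* A non-zero index would require a second early PMD page. */
	assert(((va - PAGE_OFFSET) >> PGDIR_SHIFT) == 0);
	return 0;
}
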
3 changes: 3 additions & 0 deletions arch/x86/events/intel/core.c
@@ -4397,6 +4397,9 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
12 changes: 12 additions & 0 deletions arch/x86/include/asm/xen/page.h
@@ -86,6 +86,18 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
}
#endif

/*
* The maximum amount of extra memory compared to the base size. The
* main scaling factor is the size of struct page. At extreme ratios
* of base:extra, all the base memory can be filled with page
* structures for the extra memory, leaving no space for anything
* else.
*
* 10x seems like a reasonable balance between scaling flexibility and
* leaving a practically usable system.
*/
#define XEN_EXTRA_MEM_RATIO (10)

/*
* Helper functions to write or read unsigned long values to/from
* memory, when the access may fault.
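
A back-of-the-envelope check of the 10x figure in the new comment, assuming 4 KiB pages and a 64-byte struct page (both typical for x86-64, both config-dependent):

#include <stdio.h>

int main(void)
{
	const double page_size = 4096, struct_page = 64, ratio = 10;

	/*
	 * With extra = ratio * base, the page structures describing the
	 * extra memory consume this fraction of base memory:
	 */
	printf("%.1f%%\n", 100 * ratio * struct_page / page_size);
	/* -> 15.6%: noticeable, but most of base memory stays usable. */
	return 0;
}
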
1 change: 1 addition & 0 deletions arch/x86/kernel/module.c
@@ -114,6 +114,7 @@ int apply_relocate(Elf32_Shdr *sechdrs,
*location += sym->st_value;
break;
case R_386_PC32:
case R_386_PLT32:
/* Add the value, subtract its position */
*location += sym->st_value - (uint32_t)location;
break;
9 changes: 9 additions & 0 deletions arch/x86/kernel/reboot.c
@@ -477,6 +477,15 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
},
},

{ /* PCIe Wifi card isn't detected after reboot otherwise */
.callback = set_pci_reboot,
.ident = "Zotac ZBOX CI327 nano",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "NA"),
DMI_MATCH(DMI_PRODUCT_NAME, "ZBOX-CI327NANO-GS-01"),
},
},

/* Sony */
{ /* Handle problems with rebooting on Sony VGN-Z540N */
.callback = set_bios_reboot,
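
Quirk entries like the Zotac one are consumed by dmi_check_system(), which runs the callback of the first entry whose DMI strings all match. A self-contained sketch of that shape (names invented; the real table is reboot_dmi_table above):

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/printk.h>

static int __init pci_reboot_sketch(const struct dmi_system_id *d)
{
	pr_info("%s: forcing PCI reboot method\n", d->ident);
	return 0;
}

static const struct dmi_system_id sketch_table[] __initconst = {
	{
		.callback = pci_reboot_sketch,
		.ident = "Zotac ZBOX CI327 nano",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "NA"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ZBOX-CI327NANO-GS-01"),
		},
	},
	{ }	/* zero-filled terminator */
};

/* At boot, dmi_check_system(sketch_table) would walk this table. */
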
12 changes: 8 additions & 4 deletions arch/x86/tools/relocs.c
@@ -867,9 +867,11 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
case R_386_PC32:
case R_386_PC16:
case R_386_PC8:
case R_386_PLT32:
/*
* NONE can be ignored and PC relative relocations don't
* need to be adjusted.
* NONE can be ignored and PC relative relocations don't need
* to be adjusted. Because sym must be defined, R_386_PLT32 can
* be treated the same way as R_386_PC32.
*/
break;

@@ -910,9 +912,11 @@ static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
case R_386_PC32:
case R_386_PC16:
case R_386_PC8:
case R_386_PLT32:
/*
* NONE can be ignored and PC relative relocations don't
* need to be adjusted.
* NONE can be ignored and PC relative relocations don't need
* to be adjusted. Because sym must be defined, R_386_PLT32 can
* be treated the same way as R_386_PC32.
*/
break;

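
Both relocs.c hunks, like the module-loader hunk earlier, rest on the same fact: once a symbol is fully resolved and no PLT exists, R_386_PLT32 computes the same value as R_386_PC32, namely S + A - P. A sketch of that shared formula (field naming per the ELF spec, not this tool's code):

#include <stdint.h>

/*
 * 32-bit PC-relative relocation: symbol value S plus addend A, minus
 * the address P of the field being patched. With the symbol defined
 * and no PLT in play, R_386_PLT32 degenerates to exactly this.
 */
static uint32_t reloc_pc32(uint32_t S, int32_t A, uint32_t P)
{
	return S + A - P;
}

int main(void)
{
	/* A call at 0x1000 targeting 0x2000: the disp32 at 0x1001 gets
	 * 0x2000 + (-4) - 0x1001 = 0xffb. */
	return reloc_pc32(0x2000, -4, 0x1001) == 0xffb ? 0 : 1;
}
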
54 changes: 47 additions & 7 deletions arch/x86/xen/p2m.c
@@ -416,6 +416,9 @@ void __init xen_vmalloc_p2m_tree(void)
xen_p2m_last_pfn = xen_max_p2m_pfn;

p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC))
p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO;

vm.flags = VM_ALLOC;
vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
PMD_SIZE * PMDS_PER_MID_PAGE);
@@ -652,10 +655,9 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
pte_t *ptep;
unsigned int level;

if (unlikely(pfn >= xen_p2m_size)) {
BUG_ON(mfn != INVALID_P2M_ENTRY);
return true;
}
/* Only invalid entries allowed above the highest p2m covered frame. */
if (unlikely(pfn >= xen_p2m_size))
return mfn == INVALID_P2M_ENTRY;

/*
* The interface requires atomic updates on p2m elements.
@@ -710,6 +712,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,

for (i = 0; i < count; i++) {
unsigned long mfn, pfn;
struct gnttab_unmap_grant_ref unmap[2];
int rc;

/* Do not add to override if the map failed. */
if (map_ops[i].status != GNTST_okay ||
@@ -727,10 +731,46 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,

WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");

if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
ret = -ENOMEM;
goto out;
if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
continue;

/*
* Signal an error for this slot. This in turn requires
* immediate unmapping.
*/
map_ops[i].status = GNTST_general_error;
unmap[0].host_addr = map_ops[i].host_addr,
unmap[0].handle = map_ops[i].handle;
map_ops[i].handle = ~0;
if (map_ops[i].flags & GNTMAP_device_map)
unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
else
unmap[0].dev_bus_addr = 0;

if (kmap_ops) {
kmap_ops[i].status = GNTST_general_error;
unmap[1].host_addr = kmap_ops[i].host_addr,
unmap[1].handle = kmap_ops[i].handle;
kmap_ops[i].handle = ~0;
if (kmap_ops[i].flags & GNTMAP_device_map)
unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
else
unmap[1].dev_bus_addr = 0;
}

/*
* Pre-populate both status fields, to be recognizable in
* the log message below.
*/
unmap[0].status = 1;
unmap[1].status = 1;

rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
unmap, 1 + !!kmap_ops);
if (rc || unmap[0].status != GNTST_okay ||
unmap[1].status != GNTST_okay)
pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n",
rc, unmap[0].status, unmap[1].status);
}

out:
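
One subtlety in the x86 version: the unmap hypercall submits 1 + !!kmap_ops entries, a compact way of saying one entry normally and two when a kernel-map operation travels with the user mapping. Concretely:

#include <stdio.h>

int main(void)
{
	void *kmap_ops = NULL;

	printf("%d\n", 1 + !!kmap_ops);	/* 1: only unmap[0] is submitted */
	kmap_ops = &kmap_ops;
	printf("%d\n", 1 + !!kmap_ops);	/* 2: unmap[0] and unmap[1] */
	return 0;
}
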
25 changes: 3 additions & 22 deletions arch/x86/xen/setup.c
@@ -59,18 +59,6 @@ static struct {
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
* The maximum amount of extra memory compared to the base size. The
* main scaling factor is the size of struct page. At extreme ratios
* of base:extra, all the base memory can be filled with page
* structures for the extra memory, leaving no space for anything
* else.
*
* 10x seems like a reasonable balance between scaling flexibility and
* leaving a practically usable system.
*/
#define EXTRA_MEM_RATIO (10)

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

static void __init xen_parse_512gb(void)
@@ -790,20 +778,13 @@ char * __init xen_memory_setup(void)
extra_pages += max_pages - max_pfn;

/*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
* factor the base size. On non-highmem systems, the base
* size is the full initial memory allocation; on highmem it
* is limited to the max size of lowmem, so that it doesn't
* get completely filled.
* Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO
* factor the base size.
*
* Make sure we have no memory above max_pages, as this area
* isn't handled by the p2m management.
*
* In principle there could be a problem in lowmem systems if
* the initial memory is also very large with respect to
* lowmem, but we won't try to deal with that here.
*/
extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages, max_pages - max_pfn);
i = 0;
addr = xen_e820_table.entries[0].addr;
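
The min3() clamp in xen_memory_setup() caps extra memory three ways: by the ratio discussed above, by what the loop has accumulated, and by the host-imposed page limit. A worked example with invented numbers (min/min3 redefined locally; in the kernel they come from linux/minmax.h):

#include <stdio.h>

#define min(a, b)		((a) < (b) ? (a) : (b))
#define min3(a, b, c)		min(min((a), (b)), (c))
#define XEN_EXTRA_MEM_RATIO	10

int main(void)
{
	unsigned long max_pfn = 1UL << 20;	/* 4 GiB of base RAM, hypothetical */
	unsigned long maxmem_pfn = 1UL << 25;	/* stands in for PFN_DOWN(MAXMEM) */
	unsigned long extra_pages = 1UL << 22;	/* accumulated so far */
	unsigned long max_pages = max_pfn + (1UL << 21);

	extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, maxmem_pfn),
			   extra_pages, max_pages - max_pfn);
	printf("%lu pages\n", extra_pages);	/* 2097152: the host limit wins */
	return 0;
}
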
