Skip to content

Commit

Permalink
Merge tag 'v6.6.17' into 6.6-main
Browse files Browse the repository at this point in the history
This is the 6.6.17 stable release

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmXPpTcACgkQONu9yGCS
# aT5LtRAAmoyR9UpT4+TAuhfpmZd1AD8t8gtOQhksagLk/1mkz09teLt3MFtdzusG
# 3nlCHOVQUeSG0RmpJL2BvNSF9A0vgN2b0anMNd8wJVL6aVTaxshD+/SQkilcRYFi
# l3QYpvfu3fPewQg6sL/+xRu3U+AVAsznFEFLDFgyMikNK19AR+fUOsFOZ3pTE+fO
# aB5Z8qEI7YiOSI4Z4NpBIFHx6y6IyPqVmVN2Gq78C19Wlc/gKyP2fymtIfrtn2D3
# hIoZbBFekexXNdJA4Pt8vCe9y7/mYqYmUo7fbL/913pkmdu1cipYm6EhnR0yaSFN
# +zAIZ+xZO7eNN0gMaOaxmgeej8g4sRk/qQgh7zK59yotxqGl6lkTe6TlvOsrsAIl
# iZc753AL4gJ+HXAxTQ7iriu6gb+rScxi8sNxCyEArNiownhcyq7+kcz7Br81qA3G
# hnUeYT7nkY7xX6/GtYpEI4EwvoywZRViPyp1NEp3DTelwnvOXPqjJ7Wcovz4U5y0
# ecEnpchH9R6Yqd5inkhOqqV9pxLiFv4p7umc4/1TTwVXsTqpz5QZB3/k1+2ZUWxS
# bIA9djMyu6aETioRm8UJE30NV/BqO8uAbUTc1vefk9Lrr/Kic8H+a+frxie6GAbt
# D8EOJc526PxlASj51R67W0I4rUljwWrcaZt8Pk21esA100rqXoM=
# =Sule
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri Feb 16 19:11:03 2024 CET
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
  • Loading branch information
frank-w committed Mar 29, 2024
2 parents 6a8d38e + b2c9bf0 commit 74d36de
Show file tree
Hide file tree
Showing 157 changed files with 1,701 additions and 1,017 deletions.
2 changes: 1 addition & 1 deletion Documentation/process/changes.rst
Expand Up @@ -31,7 +31,7 @@ you probably needn't concern yourself with pcmciautils.
====================== =============== ========================================
GNU C 5.1 gcc --version
Clang/LLVM (optional) 11.0.0 clang --version
Rust (optional) 1.71.1 rustc --version
Rust (optional) 1.73.0 rustc --version
bindgen (optional) 0.65.1 bindgen --version
GNU make 3.82 make --version
bash 4.2 bash --version
Expand Down
1 change: 1 addition & 0 deletions MAINTAINERS
Expand Up @@ -23630,6 +23630,7 @@ F: include/xen/arm/swiotlb-xen.h
F: include/xen/swiotlb-xen.h

XFS FILESYSTEM
M: Catherine Hoang <catherine.hoang@oracle.com>
M: Chandan Babu R <chandan.babu@oracle.com>
R: Darrick J. Wong <djwong@kernel.org>
L: linux-xfs@vger.kernel.org
Expand Down
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 16
SUBLEVEL = 17
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

Expand Down
1 change: 1 addition & 0 deletions arch/arc/include/asm/cacheflush.h
Expand Up @@ -40,6 +40,7 @@ void dma_cache_wback(phys_addr_t start, unsigned long sz);

/* TBD: optimize this */
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_cache_all()

#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */
Expand Down
2 changes: 2 additions & 0 deletions arch/arm/include/asm/cacheflush.h
Expand Up @@ -340,6 +340,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
dsb(ishst);
}

#define flush_cache_vmap_early(start, end) do { } while (0)

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
if (!cache_is_vipt_nonaliasing())
Expand Down
1 change: 1 addition & 0 deletions arch/csky/abiv1/inc/abi/cacheflush.h
Expand Up @@ -43,6 +43,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
*/
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end) cache_wbinv_all()
#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) cache_wbinv_all()

#define flush_icache_range(start, end) cache_wbinv_range(start, end)
Expand Down
1 change: 1 addition & 0 deletions arch/csky/abiv2/inc/abi/cacheflush.h
Expand Up @@ -41,6 +41,7 @@ void flush_icache_mm_range(struct mm_struct *mm,
void flush_icache_deferred(struct mm_struct *mm);

#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
Expand Down
1 change: 1 addition & 0 deletions arch/m68k/include/asm/cacheflush_mm.h
Expand Up @@ -191,6 +191,7 @@ extern void cache_push_v(unsigned long vaddr, int len);
#define flush_cache_all() __flush_cache_all()

#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_cache_all()

static inline void flush_cache_mm(struct mm_struct *mm)
Expand Down
2 changes: 2 additions & 0 deletions arch/mips/include/asm/cacheflush.h
Expand Up @@ -97,6 +97,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
__flush_cache_vmap();
}

#define flush_cache_vmap_early(start, end) do { } while (0)

extern void (*__flush_cache_vunmap)(void);

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
Expand Down
1 change: 1 addition & 0 deletions arch/nios2/include/asm/cacheflush.h
Expand Up @@ -38,6 +38,7 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
#define flush_icache_pages flush_icache_pages

#define flush_cache_vmap(start, end) flush_dcache_range(start, end)
#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_dcache_range(start, end)

extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
Expand Down
1 change: 1 addition & 0 deletions arch/parisc/include/asm/cacheflush.h
Expand Up @@ -41,6 +41,7 @@ void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);

#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vmap_early(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) flush_cache_all()

void flush_dcache_folio(struct folio *folio);
Expand Down
3 changes: 2 additions & 1 deletion arch/riscv/include/asm/cacheflush.h
Expand Up @@ -37,7 +37,8 @@ static inline void flush_dcache_page(struct page *page)
flush_icache_mm(vma->vm_mm, 0)

#ifdef CONFIG_64BIT
#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
#define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end)
#endif

#ifndef CONFIG_SMP
Expand Down
3 changes: 3 additions & 0 deletions arch/riscv/include/asm/hugetlb.h
Expand Up @@ -11,6 +11,9 @@ static inline void arch_clear_hugepage_flags(struct page *page)
}
#define arch_clear_hugepage_flags arch_clear_hugepage_flags

bool arch_hugetlb_migration_supported(struct hstate *h);
#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported

#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define __HAVE_ARCH_HUGE_PTE_CLEAR
void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
Expand Down
3 changes: 0 additions & 3 deletions arch/riscv/include/asm/sbi.h
Expand Up @@ -273,9 +273,6 @@ void sbi_set_timer(uint64_t stime_value);
void sbi_shutdown(void);
void sbi_send_ipi(unsigned int cpu);
int sbi_remote_fence_i(const struct cpumask *cpu_mask);
int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size);

int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
unsigned long start,
Expand Down
5 changes: 5 additions & 0 deletions arch/riscv/include/asm/stacktrace.h
Expand Up @@ -21,4 +21,9 @@ static inline bool on_thread_stack(void)
return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
}


#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
#endif /* CONFIG_VMAP_STACK */

#endif /* _ASM_RISCV_STACKTRACE_H */
8 changes: 7 additions & 1 deletion arch/riscv/include/asm/tlb.h
Expand Up @@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);

static inline void tlb_flush(struct mmu_gather *tlb)
{
flush_tlb_mm(tlb->mm);
#ifdef CONFIG_MMU
if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables)
flush_tlb_mm(tlb->mm);
else
flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
tlb_get_unmap_size(tlb));
#endif
}

#endif /* _ASM_RISCV_TLB_H */
17 changes: 13 additions & 4 deletions arch/riscv/include/asm/tlbflush.h
Expand Up @@ -11,6 +11,9 @@
#include <asm/smp.h>
#include <asm/errata_list.h>

#define FLUSH_TLB_MAX_SIZE ((unsigned long)-1)
#define FLUSH_TLB_NO_ASID ((unsigned long)-1)

#ifdef CONFIG_MMU
extern unsigned long asid_mask;

Expand All @@ -32,9 +35,13 @@ static inline void local_flush_tlb_page(unsigned long addr)
#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned int page_size);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
Expand All @@ -51,14 +58,16 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
local_flush_tlb_all();
}

#define flush_tlb_mm(mm) flush_tlb_all()
#endif /* !CONFIG_SMP || !CONFIG_MMU */

/* Flush a range of kernel pages */
static inline void flush_tlb_kernel_range(unsigned long start,
unsigned long end)
{
flush_tlb_all();
local_flush_tlb_all();
}

#define flush_tlb_mm(mm) flush_tlb_all()
#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
#endif /* !CONFIG_SMP || !CONFIG_MMU */

#endif /* _ASM_RISCV_TLBFLUSH_H */
32 changes: 10 additions & 22 deletions arch/riscv/kernel/sbi.c
Expand Up @@ -11,6 +11,7 @@
#include <linux/reboot.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

/* default SBI version is 0.1 */
unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
Expand Down Expand Up @@ -376,32 +377,15 @@ int sbi_remote_fence_i(const struct cpumask *cpu_mask)
}
EXPORT_SYMBOL(sbi_remote_fence_i);

/**
 * sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote
 * harts for the specified virtual address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the virtual address
 * @size: Total size of the virtual address range.
 *
 * Issues the plain (non-ASID) remote fence via the SBI RFENCE extension;
 * the two trailing __sbi_rfence() arguments (asid/unused) are therefore 0.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size)
{
return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma);

/**
* sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
* remote harts for a virtual address range belonging to a specific ASID.
* remote harts for a virtual address range belonging to a specific ASID or not.
*
* @cpu_mask: A cpu mask containing all the target harts.
* @start: Start of the virtual address
* @size: Total size of the virtual address range.
* @asid: The value of address space identifier (ASID).
* @asid: The value of address space identifier (ASID), or FLUSH_TLB_NO_ASID
* for flushing all address spaces.
*
* Return: 0 on success, appropriate linux error code otherwise.
*/
Expand All @@ -410,8 +394,12 @@ int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
unsigned long size,
unsigned long asid)
{
return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
cpu_mask, start, size, asid, 0);
if (asid == FLUSH_TLB_NO_ASID)
return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
cpu_mask, start, size, 0, 0);
else
return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);

Expand Down
78 changes: 73 additions & 5 deletions arch/riscv/mm/hugetlbpage.c
Expand Up @@ -125,6 +125,26 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return pte;
}

/*
 * hugetlb_mask_last_page() - For the huge page size of @h, return the mask
 * covering all pages of that size within one entry of the next-higher
 * page-table level (e.g. PUD_SIZE - PMD_SIZE for PMD-sized pages), or the
 * span of a NAPOT 64KB contiguous mapping within a PMD.  Returns 0 for
 * sizes with no such optimization (caller then falls back to the generic
 * per-page walk).  NOTE(review): semantics inferred from the generic
 * hugetlb_mask_last_page() contract — confirm against mm/hugetlb.c.
 */
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
unsigned long hp_size = huge_page_size(h);

switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
/* PUD-sized pages only exist when the PMD level is not folded. */
return P4D_SIZE - PUD_SIZE;
#endif
case PMD_SIZE:
return PUD_SIZE - PMD_SIZE;
case napot_cont_size(NAPOT_CONT64KB_ORDER):
return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER);
default:
break;
}

/* Unrecognized size: no last-page mask optimization available. */
return 0UL;
}

static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
Expand Down Expand Up @@ -177,13 +197,36 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
return entry;
}

/*
 * clear_flush() - Clear @ncontig consecutive PTEs (each mapping @pgsize
 * bytes) starting at @addr, then flush the TLB once for the whole cleared
 * range.  This is the "break" half of the Break-Before-Make sequence used
 * when updating NAPOT contiguous mappings: all entries are invalidated and
 * fenced before the caller installs the new PTEs.
 */
static void clear_flush(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
unsigned long pgsize,
unsigned long ncontig)
{
/* Synthetic VMA so flush_tlb_range() can be used on a bare mm. */
struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
unsigned long i, saddr = addr;

for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
ptep_get_and_clear(mm, addr, ptep);

/* One range flush from the saved start to the end of the last entry. */
flush_tlb_range(&vma, saddr, addr);
}

/*
* When dealing with NAPOT mappings, the privileged specification indicates that
* "if an update needs to be made, the OS generally should first mark all of the
* PTEs invalid, then issue SFENCE.VMA instruction(s) covering all 4 KiB regions
* within the range, [...] then update the PTE(s), as described in Section
* 4.2.1.". That's the equivalent of the Break-Before-Make approach used by
* arm64.
*/
void set_huge_pte_at(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
pte_t pte,
unsigned long sz)
{
unsigned long hugepage_shift;
unsigned long hugepage_shift, pgsize;
int i, pte_num;

if (sz >= PGDIR_SIZE)
Expand All @@ -198,7 +241,22 @@ void set_huge_pte_at(struct mm_struct *mm,
hugepage_shift = PAGE_SHIFT;

pte_num = sz >> hugepage_shift;
for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
pgsize = 1 << hugepage_shift;

if (!pte_present(pte)) {
for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
set_ptes(mm, addr, ptep, pte, 1);
return;
}

if (!pte_napot(pte)) {
set_ptes(mm, addr, ptep, pte, 1);
return;
}

clear_flush(mm, addr, ptep, pgsize, pte_num);

for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
set_pte_at(mm, addr, ptep, pte);
}

Expand Down Expand Up @@ -306,7 +364,7 @@ void huge_pte_clear(struct mm_struct *mm,
pte_clear(mm, addr, ptep);
}

static __init bool is_napot_size(unsigned long size)
static bool is_napot_size(unsigned long size)
{
unsigned long order;

Expand Down Expand Up @@ -334,7 +392,7 @@ arch_initcall(napot_hugetlbpages_init);

#else

static __init bool is_napot_size(unsigned long size)
/*
 * Stub for builds without NAPOT hugepage support (the enclosing #else
 * branch): no size ever qualifies as a NAPOT contiguous size.
 */
static bool is_napot_size(unsigned long size)
{
return false;
}
Expand All @@ -351,7 +409,7 @@ int pmd_huge(pmd_t pmd)
return pmd_leaf(pmd);
}

bool __init arch_hugetlb_valid_size(unsigned long size)
static bool __hugetlb_valid_size(unsigned long size)
{
if (size == HPAGE_SIZE)
return true;
Expand All @@ -363,6 +421,16 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
return false;
}

/*
 * Arch hook: report whether @size is a hugetlb page size this kernel
 * supports.  Thin __init wrapper around __hugetlb_valid_size() so the same
 * check can also be reached from non-__init callers.
 */
bool __init arch_hugetlb_valid_size(unsigned long size)
{
return __hugetlb_valid_size(size);
}

/*
 * Arch hook: hugepage migration is supported only for page sizes that this
 * architecture recognizes as valid hugetlb sizes (same predicate as
 * arch_hugetlb_valid_size(), but callable at runtime).
 */
bool arch_hugetlb_migration_supported(struct hstate *h)
{
return __hugetlb_valid_size(huge_page_size(h));
}

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
Expand Down
4 changes: 4 additions & 0 deletions arch/riscv/mm/init.c
Expand Up @@ -1502,6 +1502,10 @@ void __init misc_mem_init(void)
early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
arch_numa_init();
sparse_init();
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* The entire VMEMMAP region has been populated. Flush TLB for this region */
local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END);
#endif
zone_sizes_init();
reserve_crashkernel();
memblock_dump_all();
Expand Down

0 comments on commit 74d36de

Please sign in to comment.