Commit d5963d8
Author: Ingo Molnar <mingo@kernel.org>
Parents: 3db6d5a + 37624b5

Merge tag 'v5.1-rc7' into x86/mm, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

60 files changed: +404, -224 lines


Documentation/sysctl/vm.txt

Lines changed: 8 additions & 8 deletions
@@ -866,14 +866,14 @@ The intent is that compaction has less work to do in the future and to
 increase the success rate of future high-order allocations such as SLUB
 allocations, THP and hugetlbfs pages.
 
-To make it sensible with respect to the watermark_scale_factor parameter,
-the unit is in fractions of 10,000. The default value of 15,000 means
-that up to 150% of the high watermark will be reclaimed in the event of
-a pageblock being mixed due to fragmentation. The level of reclaim is
-determined by the number of fragmentation events that occurred in the
-recent past. If this value is smaller than a pageblock then a pageblocks
-worth of pages will be reclaimed (e.g. 2MB on 64-bit x86). A boost factor
-of 0 will disable the feature.
+To make it sensible with respect to the watermark_scale_factor
+parameter, the unit is in fractions of 10,000. The default value of
+15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
+watermark will be reclaimed in the event of a pageblock being mixed due
+to fragmentation. The level of reclaim is determined by the number of
+fragmentation events that occurred in the recent past. If this value is
+smaller than a pageblock then a pageblocks worth of pages will be reclaimed
+(e.g. 2MB on 64-bit x86). A boost factor of 0 will disable the feature.
 
 =============================================================
 
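A quick sanity check on the arithmetic described in that paragraph. This is a stand-alone sketch, not kernel code: the high watermark of 1024 pages is an assumed figure, and the variable names merely echo the sysctl documentation.

```c
#include <stdio.h>

int main(void)
{
	/* Assumed zone high watermark; the boost factor is the documented
	 * default. The factor's unit is fractions of 10,000. */
	unsigned long high_wmark_pages = 1024;
	unsigned long watermark_boost_factor = 15000;

	/* 15,000 / 10,000 = 150%, so the reclaim ceiling is 1.5x the
	 * high watermark: 1536 pages here. */
	unsigned long max_boost = high_wmark_pages * watermark_boost_factor / 10000;

	printf("boost ceiling: %lu pages\n", max_boost);
	return 0;
}
```
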
Makefile

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*

arch/arm/Kconfig

Lines changed: 1 addition & 1 deletion
@@ -73,7 +73,7 @@ config ARM
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
 	select HAVE_EXIT_THREAD
 	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
-	select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL
+	select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
 	select HAVE_FUNCTION_TRACER if !XIP_KERNEL
 	select HAVE_GCC_PLUGINS
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)

arch/arm/Kconfig.debug

Lines changed: 3 additions & 3 deletions
@@ -47,8 +47,8 @@ config DEBUG_WX
 
 choice
 	prompt "Choose kernel unwinder"
-	default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
-	default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
+	default UNWINDER_ARM if AEABI
+	default UNWINDER_FRAME_POINTER if !AEABI
 	help
 	  This determines which method will be used for unwinding kernel stack
 	  traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
@@ -65,7 +65,7 @@ config UNWINDER_FRAME_POINTER
 
 config UNWINDER_ARM
 	bool "ARM EABI stack unwinder"
-	depends on AEABI
+	depends on AEABI && !FUNCTION_GRAPH_TRACER
 	select ARM_UNWIND
 	help
 	  This option enables stack unwinding support in the kernel

arch/arm/boot/compressed/head.S

Lines changed: 15 additions & 1 deletion
@@ -1438,7 +1438,21 @@ ENTRY(efi_stub_entry)
 
 		@ Preserve return value of efi_entry() in r4
 		mov	r4, r0
-		bl	cache_clean_flush
+
+		@ our cache maintenance code relies on CP15 barrier instructions
+		@ but since we arrived here with the MMU and caches configured
+		@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
+		@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
+		@ the enable path will be executed on v7+ only.
+		mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
+		tst	r1, #(1 << 5)		@ CP15BEN bit set?
+		bne	0f
+		orr	r1, r1, #(1 << 5)	@ CP15 barrier instructions
+		mcr	p15, 0, r1, c1, c0, 0	@ write SCTLR
+ ARM(		.inst	0xf57ff06f		@ v7+ isb	)
+ THUMB(		isb				)
+
+0:		bl	cache_clean_flush
 		bl	cache_off
 
 		@ Set parameters for booting zImage according to boot protocol
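
For readers who do not speak CP15 assembly, the hunk above boils down to a read-modify-write of one SCTLR bit. Here is a minimal host-side model: read_sctlr()/write_sctlr() are invented stand-ins for the mrc/mcr instructions, and the initial register value is made up, so treat this purely as a sketch.

```c
#include <stdio.h>

#define SCTLR_CP15BEN	(1u << 5)	/* CP15 barrier enable bit */

/* Pretend SCTLR: an arbitrary value with CP15BEN clear, as UEFI might
 * leave it. The real code accesses p15, 0, c1, c0, 0. */
static unsigned int sctlr = 0x00c50018;

static unsigned int read_sctlr(void) { return sctlr; }
static void write_sctlr(unsigned int v) { sctlr = v; }

int main(void)
{
	unsigned int v = read_sctlr();

	if (!(v & SCTLR_CP15BEN)) {
		write_sctlr(v | SCTLR_CP15BEN);
		/* The assembly issues an ISB here so the new setting is
		 * guaranteed to take effect before cache_clean_flush. */
	}
	printf("CP15BEN now set: %d\n", !!(read_sctlr() & SCTLR_CP15BEN));
	return 0;
}
```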

arch/arm/kernel/head-nommu.S

Lines changed: 1 addition & 1 deletion
@@ -133,9 +133,9 @@ __secondary_data:
  */
 	.text
 __after_proc_init:
-#ifdef CONFIG_ARM_MPU
 M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
 M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)
+#ifdef CONFIG_ARM_MPU
 M_CLASS(ldr	r3, [r12, 0x50])
 AR_CLASS(mrc	p15, 0, r3, c0, c1, 4)		@ Read ID_MMFR0
 	and	r3, r3, #(MMFR0_PMSA)		@ PMSA field

arch/arm64/kernel/ftrace.c

Lines changed: 7 additions & 2 deletions
@@ -103,10 +103,15 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	 * to be revisited if support for multiple ftrace entry points
 	 * is added in the future, but for now, the pr_err() below
 	 * deals with a theoretical issue only.
+	 *
+	 * Note that PLTs are place relative, and plt_entries_equal()
+	 * checks whether they point to the same target. Here, we need
+	 * to check if the actual opcodes are in fact identical,
+	 * regardless of the offset in memory so use memcmp() instead.
 	 */
 	trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
-	if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-			       &trampoline)) {
+	if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
+		   sizeof(trampoline))) {
 		if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
 			pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
 			return -EINVAL;
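
To see why the two comparisons can disagree, consider a toy model of a place-relative PLT slot that stores only an offset from its own address; the real arm64 entry encodes that offset inside an adrp/add/br triplet, and every name below is invented for illustration.

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Toy PLT slot: holds target minus the slot's own address. */
struct toy_plt {
	int64_t rel_target;
};

/* "Equal" in the plt_entries_equal() sense: same destination. */
static int toy_plt_entries_equal(const struct toy_plt *a, uint64_t a_addr,
				 const struct toy_plt *b, uint64_t b_addr)
{
	return a_addr + a->rel_target == b_addr + b->rel_target;
}

int main(void)
{
	uint64_t target = 0x10000;
	/* Two slots at different addresses, branching to the same target. */
	struct toy_plt in_module = { .rel_target = (int64_t)target - 0x1000 };
	struct toy_plt scratch   = { .rel_target = (int64_t)target - 0x2000 };

	/* Same destination, yet different opcodes: the destination check
	 * passes while the byte-for-byte check fails. ftrace_make_call()
	 * wants the latter, hence memcmp(). */
	assert(toy_plt_entries_equal(&in_module, 0x1000, &scratch, 0x2000));
	assert(memcmp(&in_module, &scratch, sizeof(in_module)) != 0);
	return 0;
}
```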

arch/arm64/mm/init.c

Lines changed: 1 addition & 1 deletion
@@ -363,7 +363,7 @@ void __init arm64_memblock_init(void)
 		 * Otherwise, this is a no-op
 		 */
 		u64 base = phys_initrd_start & PAGE_MASK;
-		u64 size = PAGE_ALIGN(phys_initrd_size);
+		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
 
 		/*
 		 * We can only add back the initrd memory if we don't end up
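
The one-liner matters when the initrd does not start on a page boundary. A toy calculation with made-up addresses and 4 KiB pages shows the old expression dropping the tail of the initrd:

```c
#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE	4096ULL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* Hypothetical unaligned initrd: starts mid-page, one page long,
	 * so it occupies [0x1800, 0x2800). */
	uint64_t phys_initrd_start = 0x1800;
	uint64_t phys_initrd_size  = 0x1000;

	uint64_t base = phys_initrd_start & PAGE_MASK;	/* 0x1000 */

	/* Old: aligns only the size, covering [0x1000, 0x2000) and
	 * missing the initrd tail at [0x2000, 0x2800). */
	uint64_t old_size = PAGE_ALIGN(phys_initrd_size);
	assert(base + old_size == 0x2000);

	/* New: aligns the end address and measures from base, covering
	 * [0x1000, 0x3000), i.e. the whole initrd. */
	uint64_t new_size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
	assert(base + new_size == 0x3000);
	return 0;
}
```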

arch/powerpc/configs/skiroot_defconfig

Lines changed: 1 addition & 0 deletions
@@ -266,6 +266,7 @@ CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_HUGETLBFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS=y

arch/powerpc/mm/mmu_context_iommu.c

Lines changed: 58 additions & 39 deletions
@@ -95,28 +95,15 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 		unsigned long entries, unsigned long dev_hpa,
 		struct mm_iommu_table_group_mem_t **pmem)
 {
-	struct mm_iommu_table_group_mem_t *mem;
-	long i, ret, locked_entries = 0;
+	struct mm_iommu_table_group_mem_t *mem, *mem2;
+	long i, ret, locked_entries = 0, pinned = 0;
 	unsigned int pageshift;
-
-	mutex_lock(&mem_list_mutex);
-
-	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
-			next) {
-		/* Overlap? */
-		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
-				(ua < (mem->ua +
-				       (mem->entries << PAGE_SHIFT)))) {
-			ret = -EINVAL;
-			goto unlock_exit;
-		}
-
-	}
+	unsigned long entry, chunk;
 
 	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
 		ret = mm_iommu_adjust_locked_vm(mm, entries, true);
 		if (ret)
-			goto unlock_exit;
+			return ret;
 
 		locked_entries = entries;
 	}
@@ -148,17 +135,27 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	}
 
 	down_read(&mm->mmap_sem);
-	ret = get_user_pages_longterm(ua, entries, FOLL_WRITE, mem->hpages, NULL);
+	chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
+			sizeof(struct vm_area_struct *);
+	chunk = min(chunk, entries);
+	for (entry = 0; entry < entries; entry += chunk) {
+		unsigned long n = min(entries - entry, chunk);
+
+		ret = get_user_pages_longterm(ua + (entry << PAGE_SHIFT), n,
+				FOLL_WRITE, mem->hpages + entry, NULL);
+		if (ret == n) {
+			pinned += n;
+			continue;
+		}
+		if (ret > 0)
+			pinned += ret;
+		break;
+	}
 	up_read(&mm->mmap_sem);
-	if (ret != entries) {
-		/* free the reference taken */
-		for (i = 0; i < ret; i++)
-			put_page(mem->hpages[i]);
-
-		vfree(mem->hpas);
-		kfree(mem);
-		ret = -EFAULT;
-		goto unlock_exit;
+	if (pinned != entries) {
+		if (!ret)
+			ret = -EFAULT;
+		goto free_exit;
 	}
 
 	pageshift = PAGE_SHIFT;
@@ -183,21 +180,43 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	}
 
 good_exit:
-	ret = 0;
 	atomic64_set(&mem->mapped, 1);
 	mem->used = 1;
 	mem->ua = ua;
 	mem->entries = entries;
-	*pmem = mem;
 
-	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
+	mutex_lock(&mem_list_mutex);
 
-unlock_exit:
-	if (locked_entries && ret)
-		mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+	list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
+		/* Overlap? */
+		if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
+				(ua < (mem2->ua +
+				       (mem2->entries << PAGE_SHIFT)))) {
+			ret = -EINVAL;
+			mutex_unlock(&mem_list_mutex);
+			goto free_exit;
+		}
+	}
+
+	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 
 	mutex_unlock(&mem_list_mutex);
 
+	*pmem = mem;
+
+	return 0;
+
+free_exit:
+	/* free the reference taken */
+	for (i = 0; i < pinned; i++)
+		put_page(mem->hpages[i]);
+
+	vfree(mem->hpas);
+	kfree(mem);
+
+unlock_exit:
+	mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+
 	return ret;
 }
 
@@ -266,7 +285,7 @@ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
 	long ret = 0;
-	unsigned long entries, dev_hpa;
+	unsigned long unlock_entries = 0;
 
 	mutex_lock(&mem_list_mutex);
 
@@ -287,17 +306,17 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 		goto unlock_exit;
 	}
 
+	if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+		unlock_entries = mem->entries;
+
 	/* @mapped became 0 so now mappings are disabled, release the region */
-	entries = mem->entries;
-	dev_hpa = mem->dev_hpa;
 	mm_iommu_release(mem);
 
-	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
-		mm_iommu_adjust_locked_vm(mm, entries, false);
-
 unlock_exit:
 	mutex_unlock(&mem_list_mutex);
 
+	mm_iommu_adjust_locked_vm(mm, unlock_entries, false);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(mm_iommu_put);
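
The control flow of the new pinning loop is easy to lose in the diff noise, so here is a self-contained model of just that loop. do_pin() stands in for get_user_pages_longterm() and is rigged to fail partway; the chunk size here is arbitrary, whereas the kernel derives it from PAGE_SHIFT and MAX_ORDER so each call touches only a bounded slice of mem->hpages.

```c
#include <stdio.h>

/* Stand-in for get_user_pages_longterm(): succeeds for items below an
 * invented limit, returning a short (or zero) count past it. */
static long do_pin(unsigned long start, unsigned long n)
{
	unsigned long limit = 1000;
	if (start >= limit)
		return 0;
	return (start + n <= limit) ? (long)n : (long)(limit - start);
}

int main(void)
{
	unsigned long entries = 1536, chunk = 512, entry, pinned = 0;
	long ret = 0;

	for (entry = 0; entry < entries; entry += chunk) {
		unsigned long n = entries - entry < chunk ? entries - entry : chunk;

		ret = do_pin(entry, n);
		if (ret == (long)n) {	/* whole chunk succeeded */
			pinned += n;
			continue;
		}
		if (ret > 0)		/* partial chunk: count it, then stop */
			pinned += ret;
		break;
	}

	/* As in the kernel code: on a short count, cleanup must release
	 * exactly `pinned` references, not the last call's `ret`. */
	printf("pinned %lu of %lu\n", pinned, entries);
	return 0;
}
```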
