Revert "mm: x86, arm64: add arch_has_hw_pte_young()"
This reverts commit b480620.
xanmod committed Dec 15, 2021
1 parent d704d88 commit bcde0eb
Showing 7 changed files with 21 additions and 41 deletions.
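The revert swaps one architecture hook for another. Distilled from the hunks below, the two generic fallbacks look roughly like this (a minimal orientation sketch, not part of the diff):

/* Removed by this revert (was the fallback in include/linux/pgtable.h):
 * reports whether the local CPU ("local") or all CPUs maintain the PTE
 * accessed bit in hardware; the default assumed no hardware support. */
static inline bool arch_has_hw_pte_young(bool local)
{
        return false;
}

/* Restored by this revert (fallback moves back into mm/memory.c): "true"
 * means a page fault is taken when accessing an old PTE, so the CoW path
 * must mark the PTE accessed itself. */
static inline bool arch_faults_on_old_pte(void)
{
        return true;
}

arm64 overrides both helpers via cpu_has_hw_af(); x86 always maintains the accessed bit in hardware, so its override is constant. See the per-architecture hunks below.
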
5 changes: 0 additions & 5 deletions arch/arm64/include/asm/cpufeature.h
@@ -779,11 +779,6 @@ static inline bool system_supports_tlb_range(void)
                cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
 }
 
-static inline bool system_has_hw_af(void)
-{
-        return IS_ENABLED(CONFIG_ARM64_HW_AFDBM) && cpus_have_const_cap(ARM64_HW_AF);
-}
-
 extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 
 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
13 changes: 5 additions & 8 deletions arch/arm64/include/asm/pgtable.h
@@ -999,24 +999,21 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
  * page after fork() + CoW for pfn mappings. We don't always have a
  * hardware-managed access flag on arm64.
  */
-static inline bool arch_has_hw_pte_young(bool local)
+static inline bool arch_faults_on_old_pte(void)
 {
-        if (local) {
-                WARN_ON(preemptible());
-                return cpu_has_hw_af();
-        }
+        WARN_ON(preemptible());
 
-        return system_has_hw_af();
+        return !cpu_has_hw_af();
 }
-#define arch_has_hw_pte_young           arch_has_hw_pte_young
+#define arch_faults_on_old_pte          arch_faults_on_old_pte
 
 /*
  * Experimentally, it's cheap to set the access flag in hardware and we
  * benefit from prefaulting mappings as 'old' to start with.
  */
 static inline bool arch_wants_old_prefaulted_pte(void)
 {
-        return arch_has_hw_pte_young(true);
+        return !arch_faults_on_old_pte();
 }
 #define arch_wants_old_prefaulted_pte   arch_wants_old_prefaulted_pte

10 changes: 0 additions & 10 deletions arch/arm64/kernel/cpufeature.c
@@ -2161,16 +2161,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .matches = has_hw_dbm,
                .cpu_enable = cpu_enable_hw_dbm,
        },
-       {
-               .desc = "Hardware update of the Access flag",
-               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
-               .capability = ARM64_HW_AF,
-               .sys_reg = SYS_ID_AA64MMFR1_EL1,
-               .sign = FTR_UNSIGNED,
-               .field_pos = ID_AA64MMFR1_HADBS_SHIFT,
-               .min_field_value = 1,
-               .matches = has_cpuid_feature,
-       },
 #endif
        {
                .desc = "CRC32 instructions",
1 change: 0 additions & 1 deletion arch/arm64/tools/cpucaps
@@ -35,7 +35,6 @@ HAS_STAGE2_FWB
 HAS_SYSREG_GIC_CPUIF
 HAS_TLB_RANGE
 HAS_VIRT_HOST_EXTN
-HW_AF
 HW_DBM
 KVM_PROTECTED_MODE
 MISMATCHED_CACHE_TYPE
6 changes: 3 additions & 3 deletions arch/x86/include/asm/pgtable.h
@@ -1398,10 +1398,10 @@ static inline bool arch_has_pfn_modify_check(void)
        return boot_cpu_has_bug(X86_BUG_L1TF);
 }
 
-#define arch_has_hw_pte_young arch_has_hw_pte_young
-static inline bool arch_has_hw_pte_young(bool local)
+#define arch_faults_on_old_pte arch_faults_on_old_pte
+static inline bool arch_faults_on_old_pte(void)
 {
-        return true;
+        return false;
 }
 
 #endif  /* __ASSEMBLY__ */
13 changes: 0 additions & 13 deletions include/linux/pgtable.h
@@ -258,19 +258,6 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
-#ifndef arch_has_hw_pte_young
-/*
- * Return whether the accessed bit is supported by the local CPU or all CPUs.
- *
- * Those arches which have hw access flag feature need to implement their own
- * helper. By default, "false" means pagefault will be hit on old pte.
- */
-static inline bool arch_has_hw_pte_young(bool local)
-{
-        return false;
-}
-#endif
-
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                        unsigned long address,
14 changes: 13 additions & 1 deletion mm/memory.c
@@ -121,6 +121,18 @@ int randomize_va_space __read_mostly =
                                        2;
 #endif
 
+#ifndef arch_faults_on_old_pte
+static inline bool arch_faults_on_old_pte(void)
+{
+        /*
+         * Those arches which don't have hw access flag feature need to
+         * implement their own helper. By default, "true" means pagefault
+         * will be hit on old pte.
+         */
+        return true;
+}
+#endif
+
 #ifndef arch_wants_old_prefaulted_pte
 static inline bool arch_wants_old_prefaulted_pte(void)
 {
@@ -2758,7 +2770,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
                 * On architectures with software "accessed" bits, we would
                 * take a double page fault, so mark it accessed here.
                 */
-               if (!arch_has_hw_pte_young(true) && !pte_young(vmf->orig_pte)) {
+               if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
                        pte_t entry;
 
                        vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
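
The replacement keeps the check's meaning: per the arm64 hunk earlier in this diff, arch_faults_on_old_pte() is the negation of the removed arch_has_hw_pte_young(true) for the local CPU. A standalone userspace sketch of that equivalence, with cpu_has_hw_af() stubbed out (purely illustrative, not kernel code):

#include <assert.h>
#include <stdbool.h>

static bool hw_af;      /* stand-in for the arm64 cpu_has_hw_af() capability check */

/* Removed helper, local-CPU case (see the arm64 pgtable.h hunk above). */
static bool arch_has_hw_pte_young_local(void)
{
        return hw_af;
}

/* Restored helper from the same hunk. */
static bool arch_faults_on_old_pte(void)
{
        return !hw_af;
}

int main(void)
{
        /* The cow_user_page() condition is unchanged in effect:
         * !arch_has_hw_pte_young(true) == arch_faults_on_old_pte(). */
        for (int i = 0; i < 2; i++) {
                hw_af = i;
                assert(!arch_has_hw_pte_young_local() == arch_faults_on_old_pte());
        }
        return 0;
}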
