powerpc/watchpoint: Move DAWR detection logic outside of hw_breakpoint.c
Power10 hw has multiple DAWRs, but hw doesn't tell which DAWR caused
the exception, so we use sw logic in hw_breakpoint.c to detect that.
But hw_breakpoint.c is compiled only when CONFIG_HAVE_HW_BREAKPOINT=y.
Move the DAWR detection logic outside of hw_breakpoint.c so that it
can be reused when CONFIG_HAVE_HW_BREAKPOINT is not set.

Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Ravi Bangoria authored and intel-lab-lkp committed Aug 25, 2020
1 parent 1a095cb commit 4899293
Showing 4 changed files with 174 additions and 158 deletions.
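For orientation, here is a condensed sketch of the sw detection being moved, paraphrased from the hw_breakpoint_handler() hunk further down; the slot bookkeeping is elided and the hit flag is illustrative:

/*
 * Sketch only (not part of the patch): decode the faulting instruction
 * once, then test every watchpoint slot's constraints to infer which
 * DAWR(s) plausibly caused the exception.
 */
wp_get_instr_detail(regs, &instr, &type, &size, &ea);
for (i = 0; i < nr_wp_slots(); i++) {
	info[i] = counter_arch_bp(bp[i]);
	if (wp_check_constraints(regs, instr, ea, type, size, info[i]))
		hit[i] = true;	/* this slot matches the access */
}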
8 changes: 8 additions & 0 deletions arch/powerpc/include/asm/hw_breakpoint.h
@@ -10,6 +10,7 @@
#define _PPC_BOOK3S_64_HW_BREAKPOINT_H

#include <asm/cpu_has_feature.h>
#include <asm/inst.h>

#ifdef __KERNEL__
struct arch_hw_breakpoint {
@@ -52,6 +53,13 @@ static inline int nr_wp_slots(void)
	return cpu_has_feature(CPU_FTR_DAWR1) ? 2 : 1;
}

bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr,
			  unsigned long ea, int type, int size,
			  struct arch_hw_breakpoint *info);

void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
			 int *type, int *size, unsigned long *ea);

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <linux/kdebug.h>
#include <asm/reg.h>
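With the prototypes exported above, code built with CONFIG_HAVE_HW_BREAKPOINT=n can reuse the same detection. A minimal sketch of such a caller follows; the function name and scaffolding are hypothetical, and only the wp_*() calls and types come from this patch:

/* Hypothetical caller outside hw_breakpoint.c; not part of the patch. */
static void handle_dawr_exception(struct pt_regs *regs,
				  struct arch_hw_breakpoint *brk)
{
	struct ppc_inst instr = ppc_inst(0);
	int type = 0, size = 0;
	unsigned long ea = 0;

	wp_get_instr_detail(regs, &instr, &type, &size, &ea);

	/*
	 * If the instruction fetch failed, instr stays ppc_inst(0);
	 * wp_check_constraints() special-cases that and falls back to
	 * DAR-based matching.
	 */
	if (wp_check_constraints(regs, instr, ea, type, size, brk)) {
		/* genuine (or extraneous-but-valid) match: handle it */
	}
}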
3 changes: 2 additions & 1 deletion arch/powerpc/kernel/Makefile
@@ -45,7 +45,8 @@ obj-y				:= cputable.o syscalls.o \
				   signal.o sysfs.o cacheinfo.o time.o \
				   prom.o traps.o setup-common.o \
				   udbg.o misc.o io.o misc_$(BITS).o \
-				   of_platform.o prom_parse.o firmware.o
+				   of_platform.o prom_parse.o firmware.o \
+				   hw_breakpoint_constraints.o
obj-y				+= ptrace/
obj-$(CONFIG_PPC64)		+= setup_64.o \
				   paca.o nvram_64.o note.o syscall_64.o
159 changes: 2 additions & 157 deletions arch/powerpc/kernel/hw_breakpoint.c
@@ -494,161 +494,6 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
	}
}

static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
	return ((info->address <= dar) && (dar - info->address < info->len));
}

static bool ea_user_range_overlaps(unsigned long ea, int size,
				   struct arch_hw_breakpoint *info)
{
	return ((ea < info->address + info->len) &&
		(ea + size > info->address));
}

static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
	unsigned long hw_start_addr, hw_end_addr;

	hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
	hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

	return ((hw_start_addr <= dar) && (hw_end_addr > dar));
}

static bool ea_hw_range_overlaps(unsigned long ea, int size,
				 struct arch_hw_breakpoint *info)
{
	unsigned long hw_start_addr, hw_end_addr;
	unsigned long align_size = HW_BREAKPOINT_SIZE;

	/*
	 * On p10 predecessors, quadword is handled differently than
	 * other instructions.
	 */
	if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16)
		align_size = HW_BREAKPOINT_SIZE_QUADWORD;

	hw_start_addr = ALIGN_DOWN(info->address, align_size);
	hw_end_addr = ALIGN(info->address + info->len, align_size);

	return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
}

/*
 * If hw has multiple DAWR registers, we also need to check all
 * dawrx constraint bits to confirm this is _really_ a valid event.
 * If type is UNKNOWN, but privilege level matches, consider it as
 * a positive match.
 */
static bool check_dawrx_constraints(struct pt_regs *regs, int type,
				    struct arch_hw_breakpoint *info)
{
	if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
		return false;

	/*
	 * The cache management instructions other than dcbz never
	 * cause a match, i.e. if type is CACHEOP, the instruction
	 * is dcbz, and dcbz is treated as a store.
	 */
	if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE))
		return false;

	if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
		return false;

	if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER))
		return false;

	return true;
}

/*
 * Return true if the event is valid wrt dawr configuration,
 * including an extraneous exception. Otherwise return false.
 */
static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr,
			      unsigned long ea, int type, int size,
			      struct arch_hw_breakpoint *info)
{
	bool in_user_range = dar_in_user_range(regs->dar, info);
	bool dawrx_constraints;

	/*
	 * 8xx supports only one breakpoint and thus we can
	 * unconditionally return true.
	 */
	if (IS_ENABLED(CONFIG_PPC_8xx)) {
		if (!in_user_range)
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		return true;
	}

	if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
		    !dar_in_hw_range(regs->dar, info))
			return false;

		return true;
	}

	dawrx_constraints = check_dawrx_constraints(regs, type, info);

	if (type == UNKNOWN) {
		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
		    !dar_in_hw_range(regs->dar, info))
			return false;

		return dawrx_constraints;
	}

	if (ea_user_range_overlaps(ea, size, info))
		return dawrx_constraints;

	if (ea_hw_range_overlaps(ea, size, info)) {
		if (dawrx_constraints) {
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
			return true;
		}
	}
	return false;
}

static int cache_op_size(void)
{
#ifdef __powerpc64__
	return ppc64_caches.l1d.block_size;
#else
	return L1_CACHE_BYTES;
#endif
}

static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
			     int *type, int *size, unsigned long *ea)
{
	struct instruction_op op;

	if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip))
		return;

	analyse_instr(&op, regs, *instr);
	*type = GETTYPE(op.type);
	*ea = op.ea;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		*ea &= 0xffffffffUL;
#endif

	*size = GETSIZE(op.type);
	if (*type == CACHEOP) {
		*size = cache_op_size();
		*ea &= ~(*size - 1);
	} else if (*type == LOAD_VMX || *type == STORE_VMX) {
		*ea &= ~(*size - 1);
	}
}

static bool is_larx_stcx_instr(int type)
{
	return type == LARX || type == STCX;
@@ -732,7 +577,7 @@ int hw_breakpoint_handler(struct die_args *args)
	rcu_read_lock();

	if (!IS_ENABLED(CONFIG_PPC_8xx))
-		get_instr_detail(regs, &instr, &type, &size, &ea);
+		wp_get_instr_detail(regs, &instr, &type, &size, &ea);

	for (i = 0; i < nr_wp_slots(); i++) {
		bp[i] = __this_cpu_read(bp_per_reg[i]);
@@ -742,7 +587,7 @@ int hw_breakpoint_handler(struct die_args *args)
		info[i] = counter_arch_bp(bp[i]);
		info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;

-		if (check_constraints(regs, instr, ea, type, size, info[i])) {
+		if (wp_check_constraints(regs, instr, ea, type, size, info[i])) {
			if (!IS_ENABLED(CONFIG_PPC_8xx) &&
			    ppc_inst_equal(instr, ppc_inst(0))) {
				handler_error(bp[i], info[i]);
162 changes: 162 additions & 0 deletions arch/powerpc/kernel/hw_breakpoint_constraints.c
@@ -0,0 +1,162 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <asm/hw_breakpoint.h>
#include <asm/sstep.h>
#include <asm/cache.h>

static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
	return ((info->address <= dar) && (dar - info->address < info->len));
}

static bool ea_user_range_overlaps(unsigned long ea, int size,
				   struct arch_hw_breakpoint *info)
{
	return ((ea < info->address + info->len) &&
		(ea + size > info->address));
}

static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
	unsigned long hw_start_addr, hw_end_addr;

	hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
	hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

	return ((hw_start_addr <= dar) && (hw_end_addr > dar));
}

static bool ea_hw_range_overlaps(unsigned long ea, int size,
				 struct arch_hw_breakpoint *info)
{
	unsigned long hw_start_addr, hw_end_addr;
	unsigned long align_size = HW_BREAKPOINT_SIZE;

	/*
	 * On p10 predecessors, quadword is handled differently than
	 * other instructions.
	 */
	if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16)
		align_size = HW_BREAKPOINT_SIZE_QUADWORD;

	hw_start_addr = ALIGN_DOWN(info->address, align_size);
	hw_end_addr = ALIGN(info->address + info->len, align_size);

	return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
}

/*
 * If hw has multiple DAWR registers, we also need to check all
 * dawrx constraint bits to confirm this is _really_ a valid event.
 * If type is UNKNOWN, but privilege level matches, consider it as
 * a positive match.
 */
static bool check_dawrx_constraints(struct pt_regs *regs, int type,
				    struct arch_hw_breakpoint *info)
{
	if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
		return false;

	/*
	 * The cache management instructions other than dcbz never
	 * cause a match, i.e. if type is CACHEOP, the instruction
	 * is dcbz, and dcbz is treated as a store.
	 */
	if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE))
		return false;

	if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
		return false;

	if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER))
		return false;

	return true;
}

/*
 * Return true if the event is valid wrt dawr configuration,
 * including an extraneous exception. Otherwise return false.
 */
bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr,
			  unsigned long ea, int type, int size,
			  struct arch_hw_breakpoint *info)
{
	bool in_user_range = dar_in_user_range(regs->dar, info);
	bool dawrx_constraints;

	/*
	 * 8xx supports only one breakpoint and thus we can
	 * unconditionally return true.
	 */
	if (IS_ENABLED(CONFIG_PPC_8xx)) {
		if (!in_user_range)
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		return true;
	}

	if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
		    !dar_in_hw_range(regs->dar, info))
			return false;

		return true;
	}

	dawrx_constraints = check_dawrx_constraints(regs, type, info);

	if (type == UNKNOWN) {
		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
		    !dar_in_hw_range(regs->dar, info))
			return false;

		return dawrx_constraints;
	}

	if (ea_user_range_overlaps(ea, size, info))
		return dawrx_constraints;

	if (ea_hw_range_overlaps(ea, size, info)) {
		if (dawrx_constraints) {
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
			return true;
		}
	}
	return false;
}

static int cache_op_size(void)
{
#ifdef __powerpc64__
	return ppc64_caches.l1d.block_size;
#else
	return L1_CACHE_BYTES;
#endif
}

void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
			 int *type, int *size, unsigned long *ea)
{
	struct instruction_op op;

	if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip))
		return;

	analyse_instr(&op, regs, *instr);
	*type = GETTYPE(op.type);
	*ea = op.ea;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		*ea &= 0xffffffffUL;
#endif

	*size = GETSIZE(op.type);
	if (*type == CACHEOP) {
		*size = cache_op_size();
		*ea &= ~(*size - 1);
	} else if (*type == LOAD_VMX || *type == STORE_VMX) {
		*ea &= ~(*size - 1);
	}
}
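To see why the HW_BRK_TYPE_EXTRANEOUS_IRQ marking exists, here is a standalone user-space illustration of the hw-range widening performed by ea_hw_range_overlaps() above. It assumes a HW_BREAKPOINT_SIZE of 8 bytes (the DAWR doubleword compare granularity); the macro names and example addresses are illustrative only:

/* Standalone illustration (not kernel code) of the widened-range check. */
#include <stdbool.h>
#include <stdio.h>

#define HW_BP_SIZE		8UL	/* assumed HW_BREAKPOINT_SIZE */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

static bool hw_range_overlaps(unsigned long ea, int size,
			      unsigned long addr, int len)
{
	/* Widen the watched bytes to hw compare granularity. */
	unsigned long start = ALIGN_DOWN(addr, HW_BP_SIZE);
	unsigned long end = ALIGN(addr + len, HW_BP_SIZE);

	return ea < end && ea + size > start;
}

int main(void)
{
	/* Watch 2 bytes at 0x1003: hw compares the whole [0x1000, 0x1008). */

	/* A 2-byte access at 0x1000 misses the watched bytes but still
	 * hits the widened range: an extraneous exception. */
	printf("%d\n", hw_range_overlaps(0x1000, 2, 0x1003, 2));	/* 1 */

	/* A 4-byte access at 0x1008 is outside the widened range. */
	printf("%d\n", hw_range_overlaps(0x1008, 4, 0x1003, 2));	/* 0 */
	return 0;
}

Because the hw compares only this widened range, an access can miss the user's exact byte range yet still raise the exception; wp_check_constraints() reports that case as HW_BRK_TYPE_EXTRANEOUS_IRQ rather than a real hit.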
