Skip to content

Commit

Permalink
arch: powerpc: fix building after binutils changes.
Browse files Browse the repository at this point in the history
'dssall' in mmu_context.c is an altivec instruction, build that accordingly.
'ptesync' is a PPC64 instruction, so don't emit it when not building for PPC64.
And apparently ifdef __powerpc64__ isn't enough in all configurations:
'stbcix' and friends are POWER6 instructions, hopefully not needed by
CONFIG_PPC64 in general, so gate them behind CONFIG_POWER6_CPU instead.

                 Signed-off-by: Michael B Heltne <michael.heltne@gmail.com>
  • Loading branch information
threader committed Jan 23, 2022
1 parent e783362 commit 226efa0
Show file tree
Hide file tree
Showing 4 changed files with 12 additions and 6 deletions.
7 changes: 4 additions & 3 deletions arch/powerpc/include/asm/io.h
Expand Up @@ -334,7 +334,7 @@ static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
}
#define __raw_writel __raw_writel

#ifdef __powerpc64__
#ifdef CONFIG_PPC64
static inline unsigned long __raw_readq(const volatile void __iomem *addr)
{
return *(volatile unsigned long __force *)PCI_FIX_ADDR(addr);
Expand All @@ -352,7 +352,8 @@ static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr)
__raw_writeq((__force unsigned long)cpu_to_be64(v), addr);
}
#define __raw_writeq_be __raw_writeq_be

#endif
#ifdef CONFIG_POWER6_CPU
/*
* Real mode versions of the above. Those instructions are only supposed
* to be used in hypervisor real mode as per the architecture spec.
Expand Down Expand Up @@ -417,7 +418,7 @@ static inline u64 __raw_rm_readq(volatile void __iomem *paddr)
: "=r" (ret) : "r" (paddr) : "memory");
return ret;
}
#endif /* __powerpc64__ */
#endif /* CONFIG_POWER6_CPU */

/*
*
Expand Down
4 changes: 3 additions & 1 deletion arch/powerpc/lib/sstep.c
Expand Up @@ -1465,7 +1465,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
switch ((word >> 1) & 0x3ff) {
case 598: /* sync */
op->type = BARRIER + BARRIER_SYNC;
#ifdef __powerpc64__
#ifdef CONFIG_PPC64
switch ((word >> 21) & 3) {
case 1: /* lwsync */
op->type = BARRIER + BARRIER_LWSYNC;
Expand Down Expand Up @@ -3267,9 +3267,11 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
case BARRIER_LWSYNC:
asm volatile("lwsync" : : : "memory");
break;
#ifdef CONFIG_PPC64
case BARRIER_PTESYNC:
asm volatile("ptesync" : : : "memory");
break;
#endif
}
break;

Expand Down
3 changes: 3 additions & 0 deletions arch/powerpc/mm/Makefile
Expand Up @@ -4,6 +4,9 @@
#

ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
ifeq ($(CONFIG_ALTIVEC),y)
CFLAGS_mmu_context.o += $(call cc-option, -maltivec, -mabi=altivec)
endif

obj-y := fault.o mem.o pgtable.o mmap.o maccess.o pageattr.o \
init_$(BITS).o pgtable_$(BITS).o \
Expand Down
4 changes: 2 additions & 2 deletions arch/powerpc/mm/pageattr.c
Expand Up @@ -54,11 +54,11 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
}

pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0);

#ifdef CONFIG_PPC64
/* See ptesync comment in radix__set_pte_at() */
if (radix_enabled())
asm volatile("ptesync": : :"memory");

#endif
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

spin_unlock(&init_mm.page_table_lock);
Expand Down

0 comments on commit 226efa0

Please sign in to comment.