Skip to content
Permalink
Browse files
microblaze: Enable experimental SMP functionality
This code requires:
- MicroBlaze v8.50.a or later
- AXI Intc v1.04.a or later
- System cache v2.00.a or later
- Per cpu interrupt controllers
- One global timer followed by per cpu timers
- Interrupt controllers with 4 software interrupts for ipi
- Identical interrupt assignments to all interrupt controllers
- Secondary cpu cores in sleep mode, jumping to kernel at wakeup

Secondary cpus are woken up through software interrupts, which are enabled
when the interrupt controller driver is probed. All interrupts are set up as
per-cpu interrupts because handling for distributing them among cpus is
missing. Allocating a particular irq to a particular cpu is not supported yet.

Signed-off-by: Stefan Asserhall <stefan.asserhall@xilinx.com>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
State: pending
[michals:
v5.15 update
microblaze: Add missing cacheflush.h for smp.c
]
  • Loading branch information
Michal Simek committed Feb 1, 2022
1 parent 08dfb7d commit 23b920bb9d53a7fcb8e9938f70228ae4d137f244
Show file tree
Hide file tree
Showing 12 changed files with 716 additions and 2 deletions.
@@ -65,6 +65,33 @@ config CPU_LITTLE_ENDIAN

endchoice

config SMP
	bool "SMP support (EXPERIMENTAL)"
	help
	  This option enables SMP support for MicroBlaze. Every CPU has its own
	  BRAM connected via LMB. The BRAM is used as CPU private memory, which
	  is one reason CPU hotplug is not yet supported.
	  Timers and interrupt controllers are placed on the same bus and
	  accessible by all CPUs, but every CPU is assigned one timer and one
	  interrupt controller. There is also one free running clock source
	  timer for the whole system. The boot CPU wakes up other CPUs by
	  sending a wake-up software interrupt to a specific CPU that is
	  sleeping. Wake-up will cause a jump to DDR start address where it is
	  assumed that the kernel is placed. There is currently no support for
	  placing the kernel at a different location.

config GENERIC_LOCKBREAK
	bool
	default y
	# PREEMPTION is the umbrella symbol (since v5.4) selected by both
	# PREEMPT and PREEMPT_RT; depending on the legacy PREEMPT symbol
	# alone would miss RT configurations.
	depends on SMP && PREEMPTION

config NR_CPUS
	int "Maximum number of CPUs (2-8)"
	depends on SMP
	range 2 8
	default "2"

config ARCH_HAS_ILOG2_U32
def_bool n

@@ -10,7 +10,6 @@
#ifndef _ASM_MICROBLAZE_ENTRY_H
#define _ASM_MICROBLAZE_ENTRY_H

#include <asm/percpu.h>
#include <asm/ptrace.h>
#include <linux/linkage.h>

@@ -21,12 +20,23 @@

#define PER_CPU(var) var

#ifdef CONFIG_SMP
/* Addresses in BRAM */
#define CURRENT_SAVE_ADDR 0x50
#define ENTRY_SP_ADDR 0x54
#define PT_POOL_SPACE_ADDR 0x100
#endif /* CONFIG_SMP */

# ifndef __ASSEMBLY__
#include <asm/percpu.h>

#ifndef CONFIG_SMP
DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
DECLARE_PER_CPU(unsigned int, KM); /* Kernel/user mode */
DECLARE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */
DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
#endif /* CONFIG_SMP */

extern asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall);
# endif /* __ASSEMBLY__ */
@@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Xilinx, Inc.
 * Copyright (C) 2012 ARM Ltd.
 *
 * IRQ statistics for MicroBlaze: UP builds fall back to the generic
 * implementation; SMP builds add per-cpu softirq state and per-IPI
 * counters.
 */
#ifndef _ASM_MICROBLAZE_HARDIRQ_H
#define _ASM_MICROBLAZE_HARDIRQ_H

# ifndef CONFIG_SMP
#include <asm-generic/hardirq.h>
# else
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/threads.h>
#include <asm/irq.h>
#include <linux/irq.h>

/* Per-cpu irq statistics: pending softirqs plus one counter per IPI type. */
typedef struct {
	unsigned int __softirq_pending;
	unsigned int ipi_irqs[MICROBLAZE_NUM_IPIS];
} ____cacheline_aligned irq_cpustat_t;

#define __ARCH_IRQ_STAT
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

#define local_softirq_pending_ref irq_stat.__softirq_pending

#define __inc_irq_stat(cpu, member) this_cpu_inc(irq_stat.member)
#define __get_irq_stat(cpu, member) this_cpu_read(irq_stat.member)

/*
 * Per-cpu irq statistic aggregation; NOTE(review): presumably sums the
 * ipi_irqs counters for /proc/interrupts — confirm against smp.c.
 */
u64 smp_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu smp_irq_stat_cpu

extern unsigned long irq_err_count;

/* A bad/spurious irq is only accounted, not otherwise handled. */
static inline void ack_bad_irq(unsigned int irq)
{
	irq_err_count++;
}
# endif /* CONFIG_SMP */

#endif /* _ASM_MICROBLAZE_HARDIRQ_H */
@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * smp.h: MicroBlaze-specific SMP code
 *
 * Original was a copy of PowerPC smp.h, which was a copy of
 * sparc smp.h. Now heavily modified for MicroBlaze.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 2013-2020 Xilinx, Inc.
 */

#ifndef _ASM_MICROBLAZE_SMP_H
#define _ASM_MICROBLAZE_SMP_H

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>

#include <asm/percpu.h>

/* Dispatch an incoming inter-processor interrupt of type @ipinr. */
void handle_IPI(int ipinr, struct pt_regs *regs);

/*
 * Register the low-level function used to raise an IPI on a target cpu.
 * NOTE(review): presumably installed by the interrupt controller driver
 * at probe time — confirm against the intc driver.
 */
void set_smp_cross_call(void (*)(unsigned int, unsigned int));

void smp_send_debugger_break(void);

/* cpu id is cached in thread_info; no special register access needed. */
#define raw_smp_processor_id() (current_thread_info()->cpu)

/*
 * IPI message types. MICROBLAZE_NUM_IPIS must remain last: it doubles as
 * the count (e.g. it sizes the per-cpu ipi_irqs[] statistics array).
 */
enum microblaze_msg {
	MICROBLAZE_MSG_RESCHEDULE = 0,
	MICROBLAZE_MSG_CALL_FUNCTION,
	MICROBLAZE_MSG_CALL_FUNCTION_SINGLE,
	MICROBLAZE_MSG_DEBUGGER_BREAK,
	MICROBLAZE_NUM_IPIS
};

/* Secondary-cpu bringup: C entry point, its thread_info, and early init. */
void start_secondary(void);
extern struct thread_info *secondary_ti;
void secondary_machine_init(void);

/* Hooks required by the generic smp_call_function machinery. */
void arch_send_call_function_single_ipi(int cpu);
void arch_send_call_function_ipi_mask(const struct cpumask *mask);

#endif /* _ASM_MICROBLAZE_SMP_H */
@@ -26,5 +26,6 @@ obj-y += misc.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_SMP) += smp.o

obj-y += entry.o
@@ -42,6 +42,11 @@ syscall_debug_table:
.space (__NR_syscalls * 4)
#endif /* DEBUG */

#ifdef CONFIG_SMP
#define CURRENT_SAVE CURRENT_SAVE_ADDR
#define ENTRY_SP ENTRY_SP_ADDR
#endif /* CONFIG_SMP */

#define C_ENTRY(name) .globl name; .align 4; name

/*
@@ -91,6 +96,10 @@ syscall_debug_table:
.macro clear_vms_ums
msrclr r0, MSR_VMS | MSR_UMS
.endm

.macro save_clear_vm
msrclr r11, MSR_VM
.endm
#else
.macro clear_bip
mfs r11, rmsr
@@ -153,6 +162,12 @@ syscall_debug_table:
andni r11, r11, (MSR_VMS|MSR_UMS)
mts rmsr,r11
.endm

.macro save_clear_vm
mfs r11, rmsr
andni r11, r11, MSR_VM
mts rmsr,r11
.endm
#endif

/* Define how to call high-level functions. With MMU, virtual mode must be
@@ -252,9 +267,22 @@ syscall_debug_table:
mts rmsr , r11; \
RESTORE_REGS_GP

#ifndef CONFIG_SMP
/*
 * UP: per-cpu state lives in ordinary kernel variables, so physical-mode
 * accesses translate through TOPHYS() and virtual-mode stores need no
 * MSR manipulation.
 */
#define LOAD_PER_CPU(reg, addr) lwi reg, r0, TOPHYS(PER_CPU(addr));
#define STORE_PER_CPU(reg, addr) swi reg, r0, TOPHYS(PER_CPU(addr));
#define STORE_PER_CPU_VM(reg, addr) swi reg, r0, PER_CPU(addr);
#else
/*
 * SMP: per-cpu state sits at fixed per-cpu BRAM physical addresses (the
 * *_ADDR defines in asm/entry.h), so the addresses are already physical.
 * STORE_PER_CPU_VM must drop MSR_VM and branch to the physical alias to
 * perform the store, then restore the MSR and return to virtual mode.
 * It clobbers r11 with the saved MSR (see save_clear_vm).
 */
#define LOAD_PER_CPU(reg, addr) lwi reg, r0, PER_CPU(addr);
#define STORE_PER_CPU(reg, addr) swi reg, r0, PER_CPU(addr);
#define STORE_PER_CPU_VM(reg, addr) \
	save_clear_vm; \
	bri TOPHYS(1f); \
1: \
	swi reg, r0, PER_CPU(addr); \
	mts rmsr, r11; \
	bri __phys_to_virt(2f); \
2:
#endif /* CONFIG_SMP */

#define SAVE_STATE \
STORE_PER_CPU(r1, ENTRY_SP) /* save stack */ \
@@ -30,13 +30,15 @@

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <linux/of_fdt.h> /* for OF_DT_HEADER */

#include <asm/setup.h> /* COMMAND_LINE_SIZE */
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>

.section .data
.global empty_zero_page
@@ -46,6 +48,11 @@ empty_zero_page:
.global swapper_pg_dir
swapper_pg_dir:
.space PAGE_SIZE
#ifdef CONFIG_SMP
temp_boot_stack:
.space 1024
#define CURRENT_SAVE CURRENT_SAVE_ADDR
#endif /* CONFIG_SMP */

.section .rodata
.align 4
@@ -76,6 +83,13 @@ real_start:
msrclr r8, 0 /* clear nothing - just read msr for test */
cmpu r8, r8, r1 /* r1 must contain msr reg content */

#ifdef CONFIG_SMP
/* skip FDT copy if secondary */
mfs r11, rpvr0
andi r11, r11, 0xFF
bnei r11, _setup_initial_mmu
#endif /* CONFIG_SMP */

/* r7 may point to an FDT, or there may be one linked in.
if it's in r7, we've got to save it away ASAP.
We ensure r7 points to a valid FDT, just in case the bootloader
@@ -142,6 +156,7 @@ _copy_bram:
#endif
/* We have to turn on the MMU right away. */

_setup_initial_mmu:
/*
* Set up the initial MMU state so we can do the first level of
* kernel initialization. This maps the first 16 MBytes of memory 1:1
@@ -316,6 +331,18 @@ jump_over2:
*/
turn_on_mmu:
ori r15,r0,start_here
#ifdef CONFIG_SMP
/*
* Read PVR and mask off all but CPU id bits to use to select
* boot sequence
*/
mfs r4, rpvr0
andi r4, r4, 0xFF

beqi r4, finish
ori r15, r0, start_secondary_cpu
finish:
#endif /* CONFIG_SMP */
ori r4,r0,MSR_KERNEL_VMS
mts rmsr,r4
nop
@@ -333,6 +360,10 @@ start_here:

/* Initialize r31 with current task address */
addik r31, r0, init_task
#ifdef CONFIG_MMU
/* save current for CPU 0 */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
#endif

addik r11, r0, machine_early_init
brald r15, r11
@@ -371,3 +402,61 @@ kernel_load_context:
nop
rted r17, 0 /* enable MMU and jump to start_kernel */
nop

#ifdef CONFIG_SMP
/*
 * Entry point for secondary processors.
 *
 * A secondary cpu arrives here after the boot cpu wakes it. A small
 * temporary stack in .data (temp_boot_stack) carries it until the
 * thread_info prepared by the boot cpu (secondary_ti) supplies the real
 * kernel stack.
 */
start_secondary_cpu:

	/* Initialize small data anchors */
	addik	r13, r0, _KERNEL_SDA_BASE_
	addik	r2, r0, _KERNEL_SDA2_BASE_

	/* Temporary stack pointer: top of temp_boot_stack, grows down */
	addik	r1, r0, temp_boot_stack + 1024 - 4

	/*
	 * Early per-cpu machine init. NOTE(review): original comment said
	 * "Initialize the exception table" — confirm secondary_machine_init
	 * actually installs the exception vectors.
	 */
	addik	r11, r0, secondary_machine_init
	brald	r15, r11
	nop

	/* Pick up the thread_info the boot cpu prepared for us */
	lwi	r1, r0, secondary_ti

	/* Initialize r31 with current task address */
	lwi	CURRENT_TASK, r1, TI_TASK
	/* save current for secondary CPU */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);

	/* Switch to the real kernel stack: top of the thread area */
	addi	r1, r1, THREAD_SIZE - 4
	swi	r0, r1, 0

	/* Initialize MMU: set zone protection register */
	ori	r11, r0, 0x10000000
	mts	rzpr, r11

	/* rted into the physical alias of the context-load stub below */
	ori	r15, r0, TOPHYS(kernel_load_context_secondary)
	ori	r4, r0, MSR_KERNEL
	mts	rmsr, r4
	nop
	bri	4
	rted	r15, 0
	nop

/* Load up the kernel context */
kernel_load_context_secondary:
	# Keep entry 0 and 1 valid. Entry 3 mapped to LMB can go away.
	ori	r5, r0, MICROBLAZE_LMB_TLB_ID
	mts	rtlbx, r5
	nop
	mts	rtlbhi, r0
	nop
	addi	r15, r0, machine_halt
	ori	r17, r0, start_secondary
	ori	r4, r0, MSR_KERNEL_VMS
	mts	rmsr, r4
	nop
	rted	r17, 0	/* enable MMU and jump to start_secondary */
	nop
#endif /* CONFIG_SMP */
@@ -243,10 +243,15 @@
*/

/* wrappers to restore state before coming to entry.S */
#ifdef CONFIG_SMP
#define CURRENT_SAVE __phys_to_virt(CURRENT_SAVE_ADDR)
#define pt_pool_space __phys_to_virt(PT_POOL_SPACE_ADDR)
#else
.section .data
.align 4
pt_pool_space:
.space PT_SIZE
#endif /* CONFIG_SMP */

#ifdef DEBUG
/* Create space for exception counting. */

0 comments on commit 23b920b

Please sign in to comment.