arm64: Support a shared release for spin-table
When releasing multiple CPUs that share a release address, we need them
to wait for their turn to boot. Add a mechanism to do this by booting
them until they enable the MMU before waiting their turn to enter
init_secondary.

Reviewed by:	jhibbits, kevans
Sponsored by:	Arm Ltd
Differential Revision:	https://reviews.freebsd.org/D45082
zxombie committed May 10, 2024
1 parent e353ac0 commit c78ebc6
Showing 2 changed files with 67 additions and 9 deletions.
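For orientation before the diff: a minimal C sketch of the handshake this change introduces. The names (release_one_cpu, spintable_wait_sketch) are illustrative only; the committed code splits the two halves between enable_cpu_spin() in mp_machdep.c and the assembly spintable_wait in locore.S, and uses dsb/sev/wfe rather than plain spinning.

#include <stdatomic.h>
#include <stdint.h>

#define	CPU_AFF_MASK	0xff00ffffffUL	/* Aff3..Aff0 fields of MPIDR_EL1 */

static _Atomic uint64_t ap_cpuid;	/* affinity of the AP allowed to proceed; 0 = none */

/* Releasing side (runs on the BSP, one AP at a time). */
static void
release_one_cpu(volatile uint64_t *release_addr, uint64_t target_mpidr,
    uint64_t entry_pa)
{
	atomic_store(&ap_cpuid, target_mpidr & CPU_AFF_MASK);
	*release_addr = entry_pa;	/* firmware parks waiting CPUs on this word */
	/* real code: "dsb sy; sev" to publish the store and wake the waiters */
	while (atomic_load(&ap_cpuid) != 0)
		;			/* real code: wfe until the AP hands the token back */
}

/* AP side: every woken CPU ends up here, but only the named one proceeds. */
static void
spintable_wait_sketch(uint64_t my_mpidr)
{
	uint64_t aff = my_mpidr & CPU_AFF_MASK;

	while (atomic_load(&ap_cpuid) != aff)
		;			/* not our turn yet; keep spinning */
	atomic_store(&ap_cpuid, 0);	/* hand the token back to the releasing CPU */
	/* real code: "dsb sy; sev" so the releasing CPU's wfe wakes up */
}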
55 changes: 51 additions & 4 deletions sys/arm64/arm64/locore.S
@@ -29,6 +29,7 @@
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/hypervisor.h>
#include <machine/param.h>
#include <machine/pte.h>
@@ -192,12 +193,50 @@ END(_start)

#ifdef SMP
/*
* mpentry(unsigned long)
* void
* mpentry_psci(unsigned long)
*
* Called by a core when it is being brought online.
* Called by a core when it is being brought online with psci.
* The data in x0 is passed straight to init_secondary.
*/
ENTRY(mpentry)
ENTRY(mpentry_psci)
mov x26, xzr
b mpentry_common
END(mpentry_psci)

/*
* void
* mpentry_spintable(void)
*
* Called by a core when it is being brought online with a spin-table.
* Reads the new CPU ID and passes this to init_secondary.
*/
ENTRY(mpentry_spintable)
ldr x26, =spintable_wait
b mpentry_common
END(mpentry_spintable)

/* Wait for the current CPU to be released */
LENTRY(spintable_wait)
/* Read the affinity bits from mpidr_el1 */
mrs x1, mpidr_el1
ldr x2, =CPU_AFF_MASK
and x1, x1, x2

adrp x2, ap_cpuid
1:
ldr x0, [x2, :lo12:ap_cpuid]
cmp x0, x1
b.ne 1b

str xzr, [x2, :lo12:ap_cpuid]
dsb sy
sev

ret
LEND(spintable_wait)

LENTRY(mpentry_common)
/* Disable interrupts */
msr daifset, #DAIF_INTR

@@ -228,6 +267,14 @@ ENTRY(mpentry)
mp_virtdone:
BTI_J

/*
 * Allow this CPU to wait until the kernel is ready for it,
 * e.g. when booting via spin-table with multiple CPUs sharing the
 * same release address.
 */
cbz x26, 1f
blr x26
1:

/* Start using the AP boot stack */
adrp x4, bootstack
ldr x4, [x4, :lo12:bootstack]
@@ -258,7 +305,7 @@ mp_virtdone:
msr tpidr_el1, x18

b init_secondary
END(mpentry)
LEND(mpentry_common)
#endif

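The locore.S side works by leaving a "wait for release" hook in x26: mpentry_psci clears it, mpentry_spintable points it at spintable_wait, and mpentry_common calls it (blr x26) once the MMU is on at mp_virtdone, so the wait loop can read ap_cpuid through its kernel virtual mapping. A conceptual C rendering follows; the *_sketch/_hook names are illustrative, not the committed symbols, and the real code carries the hook in x26 rather than as a C argument.

#include <stdint.h>

void init_secondary(uint64_t);		/* the real MI entry point (mp_machdep.c) */
uint64_t spintable_wait_hook(void);	/* blocks until released; returns our MPIDR affinity */

static void
mpentry_common_sketch(uint64_t arg, uint64_t (*release_hook)(void))
{
	/* ... mask interrupts, build page tables, enable the MMU (mp_virtdone) ... */
	if (release_hook != NULL)
		arg = release_hook();	/* spin-table only: wait for our turn */
	/* ... switch to the per-CPU boot stack ... */
	init_secondary(arg);
}

/* PSCI boot: firmware starts exactly one CPU and passes its cpuid in x0. */
void
mpentry_psci_sketch(uint64_t cpuid)
{
	mpentry_common_sketch(cpuid, NULL);
}

/* Spin-table boot: any parked CPU may wake here; identity comes from MPIDR_EL1. */
void
mpentry_spintable_sketch(void)
{
	mpentry_common_sketch(0, spintable_wait_hook);
}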
21 changes: 16 additions & 5 deletions sys/arm64/arm64/mp_machdep.c
@@ -106,14 +106,16 @@ static void ipi_stop(void *);
static u_int fdt_cpuid;
#endif

void mpentry(unsigned long cpuid);
void mpentry_psci(unsigned long cpuid);
void mpentry_spintable(void);
void init_secondary(uint64_t);

/* Synchronize AP startup. */
static struct mtx ap_boot_mtx;

/* Used to initialize the PCPU ahead of calling init_secondary(). */
void *bootpcpu;
uint64_t ap_cpuid;

/* Stacks for AP initialization, discarded once idle threads are started. */
void *bootstack;
@@ -420,7 +422,10 @@ enable_cpu_spin(uint64_t cpu, vm_paddr_t entry, vm_paddr_t release_paddr)
{
vm_paddr_t *release_addr;

release_addr = pmap_mapdev(release_paddr, sizeof(*release_addr));
ap_cpuid = cpu & CPU_AFF_MASK;

release_addr = pmap_mapdev_attr(release_paddr, sizeof(*release_addr),
VM_MEMATTR_DEFAULT);
if (release_addr == NULL)
return (ENOMEM);

@@ -432,6 +437,10 @@ enable_cpu_spin(uint64_t cpu, vm_paddr_t entry, vm_paddr_t release_paddr)
"sev \n"
::: "memory");

/* Wait for the target CPU to start */
while (atomic_load_64(&ap_cpuid) != 0)
__asm __volatile("wfe");

return (0);
}
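
The new wait at the end of enable_cpu_spin() above is the releasing side of the same handshake: the BSP parks in wfe until the released AP clears ap_cpuid from spintable_wait. A standalone sketch of just that loop, assuming the usual kernel headers that provide atomic_load_64() (e.g. machine/atomic.h):

extern uint64_t ap_cpuid;	/* cleared by the AP in spintable_wait */

static void
wait_for_ap_ack(void)
{
	/*
	 * The AP stores zero to ap_cpuid and then issues "dsb sy; sev".
	 * sev sets this CPU's event register even if we have not reached
	 * wfe yet, so the wfe below cannot sleep past the store, and the
	 * load re-checks the value after every wakeup.
	 */
	while (atomic_load_64(&ap_cpuid) != 0)
		__asm __volatile("wfe");
}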

@@ -475,18 +484,20 @@ start_cpu(u_int cpuid, uint64_t target_cpu, int domain, vm_paddr_t release_addr)
bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE;

printf("Starting CPU %u (%lx)\n", cpuid, target_cpu);
pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry);

/*
* A limited set of hardware we support can only do spintables and
* remain useful, due to lack of EL3. Thus, we'll usually fall into the
* PSCI branch here.
*/
MPASS(release_addr == 0 || !psci_present);
if (release_addr != 0)
if (release_addr != 0) {
pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry_spintable);
err = enable_cpu_spin(target_cpu, pa, release_addr);
else
} else {
pa = pmap_extract(kernel_pmap, (vm_offset_t)mpentry_psci);
err = enable_cpu_psci(target_cpu, pa, cpuid);
}

if (err != 0) {
pcpu_destroy(pcpup);
