mips64, loongson, octeon: switch to clockintr
- Remove mips64-specific clock interrupt scheduling bits from cpu_info.
- Add missing tick_nsec initialization to cpu_initclocks().
- Disable the glxclk interrupt clock on loongson.  visa@/miod@ say it
  can be removed later if it isn't useful for anything else.
- Wire up cp0_intrclock.
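  For context, a minimal sketch of the callback interface being wired
  up here, assuming the struct layout in <sys/clockintr.h> at the time
  of this commit:

	struct intrclock {
		void	*ic_cookie;			/* callback argument */
		void	(*ic_rearm)(void *, uint64_t);	/* fire in N nsec */
		void	(*ic_trigger)(void *);		/* fire ASAP */
	};

  cp0_intrclock (below) points ic_rearm at cp0_rearm_int5() and
  ic_trigger at cp0_trigger_int5_wrapper(); clockintr_cpu_init() takes
  a pointer to it on each CPU.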

Notes:

- The loongson apm_suspend() changes are untested, but deraadt@ claims
  APM suspend/resume on loongson doesn't work anyway.
- loongson and octeon now have a randomized statclock(), stathz = hz.

With input from miod@, visa@.  Tested by miod@, visa@.

Link: https://marc.info/?l=openbsd-tech&m=166776379603497&w=2

ok visa@ mlarkin@

(cherry picked from commit f124c57)
skotchandsoda authored and johnny-mnemonic committed Apr 1, 2023
1 parent 12d0007 commit 6b3715e
Showing 6 changed files with 121 additions and 74 deletions.
9 changes: 8 additions & 1 deletion sys/arch/loongson/dev/apm.c
@@ -1,4 +1,4 @@
/* $OpenBSD: apm.c,v 1.40 2022/04/06 18:59:26 naddy Exp $ */
/* $OpenBSD: apm.c,v 1.41 2022/11/19 16:23:48 cheloha Exp $ */

/*-
* Copyright (c) 2001 Alexander Guy. All rights reserved.
@@ -38,6 +38,7 @@
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/clockintr.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
@@ -415,7 +416,13 @@ apm_suspend(int state)
if (rv == 0)
rv = sys_platform->resume();
}

inittodr(gettime()); /* Move the clock forward */
#ifdef __HAVE_CLOCKINTR
clockintr_cpu_init(NULL);
clockintr_trigger();
#endif

config_suspend_all(DVACT_RESUME);

cold = 0;
5 changes: 4 additions & 1 deletion sys/arch/loongson/dev/glxclk.c
@@ -1,4 +1,4 @@
/* $OpenBSD: glxclk.c,v 1.7 2022/08/18 06:31:36 miod Exp $ */
/* $OpenBSD: glxclk.c,v 1.8 2022/11/19 16:23:48 cheloha Exp $ */

/*
* Copyright (c) 2013 Paul Irofti.
@@ -114,6 +114,9 @@ glxclk_attach(struct device *parent, struct device *self, void *aux)
u_int64_t wa;
int statint, minint;

printf(" not configured\n");
return;

glxclk_sc->sc_iot = gaa->gaa_iot;
glxclk_sc->sc_ioh = gaa->gaa_ioh;

4 changes: 3 additions & 1 deletion sys/arch/mips64/include/_types.h
@@ -1,4 +1,4 @@
/* $OpenBSD: _types.h,v 1.23 2018/03/05 01:15:25 deraadt Exp $ */
/* $OpenBSD: _types.h,v 1.24 2022/11/19 16:23:48 cheloha Exp $ */

/*-
* Copyright (c) 1990, 1993
@@ -35,6 +35,8 @@
#ifndef _MIPS64__TYPES_H_
#define _MIPS64__TYPES_H_

#define __HAVE_CLOCKINTR

/*
* _ALIGN(p) rounds p (pointer or byte index) up to a correctly-aligned
* value for all data types (int, long, ...). The result is an
7 changes: 4 additions & 3 deletions sys/arch/mips64/include/cpu.h
@@ -1,4 +1,4 @@
/* $OpenBSD: cpu.h,v 1.139 2022/08/22 00:35:06 cheloha Exp $ */
/* $OpenBSD: cpu.h,v 1.140 2022/11/19 16:23:48 cheloha Exp $ */

/*-
* Copyright (c) 1992, 1993
@@ -120,6 +120,7 @@

#if defined(_KERNEL) && !defined(_LOCORE)

#include <sys/clockintr.h>
#include <sys/device.h>
#include <machine/intr.h>
#include <sys/sched.h>
@@ -193,8 +194,8 @@ struct cpu_info {
uint32_t ci_softpending; /* pending soft interrupts */
int ci_clock_started;
volatile int ci_clock_deferred; /* clock interrupt postponed */
u_int32_t ci_cpu_counter_last; /* last compare value loaded */
u_int32_t ci_cpu_counter_interval; /* # of counter ticks/tick */
struct clockintr_queue
ci_queue;

u_int32_t ci_pendingticks;

156 changes: 97 additions & 59 deletions sys/arch/mips64/mips64/clock.c
@@ -1,4 +1,4 @@
/* $OpenBSD: clock.c,v 1.47 2022/10/31 13:59:10 visa Exp $ */
/* $OpenBSD: clock.c,v 1.48 2022/11/19 16:23:48 cheloha Exp $ */

/*
* Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -38,15 +38,19 @@
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/clockintr.h>
#include <sys/device.h>
#include <sys/evcount.h>
#include <sys/stdint.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <mips64/mips_cpu.h>

static struct evcount cp0_clock_count;
static int cp0_clock_irq = 5;
uint64_t cp0_nsec_cycle_ratio;
uint64_t cp0_nsec_max;

int clockmatch(struct device *, void *, void *);
void clockattach(struct device *, struct device *, void *);
@@ -59,9 +63,18 @@ const struct cfattach clock_ca = {
sizeof(struct device), clockmatch, clockattach
};

void cp0_startclock(struct cpu_info *);
void cp0_trigger_int5(void);
void cp0_rearm_int5(void *, uint64_t);
void cp0_trigger_int5_wrapper(void *);

const struct intrclock cp0_intrclock = {
.ic_rearm = cp0_rearm_int5,
.ic_trigger = cp0_trigger_int5_wrapper
};

uint32_t cp0_int5(uint32_t, struct trapframe *);
void cp0_startclock(struct cpu_info *);
void cp0_trigger_int5(void);
void cp0_trigger_int5_masked(void);

int
clockmatch(struct device *parent, void *vcf, void *aux)
@@ -77,8 +90,13 @@ clockmatch(struct device *parent, void *vcf, void *aux)
void
clockattach(struct device *parent, struct device *self, void *aux)
{
uint64_t cp0_freq = curcpu()->ci_hw.clock / CP0_CYCLE_DIVIDER;

printf(": int 5\n");

cp0_nsec_cycle_ratio = cp0_freq * (1ULL << 32) / 1000000000;
cp0_nsec_max = UINT64_MAX / cp0_nsec_cycle_ratio;

/*
* We need to register the interrupt now, for idle_mask to
* be computed correctly.
@@ -103,56 +121,30 @@ clockattach(struct device *parent, struct device *self, void *aux)
uint32_t
cp0_int5(uint32_t mask, struct trapframe *tf)
{
u_int32_t clkdiff, pendingticks = 0;
struct cpu_info *ci = curcpu();
int s;

atomic_inc_long((unsigned long *)&cp0_clock_count.ec_count);

cp0_set_compare(cp0_get_count() - 1); /* clear INT5 */

/*
* If we got an interrupt before we got ready to process it,
* retrigger it as far as possible. cpu_initclocks() will
* take care of retriggering it correctly.
* Just ignore the interrupt if we're not ready to process it.
* cpu_initclocks() will retrigger it later.
*/
if (ci->ci_clock_started == 0) {
cp0_set_compare(cp0_get_count() - 1);

if (!ci->ci_clock_started)
return CR_INT_5;
}

/*
* If the clock interrupt is logically masked, defer all
* work until it is logically unmasked from splx(9).
*/
if (tf->ipl >= IPL_CLOCK) {
ci->ci_clock_deferred = 1;
cp0_set_compare(cp0_get_count() - 1);
return CR_INT_5;
}
ci->ci_clock_deferred = 0;

/*
* Count how many ticks have passed since the last clock interrupt...
*/
clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
while (clkdiff >= ci->ci_cpu_counter_interval) {
ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
pendingticks++;
}
pendingticks++;
ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;

/*
* Set up next tick, and check if it has just been hit; in this
* case count it and schedule one tick ahead.
*/
cp0_set_compare(ci->ci_cpu_counter_last);
clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
if ((int)clkdiff >= 0) {
ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
pendingticks++;
cp0_set_compare(ci->ci_cpu_counter_last);
}

/*
* Process clock interrupt.
*/
@@ -163,22 +155,65 @@ cp0_int5(uint32_t mask, struct trapframe *tf)
sr = getsr();
ENABLEIPI();
#endif
while (pendingticks) {
atomic_inc_long((unsigned long *)&cp0_clock_count.ec_count);
hardclock(tf);
pendingticks--;
}
clockintr_dispatch(tf);
#ifdef MULTIPROCESSOR
setsr(sr);
#endif
ci->ci_ipl = s;

return CR_INT_5; /* Clock is always on 5 */
}

/*
* Trigger the clock interrupt.
*
* Arm INT5 to fire after the given number of nanoseconds have elapsed.
* Only try once. If we miss, let cp0_trigger_int5_masked() handle it.
*/
void
cp0_rearm_int5(void *unused, uint64_t nsecs)
{
uint32_t cycles, t0, t1, target;
register_t sr;

if (nsecs > cp0_nsec_max)
nsecs = cp0_nsec_max;
cycles = (nsecs * cp0_nsec_cycle_ratio) >> 32;

/*
 * Set compare, then immediately reread count.  If INT5 is not
 * already pending, we need to check whether we missed.  If
 * t0 + cycles did not overflow, then t0 <= t1 < target shows we
 * didn't miss.  Otherwise, either t0 <= t1 or t1 < target shows
 * we didn't miss.
 */
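/*
 * Worked example with illustrative values: if t0 = 0xfffffff0
 * and cycles = 0x20, then target = 0x10 wraps past zero, so
 * t0 > target.  A reread of t1 = 0xfffffff8 satisfies t0 <= t1,
 * i.e. the count has not yet wrapped past target, so INT5 is
 * still on its way and we did not miss.
 */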
sr = disableintr();
t0 = cp0_get_count();
target = t0 + cycles;
cp0_set_compare(target);
t1 = cp0_get_count();
if (!ISSET(cp0_get_cause(), CR_INT_5)) {
if (t0 <= target) {
if (target <= t1 || t1 < t0)
cp0_trigger_int5_masked();
} else {
if (t1 < t0 && target <= t1)
cp0_trigger_int5_masked();
}
}
setsr(sr);
}

void
cp0_trigger_int5(void)
{
register_t sr;

sr = disableintr();
cp0_trigger_int5_masked();
setsr(sr);
}

/*
* Arm INT5 to fire as soon as possible.
*
* We need to spin until either (a) INT5 is pending or (b) the compare
* register leads the count register, i.e. we know INT5 will be pending
* very soon.
@@ -190,33 +225,38 @@ cp0_int5(uint32_t mask, struct trapframe *tf)
* to arm the timer on most Octeon hardware.
*/
void
cp0_trigger_int5(void)
cp0_trigger_int5_masked(void)
{
uint32_t compare, offset = 16;
int leading = 0;
register_t sr;

sr = disableintr();
while (!leading && !ISSET(cp0_get_cause(), CR_INT_5)) {
while (!ISSET(cp0_get_cause(), CR_INT_5) && !leading) {
compare = cp0_get_count() + offset;
cp0_set_compare(compare);
leading = (int32_t)(compare - cp0_get_count()) > 0;
offset *= 2;
}
setsr(sr);
}

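/*
 * Adapt cp0_trigger_int5() to the intrclock ic_trigger callback
 * signature; the intrclock cookie is unused.
 */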
void
cp0_trigger_int5_wrapper(void *unused)
{
cp0_trigger_int5();
}

/*
* Start the real-time and statistics clocks. Leave stathz 0 since there
* are no other timers available.
* Start the clock interrupt dispatch cycle.
*/
void
cp0_startclock(struct cpu_info *ci)
{
int s;

#ifdef MULTIPROCESSOR
if (!CPU_IS_PRIMARY(ci)) {
if (CPU_IS_PRIMARY(ci)) {
stathz = hz;
profhz = stathz * 10;
clockintr_init(CL_RNDSTAT);
} else {
s = splhigh();
nanouptime(&ci->ci_schedstate.spc_runtime);
splx(s);
@@ -226,14 +266,12 @@

cp0_calibrate(ci);
}
#endif

clockintr_cpu_init(&cp0_intrclock);

/* Start the clock. */
s = splclock();
ci->ci_cpu_counter_interval =
(ci->ci_hw.clock / CP0_CYCLE_DIVIDER) / hz;
ci->ci_cpu_counter_last = cp0_get_count() + ci->ci_cpu_counter_interval;
cp0_set_compare(ci->ci_cpu_counter_last);
ci->ci_clock_started++;
ci->ci_clock_started = 1;
clockintr_trigger();
splx(s);
}
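
For reference, a standalone sketch (not part of the diff) of the 32.32
fixed-point nanosecond-to-cycle conversion used by cp0_rearm_int5()
above, assuming an illustrative 500 MHz CP0 count rate:

#include <stdint.h>

#define CP0_FREQ_HZ	500000000ULL	/* assumed for illustration */

int
main(void)
{
	/* cycles per nanosecond as a 32.32 fixed-point ratio */
	uint64_t nsec_cycle_ratio = CP0_FREQ_HZ * (1ULL << 32) / 1000000000;
	/* clamp nsecs so the multiplication below cannot overflow */
	uint64_t nsec_max = UINT64_MAX / nsec_cycle_ratio;
	uint64_t nsecs = 100000;	/* rearm 100us out */
	uint32_t cycles;

	if (nsecs > nsec_max)
		nsecs = nsec_max;
	cycles = (nsecs * nsec_cycle_ratio) >> 32;

	/* 100000 ns at 500 MHz is 50000 cycles */
	return cycles == 50000 ? 0 : 1;
}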
14 changes: 5 additions & 9 deletions sys/arch/mips64/mips64/mips64_machdep.c
@@ -1,4 +1,4 @@
/* $OpenBSD: mips64_machdep.c,v 1.39 2022/10/30 17:43:39 guenther Exp $ */
/* $OpenBSD: mips64_machdep.c,v 1.40 2022/11/19 16:23:48 cheloha Exp $ */

/*
* Copyright (c) 2009, 2010, 2012 Miodrag Vallat.
@@ -44,6 +44,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/clockintr.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/sysctl.h>
@@ -320,16 +321,15 @@ cp0_calibrate(struct cpu_info *ci)
}

/*
* Start the real-time and statistics clocks.
* Prepare to start the clock interrupt dispatch cycle.
*/
void
cpu_initclocks(void)
{
struct cpu_info *ci = curcpu();

profhz = hz;

tick = 1000000 / hz; /* number of micro-seconds between interrupts */
tick_nsec = 1000000000 / hz;

cp0_calibrate(ci);

@@ -349,14 +349,10 @@ cpu_initclocks(void)
(*md_startclock)(ci);
}

/*
* We assume newhz is either stathz or profhz, and that neither will
* change after being set up above. Could recalculate intervals here
* but that would be a drag.
*/
void
setstatclockrate(int newhz)
{
clockintr_setstatclockrate(newhz);
}

/*
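
As a sanity check on the tick bookkeeping above (a worked example, not
from the commit): with the traditional hz = 100, tick = 1000000 / 100 =
10000 microseconds and tick_nsec = 1000000000 / 100 = 10000000
nanoseconds, i.e. 10 ms between hardclock ticks.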
