Permalink
Browse files

spinlock

  • Loading branch information...
wh5a committed Jun 13, 2010
1 parent 80b9f2f commit 8223e70a9e8c9942f2fd02b6d4e046c7e6da34ed
Showing with 365 additions and 2 deletions.
  1. +35 −0 inc/x86.h
  2. +3 −1 kern/Makefrag
  3. +1 −1 kern/cpu.h
  4. +117 −0 kern/debug.c
  5. +28 −0 kern/debug.h
  6. +5 −0 kern/init.c
  7. +136 −0 kern/spinlock.c
  8. +40 −0 kern/spinlock.h
View
@@ -298,4 +298,39 @@ wrmsr(uint32_t msr, uint32_t eax, uint32_t edx)
__asm __volatile("wrmsr" : : "c" (msr), "a" (eax), "d" (edx));
}
// Atomically set *addr to newval and return the old value of *addr.
// Used to implement spinlocks: the exchange is a single atomic
// read-modify-write, and on x86 it is also a serializing (locked)
// operation, so it doubles as a memory barrier.
static inline uint32_t
xchg(volatile uint32_t *addr, uint32_t newval)
{
	uint32_t result;

	// The + in "+m" denotes a read-modify-write operand.
	// The matching constraint "1" (newval) loads newval into the
	// same register as operand 1 (result, pinned to %eax by "=a")
	// before the exchange; xchgl then swaps it with *addr.
	// NOTE(review): xchg with a memory operand locks the bus
	// implicitly, so the explicit "lock" prefix is redundant but
	// harmless.
	asm volatile("lock; xchgl %0, %1" :
		"+m" (*addr), "=a" (result) :
		"1" (newval) :
		"cc");
	return result;
}
+
/* While a spinlock will work if you just do nothing in the loop,
   Intel has defined a special instruction called PAUSE that notifies
   the processor that a spin loop is in progress and can improve
   system performance in such cases, especially on "hyper-threaded"
   processors that multiplex a single execution unit among multiple
   virtual CPUs.
*/
static inline void
pause(void)
{
	// The "memory" clobber keeps GCC from caching memory values
	// (e.g. a spinlock's lock word) in registers across iterations
	// of the spin loop that calls us.
	asm volatile("pause" : : : "memory");
}
+
// Return the current value of the %cs segment register.
// The low 2 bits of %cs hold the current privilege level
// (0 = kernel mode, 3 = user mode); debug_panic() uses this
// to tell which mode it was invoked from.
static gcc_inline uint16_t
read_cs(void)
{
	uint16_t cs;
	__asm __volatile("movw %%cs,%0" : "=rm" (cs));
	return cs;
}
+
#endif /* !JOS_INC_X86_H */
View
@@ -18,7 +18,6 @@ KERN_SRCFILES := kern/entry.S \
kern/console.c \
kern/monitor.c \
kern/pmap.c \
- kern/cpu.c \
kern/env.c \
kern/kclock.c \
kern/picirq.c \
@@ -28,6 +27,9 @@ KERN_SRCFILES := kern/entry.S \
kern/sched.c \
kern/syscall.c \
kern/kdebug.c \
+ kern/cpu.c \
+ kern/spinlock.c \
+ kern/debug.c \
lib/printfmt.c \
lib/readline.c \
lib/string.c
View
@@ -19,7 +19,7 @@
#include <inc/x86.h>
#include <inc/mmu.h>
#include <inc/trap.h>
-
+#include <inc/memlayout.h>
// Per-CPU kernel state structure.
// Exactly one page (4096 bytes) in size.
View
@@ -0,0 +1,117 @@
+/*
+ * Kernel debugging support.
+ * Called throughout the kernel, especially by assert() macro.
+ *
+ * Copyright (C) 2010 Yale University.
+ * See section "MIT License" in the file LICENSES for licensing terms.
+ *
+ * Primary author: Bryan Ford
+ */
+
+#include <inc/stdio.h>
+#include <inc/stdarg.h>
+#include <inc/assert.h>
+#include <inc/x86.h>
+
+#include <kern/console.h>
+#include <kern/debug.h>
+#include <kern/spinlock.h>
+
+
+// Variable panicstr contains argument to first call to panic; used as flag
+// to indicate that the kernel has already called panic and avoid recursion.
+static const char *panicstr;
+
// Panic is called on unresolvable fatal errors.
// It prints "kernel panic at file:line:" followed by the formatted
// message and a backtrace, then spins forever (never returns,
// matching the gcc_noreturn declaration in debug.h).
void
debug_panic(const char *file, int line, const char *fmt,...)
{
	va_list ap;
	int i;

	// Avoid infinite recursion if we're panicking from kernel mode.
	// (Low 2 bits of %cs == 0 means CPL 0, i.e. kernel mode.)
	if ((read_cs() & 3) == 0) {
		if (panicstr)
			goto dead;	// already panicked once: just halt
		panicstr = fmt;		// record first panic as recursion flag
	}

	// First print the requested message
	va_start(ap, fmt);
	cprintf("kernel panic at %s:%d: ", file, line);
	vcprintf(fmt, ap);
	cprintf("\n");
	va_end(ap);

	// Then print a backtrace of the kernel call chain
	uint32_t eips[DEBUG_TRACEFRAMES];
	debug_trace(read_ebp(), eips);
	for (i = 0; i < DEBUG_TRACEFRAMES && eips[i] != 0; i++)
		cprintf(" from %08x\n", eips[i]);

dead:
	while (1) ;	// just spin
}
+
// Like debug_panic() in output format ("kernel warning at file:line:"
// plus the formatted message), but non-fatal: returns to the caller.
void
debug_warn(const char *file, int line, const char *fmt,...)
{
	va_list args;

	va_start(args, fmt);
	cprintf("kernel warning at %s:%d: ", file, line);
	vcprintf(fmt, args);
	cprintf("\n");
	va_end(args);
}
+
+// Record the current call stack in eips[] by following the %ebp chain.
+void gcc_noinline
+debug_trace(uint32_t ebp, uint32_t eips[DEBUG_TRACEFRAMES])
+{
+ const uint32_t *frame = (const uint32_t*)ebp;
+ int i;
+
+ for (i = 0; i < 10 && frame; i++) {
+ eips[i] = frame[1]; // saved %eip
+ frame = (uint32_t*)frame[0]; // saved ebp
+ }
+ for (; i < 10; i++) // zero out rest of eips
+ eips[i] = 0;
+}
+
+
// Helpers for debug_check(): each level selects between two textually
// distinct (but behaviorally identical) call sites based on one bit of
// r, so different r values yield different return addresses in the
// recorded trace. gcc_noinline keeps each level a real stack frame.
static void gcc_noinline f3(int r, uint32_t *e) { debug_trace(read_ebp(), e); }
static void gcc_noinline f2(int r, uint32_t *e) { r & 2 ? f3(r,e) : f3(r,e); }
static void gcc_noinline f1(int r, uint32_t *e) { r & 1 ? f2(r,e) : f2(r,e); }
+
// Test the backtrace implementation for correct operation
void
debug_check(void)
{
	uint32_t eips[4][DEBUG_TRACEFRAMES];
	int r, i;

	// produce several related backtraces...
	for (i = 0; i < 4; i++)
		f1(i, eips[i]);

	// ...and make sure they come out correctly.
	// Each trace should hold exactly 5 nonzero frames:
	// f3, f2, f1, debug_check, and debug_check's caller
	// (assumes the %ebp chain terminates there — TODO confirm).
	for (r = 0; r < 4; r++)
		for (i = 0; i < DEBUG_TRACEFRAMES; i++) {
			assert((eips[r][i] != 0) == (i < 5));
			if (i >= 2)	// frames from f1 up don't depend on r
				assert(eips[r][i] == eips[0][i]);
		}
	// eips[r][0] is the return address inside f2,
	// i.e. f2's call site of f3, selected by bit 1 of r.
	assert(eips[0][0] == eips[1][0]);
	assert(eips[2][0] == eips[3][0]);
	assert(eips[1][0] != eips[2][0]);
	// eips[r][1] is the return address inside f1,
	// i.e. f1's call site of f2, selected by bit 0 of r.
	assert(eips[0][1] == eips[2][1]);
	assert(eips[1][1] == eips[3][1]);
	assert(eips[0][1] != eips[1][1]);

	cprintf("debug_check() succeeded!\n");
}
+
View
@@ -0,0 +1,28 @@
+/*
+ * Kernel debugging support.
+ *
+ * Copyright (C) 2010 Yale University.
+ * See section "MIT License" in the file LICENSES for licensing terms.
+ *
+ * Primary author: Bryan Ford
+ */
+
#ifndef PIOS_KERN_DEBUG_H_
#define PIOS_KERN_DEBUG_H_
#ifndef JOS_KERNEL
# error "This is a kernel header; user programs should not #include it"
#endif

#include <inc/types.h>
#include <inc/gcc.h>


// Maximum number of stack frames recorded by debug_trace().
#define DEBUG_TRACEFRAMES 10


// Print "kernel warning at file:line:" plus a message; returns normally.
void debug_warn(const char*, int, const char*, ...);
// Print "kernel panic at file:line:", a message, and a backtrace,
// then spin forever.
void debug_panic(const char*, int, const char*, ...) gcc_noreturn;
// Record the call stack starting at ebp into eips[], zero-terminated.
void debug_trace(uint32_t ebp, uint32_t eips[DEBUG_TRACEFRAMES]);
// Self-test of the backtrace implementation.
void debug_check(void);

#endif /* PIOS_KERN_DEBUG_H_ */
View
@@ -15,6 +15,7 @@
#include <kern/time.h>
#include <kern/pci.h>
#include <kern/cpu.h>
+#include <kern/spinlock.h>
// Called first from entry.S on the bootstrap processor,
// and later from boot/bootother.S on all other processors.
@@ -46,6 +47,10 @@ init(void)
// enable_sep();
+ // Lab 8: check spinlock implementation
+ if (cpu_onboot())
+ spinlock_check();
+
// Lab 4 multitasking initialization functions
pic_init();
kclock_init();
View
@@ -0,0 +1,136 @@
+/*
+ * Spin locks for multiprocessor mutual exclusion in the kernel.
+ *
+ * Copyright (C) 1997 Massachusetts Institute of Technology
+ * See section "MIT License" in the file LICENSES for licensing terms.
+ *
+ * Derived from the xv6 instructional operating system from MIT.
+ * Adapted for PIOS by Bryan Ford at Yale University.
+ */
+
+#include <inc/assert.h>
+#include <inc/x86.h>
+
+#include <kern/cpu.h>
+#include <kern/spinlock.h>
+#include <kern/console.h>
+
+
+void
+spinlock_init_(struct spinlock *lk, const char *file, int line)
+{
+ lk->file = file;
+ lk->line = line;
+ lk->locked = 0;
+ lk->cpu = 0;
+}
+
// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
void
spinlock_acquire(struct spinlock *lk)
{
	// These spinlocks are non-recursive: re-acquiring a lock this
	// CPU already holds would deadlock, so treat it as a fatal bug.
	if(spinlock_holding(lk))
		panic("recursive spinlock_acquire");

	// The xchg is atomic.
	// It also serializes,
	// so that reads after acquire are not reordered before it.
	while(xchg(&lk->locked, 1) != 0)
		pause(); // let CPU know we're in a spin loop

	// Record info about lock acquisition for debugging:
	// which CPU took the lock, and the call chain that took it.
	lk->cpu = cpu_cur();
	debug_trace(read_ebp(), lk->eips);
}
+
// Release the lock.
// Fatal error if the calling CPU does not currently hold it.
void
spinlock_release(struct spinlock *lk)
{
	if(!spinlock_holding(lk))
		panic("spinlock_release");

	// Clear the debug info while we still hold the lock; once the
	// xchg below completes, another CPU may acquire and overwrite it.
	lk->eips[0] = 0;
	lk->cpu = 0;

	// The xchg serializes, so that reads before release are
	// not reordered after it. The 1996 PentiumPro manual (Volume 3,
	// 7.2) says reads can be carried out speculatively and in
	// any order, which implies we need to serialize here.
	// But the 2007 Intel 64 Architecture Memory Ordering White
	// Paper says that Intel 64 and IA-32 will not move a load
	// after a store. So lock->locked = 0 would work here.
	// The xchg being asm volatile ensures gcc emits it after
	// the above assignments (and after the critical section).
	xchg(&lk->locked, 0);
}
+
+// Check whether this cpu is holding the lock.
+int
+spinlock_holding(spinlock *lock)
+{
+ return lock->locked && lock->cpu == cpu_cur();
+}
+
// Function that simply recurses to a specified depth.
// The useless return value and volatile parameter are
// so GCC doesn't collapse it via tail-call elimination.
// At depth 0 it acquires lk, so a call with depth == i leaves i+1
// frames of this function in the backtrace recorded by the acquire.
int gcc_noinline
spinlock_godeep(volatile int depth, spinlock* lk)
{
	if (depth==0) { spinlock_acquire(lk); return 1; }
	else return spinlock_godeep(depth-1, lk) * depth;
}
+
+void spinlock_check()
+{
+ const int NUMLOCKS=10;
+ const int NUMRUNS=5;
+ int i,j,run;
+ const char* file = "spinlock_check";
+ spinlock locks[NUMLOCKS];
+
+ // Initialize the locks
+ for(i=0;i<NUMLOCKS;i++) spinlock_init_(&locks[i], file, 0);
+ // Make sure that all locks have CPU set to NULL initially
+ for(i=0;i<NUMLOCKS;i++) assert(locks[i].cpu==NULL);
+ // Make sure that all locks have the correct debug info.
+ for(i=0;i<NUMLOCKS;i++) assert(locks[i].file==file);
+
+ for (run=0;run<NUMRUNS;run++)
+ {
+ // Lock all locks
+ for(i=0;i<NUMLOCKS;i++)
+ spinlock_godeep(i, &locks[i]);
+
+ // Make sure that all locks have the right CPU
+ for(i=0;i<NUMLOCKS;i++)
+ assert(locks[i].cpu == cpu_cur());
+ // Make sure that all locks have holding correctly implemented.
+ for(i=0;i<NUMLOCKS;i++)
+ assert(spinlock_holding(&locks[i]) != 0);
+ // Make sure that top i frames are somewhere in godeep.
+ for(i=0;i<NUMLOCKS;i++)
+ {
+ for(j=0; j<=i && j < DEBUG_TRACEFRAMES ; j++)
+ {
+ assert(locks[i].eips[j] >=
+ (uint32_t)spinlock_godeep);
+ assert(locks[i].eips[j] <
+ (uint32_t)spinlock_godeep+100);
+ }
+ }
+
+ // Release all locks
+ for(i=0;i<NUMLOCKS;i++) spinlock_release(&locks[i]);
+ // Make sure that the CPU has been cleared
+ for(i=0;i<NUMLOCKS;i++) assert(locks[i].cpu == NULL);
+ for(i=0;i<NUMLOCKS;i++) assert(locks[i].eips[0]==0);
+ // Make sure that all locks have holding correctly implemented.
+ for(i=0;i<NUMLOCKS;i++) assert(spinlock_holding(&locks[i]) == 0);
+ }
+ cprintf("spinlock_check() succeeded!\n");
+}
Oops, something went wrong.

0 comments on commit 8223e70

Please sign in to comment.