Skip to content
Browse files

lab2

  • Loading branch information...
1 parent ebd5a0a commit b9a9356306bf497641271c1d97a894b22a39cd95 @aclements aclements committed Sep 16, 2009
Showing with 1,557 additions and 20 deletions.
  1. +7 −4 .gdbinit.tmpl
  2. +2 −2 conf/lab.mk
  3. +34 −0 grade-lab2.sh
  4. +52 −0 inc/env.h
  5. +1 −1 inc/mmu.h
  6. +81 −0 inc/trap.h
  7. +389 −0 kern/env.c
  8. +43 −0 kern/env.h
  9. +5 −13 kern/init.c
  10. +28 −0 kern/kclock.c
  11. +33 −0 kern/kclock.h
  12. +770 −0 kern/pmap.c
  13. +91 −0 kern/pmap.h
  14. +21 −0 kern/trap.h
View
11 .gdbinit.tmpl
@@ -1,9 +1,5 @@
set $lastcs = -1
-# This fails on Darwin because the default gdb has no ELF support
-# echo + symbol-file obj/kern/kernel\n
-# symbol-file obj/kern/kernel
-
define hook-stop
# There doesn't seem to be a good way to detect if we're in 16- or
# 32-bit mode, but we always run with CS == 8 in 32-bit mode.
@@ -25,3 +21,10 @@ end
echo + target remote localhost:1234\n
target remote localhost:1234
+
+# If this fails, it's probably because your GDB doesn't support ELF.
+# Look at the tools page at
+# http://pdos.csail.mit.edu/6.828/2009/tools.html
+# for instructions on building GDB with ELF support.
+echo + symbol-file obj/kern/kernel\n
+symbol-file obj/kern/kernel
View
4 conf/lab.mk
@@ -1,2 +1,2 @@
-LAB=1
-PACKAGEDATE=Wed Sep 9 16:24:04 EDT 2009
+LAB=2
+PACKAGEDATE=Wed Sep 16 15:18:45 EDT 2009
View
34 grade-lab2.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+qemuopts="-hda obj/kern/kernel.img"
+. ./grade-functions.sh
+
+
+$make
+run
+
+score=0
+
+echo_n "Page directory: "
+ if grep "check_boot_pgdir() succeeded!" jos.out >/dev/null
+ then
+ score=`expr 20 + $score`
+ echo OK $time
+ else
+ echo WRONG $time
+ fi
+
+echo_n "Page management: "
+ if grep "page_check() succeeded!" jos.out >/dev/null
+ then
+ score=`expr 30 + $score`
+ echo OK $time
+ else
+ echo WRONG $time
+ fi
+
+echo "Score: $score/50"
+
+if [ $score -lt 50 ]; then
+ exit 1
+fi
View
52 inc/env.h
@@ -0,0 +1,52 @@
+/* See COPYRIGHT for copyright information. */
+
+#ifndef JOS_INC_ENV_H
+#define JOS_INC_ENV_H
+
+#include <inc/types.h>
+#include <inc/queue.h>
+#include <inc/trap.h>
+#include <inc/memlayout.h>
+
+typedef int32_t envid_t;
+
+// An environment ID 'envid_t' has three parts:
+//
+// +1+---------------21-----------------+--------10--------+
+// |0| Uniqueifier | Environment |
+// | | | Index |
+// +------------------------------------+------------------+
+// \--- ENVX(eid) --/
+//
+// The environment index ENVX(eid) equals the environment's offset in the
+// 'envs[]' array. The uniqueifier distinguishes environments that were
+// created at different times, but share the same environment index.
+//
+// All real environments are greater than 0 (so the sign bit is zero).
+// envid_ts less than 0 signify errors. The envid_t == 0 is special, and
+// stands for the current environment.
+
+#define LOG2NENV 10
+#define NENV (1 << LOG2NENV)
+#define ENVX(envid) ((envid) & (NENV - 1))
+
+// Values of env_status in struct Env
+#define ENV_FREE 0
+#define ENV_RUNNABLE 1
+#define ENV_NOT_RUNNABLE 2
+
+struct Env {
+ struct Trapframe env_tf; // Saved registers
+ LIST_ENTRY(Env) env_link; // Free list link pointers
+ envid_t env_id; // Unique environment identifier
+ envid_t env_parent_id; // env_id of this env's parent
+ unsigned env_status; // Status of the environment
+ uint32_t env_runs; // Number of times environment has run
+
+ // Address space
+ pde_t *env_pgdir; // Kernel virtual address of page dir
+ physaddr_t env_cr3; // Physical address of page dir
+
+};
+
+#endif // !JOS_INC_ENV_H
View
2 inc/mmu.h
@@ -74,7 +74,7 @@
// Only flags in PTE_USER may be used in system calls.
#define PTE_USER (PTE_AVAIL | PTE_P | PTE_W | PTE_U)
-// address in page table entry
+// Address in page table or page directory entry
#define PTE_ADDR(pte) ((physaddr_t) (pte) & ~0xFFF)
// Control Register flags
View
81 inc/trap.h
@@ -0,0 +1,81 @@
+#ifndef JOS_INC_TRAP_H
+#define JOS_INC_TRAP_H
+
+// Trap numbers
+// These are processor defined:
+#define T_DIVIDE 0 // divide error
+#define T_DEBUG 1 // debug exception
+#define T_NMI 2 // non-maskable interrupt
+#define T_BRKPT 3 // breakpoint
+#define T_OFLOW 4 // overflow
+#define T_BOUND 5 // bounds check
+#define T_ILLOP 6 // illegal opcode
+#define T_DEVICE 7 // device not available
+#define T_DBLFLT 8 // double fault
+/* #define T_COPROC 9 */ // reserved (not generated by recent processors)
+#define T_TSS 10 // invalid task switch segment
+#define T_SEGNP 11 // segment not present
+#define T_STACK 12 // stack exception
+#define T_GPFLT 13 // general protection fault
+#define T_PGFLT 14 // page fault
+/* #define T_RES 15 */ // reserved
+#define T_FPERR 16 // floating point error
+#define T_ALIGN 17 // alignment check
+#define T_MCHK 18 // machine check
+#define T_SIMDERR 19 // SIMD floating point error
+
+// These are arbitrarily chosen, but with care not to overlap
+// processor defined exceptions or interrupt vectors.
+#define T_SYSCALL 48 // system call
+#define T_DEFAULT 500 // catchall
+
+// Hardware IRQ numbers. We receive these as (IRQ_OFFSET+IRQ_WHATEVER)
+#define IRQ_TIMER 0
+#define IRQ_KBD 1
+#define IRQ_SPURIOUS 7
+#define IRQ_IDE 14
+#define IRQ_ERROR 19
+
+#ifndef __ASSEMBLER__
+
+#include <inc/types.h>
+
+struct PushRegs {
+ /* registers as pushed by pusha */
+ uint32_t reg_edi;
+ uint32_t reg_esi;
+ uint32_t reg_ebp;
+ uint32_t reg_oesp; /* Useless */
+ uint32_t reg_ebx;
+ uint32_t reg_edx;
+ uint32_t reg_ecx;
+ uint32_t reg_eax;
+};
+
+struct Trapframe {
+ struct PushRegs tf_regs;
+ uint16_t tf_es;
+ uint16_t tf_padding1;
+ uint16_t tf_ds;
+ uint16_t tf_padding2;
+ uint32_t tf_trapno;
+ /* below here defined by x86 hardware */
+ uint32_t tf_err;
+ uintptr_t tf_eip;
+ uint16_t tf_cs;
+ uint16_t tf_padding3;
+ uint32_t tf_eflags;
+ /* below here only when crossing rings, such as from user to kernel */
+ uintptr_t tf_esp;
+ uint16_t tf_ss;
+ uint16_t tf_padding4;
+};
+
+
+#endif /* !__ASSEMBLER__ */
+
+// Must equal 'sizeof(struct Trapframe)'.
+// A static_assert in kern/trap.c checks this.
+#define SIZEOF_STRUCT_TRAPFRAME 0x44
+
+#endif /* !JOS_INC_TRAP_H */
View
389 kern/env.c
@@ -0,0 +1,389 @@
+/* See COPYRIGHT for copyright information. */
+
+#include <inc/x86.h>
+#include <inc/mmu.h>
+#include <inc/error.h>
+#include <inc/string.h>
+#include <inc/assert.h>
+#include <inc/elf.h>
+
+#include <kern/env.h>
+#include <kern/pmap.h>
+#include <kern/trap.h>
+#include <kern/monitor.h>
+
+struct Env *envs = NULL; // All environments
+struct Env *curenv = NULL; // The current env
+static struct Env_list env_free_list; // Free list
+
+#define ENVGENSHIFT 12 // >= LOGNENV
+
+//
+// Converts an envid to an env pointer.
+//
+// RETURNS
+// 0 on success, -E_BAD_ENV on error.
+// On success, sets *penv to the environment.
+// On error, sets *penv to NULL.
+//
+int
+envid2env(envid_t envid, struct Env **env_store, bool checkperm)
+{
+ struct Env *e;
+
+ // If envid is zero, return the current environment.
+ if (envid == 0) {
+ *env_store = curenv;
+ return 0;
+ }
+
+ // Look up the Env structure via the index part of the envid,
+ // then check the env_id field in that struct Env
+ // to ensure that the envid is not stale
+ // (i.e., does not refer to a _previous_ environment
+ // that used the same slot in the envs[] array).
+ e = &envs[ENVX(envid)];
+ if (e->env_status == ENV_FREE || e->env_id != envid) {
+ *env_store = 0;
+ return -E_BAD_ENV;
+ }
+
+ // Check that the calling environment has legitimate permission
+ // to manipulate the specified environment.
+ // If checkperm is set, the specified environment
+ // must be either the current environment
+ // or an immediate child of the current environment.
+ if (checkperm && e != curenv && e->env_parent_id != curenv->env_id) {
+ *env_store = 0;
+ return -E_BAD_ENV;
+ }
+
+ *env_store = e;
+ return 0;
+}
+
+//
+// Mark all environments in 'envs' as free, set their env_ids to 0,
+// and insert them into the env_free_list.
+// Insert in reverse order, so that the first call to env_alloc()
+// returns envs[0].
+//
+void
+env_init(void)
+{
+ // LAB 3: Your code here.
+}
+
+//
+// Initialize the kernel virtual memory layout for environment e.
+// Allocate a page directory, set e->env_pgdir and e->env_cr3 accordingly,
+// and initialize the kernel portion of the new environment's address space.
+// Do NOT (yet) map anything into the user portion
+// of the environment's virtual address space.
+//
+// Returns 0 on success, < 0 on error. Errors include:
+// -E_NO_MEM if page directory or table could not be allocated.
+//
+static int
+env_setup_vm(struct Env *e)
+{
+ int i, r;
+ struct Page *p = NULL;
+
+ // Allocate a page for the page directory
+ if ((r = page_alloc(&p)) < 0)
+ return r;
+
+ // Now, set e->env_pgdir and e->env_cr3,
+ // and initialize the page directory.
+ //
+ // Hint:
+ // - The VA space of all envs is identical above UTOP
+ // (except at VPT and UVPT, which we've set below).
+ // See inc/memlayout.h for permissions and layout.
+ // Can you use boot_pgdir as a template? Hint: Yes.
+ // (Make sure you got the permissions right in Lab 2.)
+ // - The initial VA below UTOP is empty.
+ // - You do not need to make any more calls to page_alloc.
+ // - Note: pp_ref is not maintained for most physical pages
+ // mapped above UTOP -- but you do need to increment
+ // env_pgdir's pp_ref!
+
+ // LAB 3: Your code here.
+
+ // VPT and UVPT map the env's own page table, with
+ // different permissions.
+ e->env_pgdir[PDX(VPT)] = e->env_cr3 | PTE_P | PTE_W;
+ e->env_pgdir[PDX(UVPT)] = e->env_cr3 | PTE_P | PTE_U;
+
+ return 0;
+}
+
+//
+// Allocates and initializes a new environment.
+// On success, the new environment is stored in *newenv_store.
+//
+// Returns 0 on success, < 0 on failure. Errors include:
+// -E_NO_FREE_ENV if all NENV environments are allocated
+// -E_NO_MEM on memory exhaustion
+//
+int
+env_alloc(struct Env **newenv_store, envid_t parent_id)
+{
+ int32_t generation;
+ int r;
+ struct Env *e;
+
+ if (!(e = LIST_FIRST(&env_free_list)))
+ return -E_NO_FREE_ENV;
+
+ // Allocate and set up the page directory for this environment.
+ if ((r = env_setup_vm(e)) < 0)
+ return r;
+
+ // Generate an env_id for this environment.
+ generation = (e->env_id + (1 << ENVGENSHIFT)) & ~(NENV - 1);
+ if (generation <= 0) // Don't create a negative env_id.
+ generation = 1 << ENVGENSHIFT;
+ e->env_id = generation | (e - envs);
+
+ // Set the basic status variables.
+ e->env_parent_id = parent_id;
+ e->env_status = ENV_RUNNABLE;
+ e->env_runs = 0;
+
+ // Clear out all the saved register state,
+ // to prevent the register values
+ // of a prior environment inhabiting this Env structure
+ // from "leaking" into our new environment.
+ memset(&e->env_tf, 0, sizeof(e->env_tf));
+
+ // Set up appropriate initial values for the segment registers.
+ // GD_UD is the user data segment selector in the GDT, and
+ // GD_UT is the user text segment selector (see inc/memlayout.h).
+ // The low 2 bits of each segment register contains the
+ // Requestor Privilege Level (RPL); 3 means user mode.
+ e->env_tf.tf_ds = GD_UD | 3;
+ e->env_tf.tf_es = GD_UD | 3;
+ e->env_tf.tf_ss = GD_UD | 3;
+ e->env_tf.tf_esp = USTACKTOP;
+ e->env_tf.tf_cs = GD_UT | 3;
+ // You will set e->env_tf.tf_eip later.
+
+ // commit the allocation
+ LIST_REMOVE(e, env_link);
+ *newenv_store = e;
+
+ cprintf("[%08x] new env %08x\n", curenv ? curenv->env_id : 0, e->env_id);
+ return 0;
+}
+
+//
+// Allocate len bytes of physical memory for environment env,
+// and map it at virtual address va in the environment's address space.
+// Does not zero or otherwise initialize the mapped pages in any way.
+// Pages should be writable by user and kernel.
+// Panic if any allocation attempt fails.
+//
+static void
+segment_alloc(struct Env *e, void *va, size_t len)
+{
+ // LAB 3: Your code here.
+ // (But only if you need it for load_icode.)
+ //
+ // Hint: It is easier to use segment_alloc if the caller can pass
+ // 'va' and 'len' values that are not page-aligned.
+ // You should round va down, and round len up.
+}
+
+//
+// Set up the initial program binary, stack, and processor flags
+// for a user process.
+// This function is ONLY called during kernel initialization,
+// before running the first user-mode environment.
+//
+// This function loads all loadable segments from the ELF binary image
+// into the environment's user memory, starting at the appropriate
+// virtual addresses indicated in the ELF program header.
+// At the same time it clears to zero any portions of these segments
+// that are marked in the program header as being mapped
+// but not actually present in the ELF file - i.e., the program's bss section.
+//
+// All this is very similar to what our boot loader does, except the boot
+// loader also needs to read the code from disk. Take a look at
+// boot/main.c to get ideas.
+//
+// Finally, this function maps one page for the program's initial stack.
+//
+// load_icode panics if it encounters problems.
+// - How might load_icode fail? What might be wrong with the given input?
+//
+static void
+load_icode(struct Env *e, uint8_t *binary, size_t size)
+{
+ // Hints:
+ // Load each program segment into virtual memory
+ // at the address specified in the ELF section header.
+ // You should only load segments with ph->p_type == ELF_PROG_LOAD.
+ // Each segment's virtual address can be found in ph->p_va
+ // and its size in memory can be found in ph->p_memsz.
+ // The ph->p_filesz bytes from the ELF binary, starting at
+ // 'binary + ph->p_offset', should be copied to virtual address
+ // ph->p_va. Any remaining memory bytes should be cleared to zero.
+ // (The ELF header should have ph->p_filesz <= ph->p_memsz.)
+ // Use functions from the previous lab to allocate and map pages.
+ //
+ // All page protection bits should be user read/write for now.
+ // ELF segments are not necessarily page-aligned, but you can
+ // assume for this function that no two segments will touch
+ // the same virtual page.
+ //
+ // You may find a function like segment_alloc useful.
+ //
+ // Loading the segments is much simpler if you can move data
+ // directly into the virtual addresses stored in the ELF binary.
+ // So which page directory should be in force during
+ // this function?
+ //
+ // Hint:
+ // You must also do something with the program's entry point,
+ // to make sure that the environment starts executing there.
+ // What? (See env_run() and env_pop_tf() below.)
+
+ // LAB 3: Your code here.
+
+ // Now map one page for the program's initial stack
+ // at virtual address USTACKTOP - PGSIZE.
+
+ // LAB 3: Your code here.
+}
+
+//
+// Allocates a new env and loads the named elf binary into it.
+// This function is ONLY called during kernel initialization,
+// before running the first user-mode environment.
+// The new env's parent ID is set to 0.
+//
+// Where does the result go?
+// By convention, envs[0] is the first environment allocated, so
+// whoever calls env_create simply looks for the newly created
+// environment there.
+void
+env_create(uint8_t *binary, size_t size)
+{
+ // LAB 3: Your code here.
+}
+
+//
+// Frees env e and all memory it uses.
+//
+void
+env_free(struct Env *e)
+{
+ pte_t *pt;
+ uint32_t pdeno, pteno;
+ physaddr_t pa;
+
+ // If freeing the current environment, switch to boot_pgdir
+ // before freeing the page directory, just in case the page
+ // gets reused.
+ if (e == curenv)
+ lcr3(boot_cr3);
+
+ // Note the environment's demise.
+ cprintf("[%08x] free env %08x\n", curenv ? curenv->env_id : 0, e->env_id);
+
+ // Flush all mapped pages in the user portion of the address space
+ static_assert(UTOP % PTSIZE == 0);
+ for (pdeno = 0; pdeno < PDX(UTOP); pdeno++) {
+
+ // only look at mapped page tables
+ if (!(e->env_pgdir[pdeno] & PTE_P))
+ continue;
+
+ // find the pa and va of the page table
+ pa = PTE_ADDR(e->env_pgdir[pdeno]);
+ pt = (pte_t*) KADDR(pa);
+
+ // unmap all PTEs in this page table
+ for (pteno = 0; pteno <= PTX(~0); pteno++) {
+ if (pt[pteno] & PTE_P)
+ page_remove(e->env_pgdir, PGADDR(pdeno, pteno, 0));
+ }
+
+ // free the page table itself
+ e->env_pgdir[pdeno] = 0;
+ page_decref(pa2page(pa));
+ }
+
+ // free the page directory
+ pa = e->env_cr3;
+ e->env_pgdir = 0;
+ e->env_cr3 = 0;
+ page_decref(pa2page(pa));
+
+ // return the environment to the free list
+ e->env_status = ENV_FREE;
+ LIST_INSERT_HEAD(&env_free_list, e, env_link);
+}
+
+//
+// Frees environment e.
+// If e was the current env, then runs a new environment (and does not return
+// to the caller).
+//
+void
+env_destroy(struct Env *e)
+{
+ env_free(e);
+
+ cprintf("Destroyed the only environment - nothing more to do!\n");
+ while (1)
+ monitor(NULL);
+}
+
+
+//
+// Restores the register values in the Trapframe with the 'iret' instruction.
+// This exits the kernel and starts executing some environment's code.
+// This function does not return.
+//
+void
+env_pop_tf(struct Trapframe *tf)
+{
+ __asm __volatile("movl %0,%%esp\n"
+ "\tpopal\n"
+ "\tpopl %%es\n"
+ "\tpopl %%ds\n"
+ "\taddl $0x8,%%esp\n" /* skip tf_trapno and tf_errcode */
+ "\tiret"
+ : : "g" (tf) : "memory");
+ panic("iret failed"); /* mostly to placate the compiler */
+}
+
+//
+// Context switch from curenv to env e.
+// Note: if this is the first call to env_run, curenv is NULL.
+// (This function does not return.)
+//
+void
+env_run(struct Env *e)
+{
+ // Step 1: If this is a context switch (a new environment is running),
+ // then set 'curenv' to the new environment,
+ // update its 'env_runs' counter, and
+ // and use lcr3() to switch to its address space.
+ // Step 2: Use env_pop_tf() to restore the environment's
+ // registers and drop into user mode in the
+ // environment.
+
+ // Hint: This function loads the new environment's state from
+ // e->env_tf. Go back through the code you wrote above
+ // and make sure you have set the relevant parts of
+ // e->env_tf to sensible values.
+
+ // LAB 3: Your code here.
+
+ panic("env_run not yet implemented");
+}
+
View
43 kern/env.h
@@ -0,0 +1,43 @@
+/* See COPYRIGHT for copyright information. */
+
+#ifndef JOS_KERN_ENV_H
+#define JOS_KERN_ENV_H
+
+#include <inc/env.h>
+
+#ifndef JOS_MULTIENV
+// Change this value to 1 once you're allowing multiple environments
+// (for UCLA: Lab 3, Part 3; for MIT: Lab 4).
+#define JOS_MULTIENV 0
+#endif
+
+extern struct Env *envs; // All environments
+extern struct Env *curenv; // Current environment
+
+LIST_HEAD(Env_list, Env); // Declares 'struct Env_list'
+
+void env_init(void);
+int env_alloc(struct Env **e, envid_t parent_id);
+void env_free(struct Env *e);
+void env_create(uint8_t *binary, size_t size);
+void env_destroy(struct Env *e); // Does not return if e == curenv
+
+int envid2env(envid_t envid, struct Env **env_store, bool checkperm);
+// The following two functions do not return
+void env_run(struct Env *e) __attribute__((noreturn));
+void env_pop_tf(struct Trapframe *tf) __attribute__((noreturn));
+
+// For the grading script
+#define ENV_CREATE2(start, size) { \
+ extern uint8_t start[], size[]; \
+ env_create(start, (int)size); \
+}
+
+#define ENV_CREATE(x) { \
+ extern uint8_t _binary_obj_##x##_start[], \
+ _binary_obj_##x##_size[]; \
+ env_create(_binary_obj_##x##_start, \
+ (int)_binary_obj_##x##_size); \
+}
+
+#endif // !JOS_KERN_ENV_H
View
18 kern/init.c
@@ -6,18 +6,9 @@
#include <kern/monitor.h>
#include <kern/console.h>
+#include <kern/pmap.h>
+#include <kern/kclock.h>
-// Test the stack backtrace function (lab 1 only)
-void
-test_backtrace(int x)
-{
- cprintf("entering test_backtrace %d\n", x);
- if (x > 0)
- test_backtrace(x-1);
- else
- mon_backtrace(0, 0, 0);
- cprintf("leaving test_backtrace %d\n", x);
-}
void
i386_init(void)
@@ -35,14 +26,15 @@ i386_init(void)
cprintf("6828 decimal is %o octal!\n", 6828);
+ // Lab 2 memory management initialization functions
+ i386_detect_memory();
+ i386_vm_init();
- // Test the stack backtrace function (lab 1 only)
- test_backtrace(5);
// Drop into the kernel monitor.
while (1)
View
28 kern/kclock.c
@@ -0,0 +1,28 @@
+/* See COPYRIGHT for copyright information. */
+
+/* Support for two time-related hardware gadgets: 1) the run time
+ * clock with its NVRAM access functions; 2) the 8253 timer, which
+ * generates interrupts on IRQ 0.
+ */
+
+#include <inc/x86.h>
+
+#include <kern/kclock.h>
+
+
+unsigned
+mc146818_read(unsigned reg)
+{
+ outb(IO_RTC, reg);
+ return inb(IO_RTC+1);
+}
+
+void
+mc146818_write(unsigned reg, unsigned datum)
+{
+ outb(IO_RTC, reg);
+ outb(IO_RTC+1, datum);
+}
+
+
+
View
33 kern/kclock.h
@@ -0,0 +1,33 @@
+/* See COPYRIGHT for copyright information. */
+
+#ifndef JOS_KERN_KCLOCK_H
+#define JOS_KERN_KCLOCK_H
+#ifndef JOS_KERNEL
+# error "This is a JOS kernel header; user programs should not #include it"
+#endif
+
+#define IO_RTC 0x070 /* RTC port */
+
+#define MC_NVRAM_START 0xe /* start of NVRAM: offset 14 */
+#define MC_NVRAM_SIZE 50 /* 50 bytes of NVRAM */
+
+/* NVRAM bytes 7 & 8: base memory size */
+#define NVRAM_BASELO (MC_NVRAM_START + 7) /* low byte; RTC off. 0x15 */
+#define NVRAM_BASEHI (MC_NVRAM_START + 8) /* high byte; RTC off. 0x16 */
+
+/* NVRAM bytes 9 & 10: extended memory size */
+#define NVRAM_EXTLO (MC_NVRAM_START + 9) /* low byte; RTC off. 0x17 */
+#define NVRAM_EXTHI (MC_NVRAM_START + 10) /* high byte; RTC off. 0x18 */
+
+/* NVRAM bytes 34 and 35: extended memory POSTed size */
+#define NVRAM_PEXTLO (MC_NVRAM_START + 34) /* low byte; RTC off. 0x30 */
+#define NVRAM_PEXTHI (MC_NVRAM_START + 35) /* high byte; RTC off. 0x31 */
+
+/* NVRAM byte 36: current century. (please increment in Dec99!) */
+#define NVRAM_CENTURY (MC_NVRAM_START + 36) /* RTC offset 0x32 */
+
+unsigned mc146818_read(unsigned reg);
+void mc146818_write(unsigned reg, unsigned datum);
+void kclock_init(void);
+
+#endif // !JOS_KERN_KCLOCK_H
View
770 kern/pmap.c
@@ -0,0 +1,770 @@
+/* See COPYRIGHT for copyright information. */
+
+#include <inc/x86.h>
+#include <inc/mmu.h>
+#include <inc/error.h>
+#include <inc/string.h>
+#include <inc/assert.h>
+
+#include <kern/pmap.h>
+#include <kern/kclock.h>
+
+// These variables are set by i386_detect_memory()
+static physaddr_t maxpa; // Maximum physical address
+size_t npage; // Amount of physical memory (in pages)
+static size_t basemem; // Amount of base memory (in bytes)
+static size_t extmem; // Amount of extended memory (in bytes)
+
+// These variables are set in i386_vm_init()
+pde_t* boot_pgdir; // Virtual address of boot time page directory
+physaddr_t boot_cr3; // Physical address of boot time page directory
+static char* boot_freemem; // Pointer to next byte of free mem
+
+struct Page* pages; // Virtual address of physical page array
+static struct Page_list page_free_list; // Free list of physical pages
+
+// Global descriptor table.
+//
+// The kernel and user segments are identical (except for the DPL).
+// To load the SS register, the CPL must equal the DPL. Thus,
+// we must duplicate the segments for the user and the kernel.
+//
+struct Segdesc gdt[] =
+{
+ // 0x0 - unused (always faults -- for trapping NULL far pointers)
+ SEG_NULL,
+
+ // 0x8 - kernel code segment
+ [GD_KT >> 3] = SEG(STA_X | STA_R, 0x0, 0xffffffff, 0),
+
+ // 0x10 - kernel data segment
+ [GD_KD >> 3] = SEG(STA_W, 0x0, 0xffffffff, 0),
+
+ // 0x18 - user code segment
+ [GD_UT >> 3] = SEG(STA_X | STA_R, 0x0, 0xffffffff, 3),
+
+ // 0x20 - user data segment
+ [GD_UD >> 3] = SEG(STA_W, 0x0, 0xffffffff, 3),
+
+ // 0x28 - tss, initialized in idt_init()
+ [GD_TSS >> 3] = SEG_NULL
+};
+
+struct Pseudodesc gdt_pd = {
+ sizeof(gdt) - 1, (unsigned long) gdt
+};
+
+static int
+nvram_read(int r)
+{
+ return mc146818_read(r) | (mc146818_read(r + 1) << 8);
+}
+
+void
+i386_detect_memory(void)
+{
+ // CMOS tells us how many kilobytes there are
+ basemem = ROUNDDOWN(nvram_read(NVRAM_BASELO)*1024, PGSIZE);
+ extmem = ROUNDDOWN(nvram_read(NVRAM_EXTLO)*1024, PGSIZE);
+
+ // Calculate the maximum physical address based on whether
+ // or not there is any extended memory. See comment in <inc/mmu.h>.
+ if (extmem)
+ maxpa = EXTPHYSMEM + extmem;
+ else
+ maxpa = basemem;
+
+ npage = maxpa / PGSIZE;
+
+ cprintf("Physical memory: %dK available, ", (int)(maxpa/1024));
+ cprintf("base = %dK, extended = %dK\n", (int)(basemem/1024), (int)(extmem/1024));
+}
+
+// --------------------------------------------------------------
+// Set up initial memory mappings and turn on MMU.
+// --------------------------------------------------------------
+
+static void check_boot_pgdir(void);
+static void check_page_alloc();
+static void page_check(void);
+static void boot_map_segment(pde_t *pgdir, uintptr_t la, size_t size, physaddr_t pa, int perm);
+
+//
+// A simple physical memory allocator, used only a few times
+// in the process of setting up the virtual memory system.
+// page_alloc() is the real allocator.
+//
+// Allocate n bytes of physical memory aligned on an
+// align-byte boundary. Align must be a power of two.
+// Return kernel virtual address. Returned memory is uninitialized.
+//
+// If we're out of memory, boot_alloc should panic.
+// This function may ONLY be used during initialization,
+// before the page_free_list has been set up.
+//
+static void*
+boot_alloc(uint32_t n, uint32_t align)
+{
+ extern char end[];
+ void *v;
+
+ // Initialize boot_freemem if this is the first time.
+ // 'end' is a magic symbol automatically generated by the linker,
+ // which points to the end of the kernel's bss segment -
+ // i.e., the first virtual address that the linker
+ // did _not_ assign to any kernel code or global variables.
+ if (boot_freemem == 0)
+ boot_freemem = end;
+
+ // LAB 2: Your code here:
+ // Step 1: round boot_freemem up to be aligned properly
+ // Step 2: save current value of boot_freemem as allocated chunk
+ // Step 3: increase boot_freemem to record allocation
+ // Step 4: return allocated chunk
+
+ return NULL;
+}
+
+// Set up a two-level page table:
+// boot_pgdir is its linear (virtual) address of the root
+// boot_cr3 is the physical adresss of the root
+// Then turn on paging. Then effectively turn off segmentation.
+// (i.e., the segment base addrs are set to zero).
+//
+// This function only sets up the kernel part of the address space
+// (ie. addresses >= UTOP). The user part of the address space
+// will be setup later.
+//
+// From UTOP to ULIM, the user is allowed to read but not write.
+// Above ULIM the user cannot read (or write).
+void
+i386_vm_init(void)
+{
+ pde_t* pgdir;
+ uint32_t cr0;
+ size_t n;
+
+ // Delete this line:
+ panic("i386_vm_init: This function is not finished\n");
+
+ //////////////////////////////////////////////////////////////////////
+ // create initial page directory.
+ pgdir = boot_alloc(PGSIZE, PGSIZE);
+ memset(pgdir, 0, PGSIZE);
+ boot_pgdir = pgdir;
+ boot_cr3 = PADDR(pgdir);
+
+ //////////////////////////////////////////////////////////////////////
+ // Recursively insert PD in itself as a page table, to form
+ // a virtual page table at virtual address VPT.
+// (For now, you don't have to understand the greater purpose of the
+ // following two lines.)
+
+ // Permissions: kernel RW, user NONE
+ pgdir[PDX(VPT)] = PADDR(pgdir)|PTE_W|PTE_P;
+
+ // same for UVPT
+ // Permissions: kernel R, user R
+ pgdir[PDX(UVPT)] = PADDR(pgdir)|PTE_U|PTE_P;
+
+ //////////////////////////////////////////////////////////////////////
+ // Make 'pages' point to an array of size 'npage' of 'struct Page'.
+ // The kernel uses this structure to keep track of physical pages;
+ // 'npage' equals the number of physical pages in memory. User-level
+ // programs will get read-only access to the array as well.
+ // You must allocate the array yourself.
+ // Your code goes here:
+
+
+ //////////////////////////////////////////////////////////////////////
+ // Now that we've allocated the initial kernel data structures, we set
+ // up the list of free physical pages. Once we've done so, all further
+ // memory management will go through the page_* functions. In
+ // particular, we can now map memory using boot_map_segment or page_insert
+ page_init();
+
+ check_page_alloc();
+
+ page_check();
+
+ //////////////////////////////////////////////////////////////////////
+ // Now we set up virtual memory
+
+ //////////////////////////////////////////////////////////////////////
+ // Map 'pages' read-only by the user at linear address UPAGES
+ // Permissions:
+ // - the new image at UPAGES -- kernel R, user R
+ // (ie. perm = PTE_U | PTE_P)
+ // - pages itself -- kernel RW, user NONE
+ // Your code goes here:
+
+ //////////////////////////////////////////////////////////////////////
+ // Use the physical memory that bootstack refers to as
+ // the kernel stack. The complete VA
+ // range of the stack, [KSTACKTOP-PTSIZE, KSTACKTOP), breaks into two
+ // pieces:
+ // * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
+ // * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed => faults
+ // Permissions: kernel RW, user NONE
+ // Your code goes here:
+
+ //////////////////////////////////////////////////////////////////////
+ // Map all of physical memory at KERNBASE.
+ // Ie. the VA range [KERNBASE, 2^32) should map to
+ // the PA range [0, 2^32 - KERNBASE)
+ // We might not have 2^32 - KERNBASE bytes of physical memory, but
+ // we just set up the mapping anyway.
+ // Permissions: kernel RW, user NONE
+ // Your code goes here:
+
+ // Check that the initial page directory has been set up correctly.
+ check_boot_pgdir();
+
+ //////////////////////////////////////////////////////////////////////
+ // On x86, segmentation maps a VA to a LA (linear addr) and
+ // paging maps the LA to a PA. I.e. VA => LA => PA. If paging is
+ // turned off the LA is used as the PA. Note: there is no way to
+ // turn off segmentation. The closest thing is to set the base
+ // address to 0, so the VA => LA mapping is the identity.
+
+ // Current mapping: VA KERNBASE+x => PA x.
+ // (segmentation base=-KERNBASE and paging is off)
+
+ // From here on down we must maintain this VA KERNBASE + x => PA x
+ // mapping, even though we are turning on paging and reconfiguring
+ // segmentation.
+
+ // Map VA 0:4MB same as VA KERNBASE, i.e. to PA 0:4MB.
+ // (Limits our kernel to <4MB)
+ pgdir[0] = pgdir[PDX(KERNBASE)];
+
+ // Install page table.
+ lcr3(boot_cr3);
+
+ // Turn on paging.
+ cr0 = rcr0();
+ cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_TS|CR0_EM|CR0_MP;
+ cr0 &= ~(CR0_TS|CR0_EM);
+ lcr0(cr0);
+
+ // Current mapping: KERNBASE+x => x => x.
+ // (x < 4MB so uses paging pgdir[0])
+
+ // Reload all segment registers.
+ asm volatile("lgdt gdt_pd");
+ asm volatile("movw %%ax,%%gs" :: "a" (GD_UD|3));
+ asm volatile("movw %%ax,%%fs" :: "a" (GD_UD|3));
+ asm volatile("movw %%ax,%%es" :: "a" (GD_KD));
+ asm volatile("movw %%ax,%%ds" :: "a" (GD_KD));
+ asm volatile("movw %%ax,%%ss" :: "a" (GD_KD));
+ asm volatile("ljmp %0,$1f\n 1:\n" :: "i" (GD_KT)); // reload cs
+ asm volatile("lldt %%ax" :: "a" (0));
+
+ // Final mapping: KERNBASE+x => KERNBASE+x => x.
+
+ // This mapping was only used after paging was turned on but
+ // before the segment registers were reloaded.
+ pgdir[0] = 0;
+
+ // Flush the TLB for good measure, to kill the pgdir[0] mapping.
+ lcr3(boot_cr3);
+}
+
+//
+// Check the physical page allocator (page_alloc(), page_free(),
+// and page_init()).
+//
+// Temporarily empties the free list to exercise the out-of-memory
+// path; the list is restored before returning.
+//
+static void
+check_page_alloc()
+{
+	struct Page *pp, *pp0, *pp1, *pp2;
+	struct Page_list fl;
+
+	// if there's a page that shouldn't be on
+	// the free list, try to make sure it
+	// eventually causes trouble.
+	// (0x97 is an arbitrary junk pattern scribbled over the first
+	// 128 bytes of every supposedly-free page.)
+	LIST_FOREACH(pp0, &page_free_list, pp_link)
+		memset(page2kva(pp0), 0x97, 128);
+
+	// should be able to allocate three pages
+	pp0 = pp1 = pp2 = 0;
+	assert(page_alloc(&pp0) == 0);
+	assert(page_alloc(&pp1) == 0);
+	assert(page_alloc(&pp2) == 0);
+
+	// all three must be distinct and lie within physical memory
+	assert(pp0);
+	assert(pp1 && pp1 != pp0);
+	assert(pp2 && pp2 != pp1 && pp2 != pp0);
+	assert(page2pa(pp0) < npage*PGSIZE);
+	assert(page2pa(pp1) < npage*PGSIZE);
+	assert(page2pa(pp2) < npage*PGSIZE);
+
+	// temporarily steal the rest of the free pages
+	fl = page_free_list;
+	LIST_INIT(&page_free_list);
+
+	// should be no free memory
+	assert(page_alloc(&pp) == -E_NO_MEM);
+
+	// free and re-allocate?
+	page_free(pp0);
+	page_free(pp1);
+	page_free(pp2);
+	pp0 = pp1 = pp2 = 0;
+	assert(page_alloc(&pp0) == 0);
+	assert(page_alloc(&pp1) == 0);
+	assert(page_alloc(&pp2) == 0);
+	assert(pp0);
+	assert(pp1 && pp1 != pp0);
+	assert(pp2 && pp2 != pp1 && pp2 != pp0);
+	assert(page_alloc(&pp) == -E_NO_MEM);
+
+	// give free list back
+	page_free_list = fl;
+
+	// free the pages we took
+	page_free(pp0);
+	page_free(pp1);
+	page_free(pp2);
+
+	cprintf("check_page_alloc() succeeded!\n");
+}
+
+//
+// Checks that the kernel part of virtual address space
+// has been set up roughly correctly (by i386_vm_init()).
+//
+// This function doesn't test every corner case,
+// in fact it doesn't test the permission bits at all,
+// but it is a pretty good sanity check.
+//
+static physaddr_t check_va2pa(pde_t *pgdir, uintptr_t va);
+
+static void
+check_boot_pgdir(void)
+{
+	uint32_t i, n;
+	pde_t *pgdir;
+
+	pgdir = boot_pgdir;
+
+	// check pages array: every page of pages[] must be mapped at UPAGES
+	n = ROUNDUP(npage*sizeof(struct Page), PGSIZE);
+	for (i = 0; i < n; i += PGSIZE)
+		assert(check_va2pa(pgdir, UPAGES + i) == PADDR(pages) + i);
+
+
+	// check phys mem: all of physical memory is mapped at KERNBASE
+	for (i = 0; i < npage * PGSIZE; i += PGSIZE)
+		assert(check_va2pa(pgdir, KERNBASE + i) == i);
+
+	// check kernel stack: bootstack is mapped just below KSTACKTOP
+	for (i = 0; i < KSTKSIZE; i += PGSIZE)
+		assert(check_va2pa(pgdir, KSTACKTOP - KSTKSIZE + i) == PADDR(bootstack) + i);
+
+	// check for zero/non-zero in PDEs
+	for (i = 0; i < NPDENTRIES; i++) {
+		switch (i) {
+		case PDX(VPT):
+		case PDX(UVPT):
+		case PDX(KSTACKTOP-1):
+		case PDX(UPAGES):
+			// these special regions must have a page table installed
+			assert(pgdir[i]);
+			break;
+		default:
+			// everything at or above KERNBASE is mapped;
+			// all other PDEs must still be empty
+			if (i >= PDX(KERNBASE))
+				assert(pgdir[i]);
+			else
+				assert(pgdir[i] == 0);
+			break;
+		}
+	}
+	cprintf("check_boot_pgdir() succeeded!\n");
+}
+
+// This function returns the physical address of the page containing 'va',
+// defined by the page directory 'pgdir'.  The hardware normally performs
+// this functionality for us!  We define our own version to help check
+// the check_boot_pgdir() function; it shouldn't be used elsewhere.
+//
+// Returns ~0 if 'va' is not mapped at either level.
+
+static physaddr_t
+check_va2pa(pde_t *pgdir, uintptr_t va)
+{
+	pde_t pde;
+	pte_t *pgtab, pte;
+
+	// Level 1: look up the page directory entry.
+	pde = pgdir[PDX(va)];
+	if (!(pde & PTE_P))
+		return ~0;
+
+	// Level 2: follow it to the page table and look up the PTE.
+	pgtab = (pte_t *) KADDR(PTE_ADDR(pde));
+	pte = pgtab[PTX(va)];
+	if (!(pte & PTE_P))
+		return ~0;
+
+	return PTE_ADDR(pte);
+}
+
+// --------------------------------------------------------------
+// Tracking of physical pages.
+// The 'pages' array has one 'struct Page' entry per physical page.
+// Pages are reference counted, and free pages are kept on a linked list.
+// --------------------------------------------------------------
+
+//
+// Initialize page structure and memory free list.
+// After this point, ONLY use the functions below
+// to allocate and deallocate physical memory via the page_free_list,
+// and NEVER use boot_alloc()
+//
+// Not all physical pages are free:
+//   1) Page 0 is in use, preserving the real-mode IDT and BIOS
+//      structures in case we ever need them.
+//   2) The rest of base memory is free.
+//   3) The IO hole [IOPHYSMEM, EXTPHYSMEM) must never be allocated.
+//   4) Extended memory from EXTPHYSMEM up to the boot allocator's
+//      high-water mark holds the kernel image, the page directory,
+//      and the pages[] array itself, and is therefore in use.
+//
+void
+page_init(void)
+{
+	size_t i;
+	// First physical address of extended memory NOT handed out by
+	// boot_alloc(); boot_freemem is the boot allocator's next-free
+	// pointer.  NOTE(review): assumes boot_freemem is the file-scope
+	// boot allocator pointer declared earlier in this file.
+	physaddr_t boot_end = ROUNDUP(PADDR(boot_freemem), PGSIZE);
+
+	LIST_INIT(&page_free_list);
+	for (i = 0; i < npage; i++) {
+		pages[i].pp_ref = 0;
+		if (i == 0) {
+			// page 0: real-mode IDT and BIOS structures
+			pages[i].pp_ref = 1;
+		} else if (i >= PPN(IOPHYSMEM) && i < PPN(boot_end)) {
+			// IO hole plus kernel image and boot-time
+			// allocations [IOPHYSMEM, boot_end)
+			pages[i].pp_ref = 1;
+		} else {
+			// base memory and the rest of extended memory
+			LIST_INSERT_HEAD(&page_free_list, &pages[i], pp_link);
+		}
+	}
+}
+
+//
+// Reset a Page structure to a pristine state: null list links and a
+// zero reference count.
+// Note that the corresponding physical page is NOT initialized!
+//
+static void
+page_initpp(struct Page *pp)
+{
+	memset(pp, 0, sizeof(struct Page));
+}
+
+//
+// Allocates a physical page.
+// Does NOT set the contents of the physical page to zero -
+// the caller must do that if necessary.
+//
+// *pp_store -- is set to point to the Page struct of the newly allocated
+// page
+//
+// RETURNS
+//   0 -- on success
+//   -E_NO_MEM -- otherwise
+//
+// Note: pp_ref is left at 0; the caller (e.g. page_insert) takes the
+// reference.
+int
+page_alloc(struct Page **pp_store)
+{
+	struct Page *pp;
+
+	pp = LIST_FIRST(&page_free_list);
+	if (pp == NULL)
+		return -E_NO_MEM;
+	LIST_REMOVE(pp, pp_link);
+	// Clear the bookkeeping (links, refcount) but not the page itself.
+	page_initpp(pp);
+	*pp_store = pp;
+	return 0;
+}
+
+//
+// Return a page to the free list.
+// (This function should only be called when pp->pp_ref reaches 0.)
+//
+void
+page_free(struct Page *pp)
+{
+	// Freeing a page that is still mapped is a caller bug.
+	assert(pp->pp_ref == 0);
+	LIST_INSERT_HEAD(&page_free_list, pp, pp_link);
+}
+
+//
+// Decrement the reference count on a page,
+// freeing it if there are no more refs.
+//
+void
+page_decref(struct Page* pp)
+{
+	pp->pp_ref--;
+	if (pp->pp_ref == 0)
+		page_free(pp);
+}
+
+// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
+// a pointer to the page table entry (PTE) for linear address 'va'.
+// This requires walking the two-level page table structure.
+//
+// If the relevant page table doesn't exist in the page directory, then:
+//    - If create == 0, pgdir_walk returns NULL.
+//    - Otherwise, pgdir_walk tries to allocate a new page table
+//	with page_alloc.  If this fails, pgdir_walk returns NULL.
+//    - pgdir_walk sets pp_ref to 1 for the new page table.
+//    - pgdir_walk clears the new page table.
+//    - Finally, pgdir_walk returns a pointer into the new page table.
+pte_t *
+pgdir_walk(pde_t *pgdir, const void *va, int create)
+{
+	pde_t *pde = &pgdir[PDX(va)];
+	pte_t *pgtab;
+
+	if (*pde & PTE_P) {
+		// Page table already exists; find its kernel VA.
+		pgtab = (pte_t *) KADDR(PTE_ADDR(*pde));
+	} else {
+		struct Page *pp;
+
+		if (!create)
+			return NULL;
+		if (page_alloc(&pp) < 0)
+			return NULL;
+		pp->pp_ref = 1;
+		pgtab = (pte_t *) page2kva(pp);
+		// New page tables must start empty (no stale PTE_P bits).
+		memset(pgtab, 0, PGSIZE);
+		// Leave the PDE maximally permissive; the x86 MMU checks
+		// both levels, so the PTE enforces the real permissions.
+		*pde = page2pa(pp) | PTE_P | PTE_W | PTE_U;
+	}
+	return &pgtab[PTX(va)];
+}
+
+//
+// Map the physical page 'pp' at virtual address 'va'.
+// The permissions (the low 12 bits) of the page table
+//  entry are set to 'perm|PTE_P'.
+//
+// Details:
+//   - If a page was already mapped at 'va', it is page_remove()d
+//     (which also invalidates the TLB entry).
+//   - If necessary, a page table is allocated on demand and inserted
+//     into 'pgdir' by pgdir_walk().
+//   - pp->pp_ref is incremented if the insertion succeeds.
+//
+// RETURNS:
+//   0 on success
+//   -E_NO_MEM, if page table couldn't be allocated
+//
+int
+page_insert(pde_t *pgdir, struct Page *pp, void *va, int perm)
+{
+	pte_t *pte = pgdir_walk(pgdir, va, 1);
+	if (pte == NULL)
+		return -E_NO_MEM;
+
+	// Bump the ref count *before* removing any old mapping: if 'pp'
+	// is already mapped at 'va', removing it first would drop its
+	// refcount to zero and put it back on the free list.
+	pp->pp_ref++;
+	if (*pte & PTE_P)
+		page_remove(pgdir, va);
+	*pte = page2pa(pp) | perm | PTE_P;
+	return 0;
+}
+
+//
+// Map [la, la+size) of linear address space to physical [pa, pa+size)
+// in the page table rooted at pgdir.  Size is a multiple of PGSIZE.
+// Use permission bits perm|PTE_P for the entries.
+//
+// This function is only intended to set up the ``static'' mappings
+// above UTOP. As such, it should *not* change the pp_ref field on the
+// mapped pages.
+//
+// Panics if a page table cannot be allocated: boot-time mappings are
+// mandatory, so there is no caller that could recover.
+static void
+boot_map_segment(pde_t *pgdir, uintptr_t la, size_t size, physaddr_t pa, int perm)
+{
+	size_t off;
+	pte_t *pte;
+
+	for (off = 0; off < size; off += PGSIZE) {
+		pte = pgdir_walk(pgdir, (void *) (la + off), 1);
+		if (pte == NULL)
+			panic("boot_map_segment: out of memory");
+		*pte = (pa + off) | perm | PTE_P;
+	}
+}
+
+//
+// Return the page mapped at virtual address 'va'.
+// If pte_store is not zero, then we store in it the address
+// of the pte for this page.  This is used by page_remove
+// but should not be used by other callers.
+//
+// Return NULL if there is no page mapped at va.
+//
+struct Page *
+page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
+{
+	// Don't create a page table just to discover there's no mapping.
+	pte_t *pte = pgdir_walk(pgdir, va, 0);
+
+	if (pte == NULL || !(*pte & PTE_P))
+		return NULL;
+	if (pte_store)
+		*pte_store = pte;
+	return pa2page(PTE_ADDR(*pte));
+}
+
+//
+// Unmaps the physical page at virtual address 'va'.
+// If there is no physical page at that address, silently does nothing.
+//
+// Details:
+//   - The ref count on the physical page is decremented, and the page
+//     is freed if the refcount reaches 0 (via page_decref).
+//   - The pg table entry corresponding to 'va' is set to 0.
+//   - The TLB entry for 'va' is invalidated.
+//
+void
+page_remove(pde_t *pgdir, void *va)
+{
+	pte_t *pte;
+	struct Page *pp = page_lookup(pgdir, va, &pte);
+
+	if (pp == NULL)
+		return;		// nothing mapped at 'va'
+	page_decref(pp);
+	*pte = 0;
+	tlb_invalidate(pgdir, va);
+}
+
+//
+// Invalidate a TLB entry, but only if the page tables being
+// edited are the ones currently in use by the processor.
+//
+// 'pgdir' is accepted so that, once multiple address spaces exist,
+// the flush can be skipped when editing a non-current one.
+//
+void
+tlb_invalidate(pde_t *pgdir, void *va)
+{
+	// Flush the entry only if we're modifying the current address space.
+	// For now, there is only one address space, so always invalidate.
+	invlpg(va);
+}
+
+// check page_insert, page_remove, &c
+// NOTE: strictly order-dependent; each assertion relies on the exact
+// free-list and refcount state left by the previous steps.
+static void
+page_check(void)
+{
+	struct Page *pp, *pp0, *pp1, *pp2;
+	struct Page_list fl;
+	pte_t *ptep, *ptep1;
+	void *va;
+	int i;
+
+	// should be able to allocate three pages
+	pp0 = pp1 = pp2 = 0;
+	assert(page_alloc(&pp0) == 0);
+	assert(page_alloc(&pp1) == 0);
+	assert(page_alloc(&pp2) == 0);
+
+	assert(pp0);
+	assert(pp1 && pp1 != pp0);
+	assert(pp2 && pp2 != pp1 && pp2 != pp0);
+
+	// temporarily steal the rest of the free pages
+	fl = page_free_list;
+	LIST_INIT(&page_free_list);
+
+	// should be no free memory
+	assert(page_alloc(&pp) == -E_NO_MEM);
+
+	// there is no page allocated at address 0
+	assert(page_lookup(boot_pgdir, (void *) 0x0, &ptep) == NULL);
+
+	// there is no free memory, so we can't allocate a page table
+	assert(page_insert(boot_pgdir, pp1, 0x0, 0) < 0);
+
+	// free pp0 and try again: pp0 should be used for page table
+	page_free(pp0);
+	assert(page_insert(boot_pgdir, pp1, 0x0, 0) == 0);
+	assert(PTE_ADDR(boot_pgdir[0]) == page2pa(pp0));
+	assert(check_va2pa(boot_pgdir, 0x0) == page2pa(pp1));
+	assert(pp1->pp_ref == 1);
+	assert(pp0->pp_ref == 1);
+
+	// should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
+	assert(page_insert(boot_pgdir, pp2, (void*) PGSIZE, 0) == 0);
+	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp2));
+	assert(pp2->pp_ref == 1);
+
+	// should be no free memory
+	assert(page_alloc(&pp) == -E_NO_MEM);
+
+	// should be able to map pp2 at PGSIZE because it's already there
+	assert(page_insert(boot_pgdir, pp2, (void*) PGSIZE, 0) == 0);
+	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp2));
+	assert(pp2->pp_ref == 1);
+
+	// pp2 should NOT be on the free list
+	// could happen if ref counts are handled sloppily in page_insert
+	assert(page_alloc(&pp) == -E_NO_MEM);
+
+	// check that pgdir_walk returns a pointer to the pte
+	ptep = KADDR(PTE_ADDR(boot_pgdir[PDX(PGSIZE)]));
+	assert(pgdir_walk(boot_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));
+
+	// should be able to change permissions too.
+	assert(page_insert(boot_pgdir, pp2, (void*) PGSIZE, PTE_U) == 0);
+	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp2));
+	assert(pp2->pp_ref == 1);
+	assert(*pgdir_walk(boot_pgdir, (void*) PGSIZE, 0) & PTE_U);
+	assert(boot_pgdir[0] & PTE_U);
+
+	// should not be able to map at PTSIZE because need free page for page table
+	assert(page_insert(boot_pgdir, pp0, (void*) PTSIZE, 0) < 0);
+
+	// insert pp1 at PGSIZE (replacing pp2)
+	assert(page_insert(boot_pgdir, pp1, (void*) PGSIZE, 0) == 0);
+	assert(!(*pgdir_walk(boot_pgdir, (void*) PGSIZE, 0) & PTE_U));
+
+	// should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
+	assert(check_va2pa(boot_pgdir, 0) == page2pa(pp1));
+	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp1));
+	// ... and ref counts should reflect this
+	assert(pp1->pp_ref == 2);
+	assert(pp2->pp_ref == 0);
+
+	// pp2 should be returned by page_alloc
+	assert(page_alloc(&pp) == 0 && pp == pp2);
+
+	// unmapping pp1 at 0 should keep pp1 at PGSIZE
+	page_remove(boot_pgdir, 0x0);
+	assert(check_va2pa(boot_pgdir, 0x0) == ~0);
+	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp1));
+	assert(pp1->pp_ref == 1);
+	assert(pp2->pp_ref == 0);
+
+	// unmapping pp1 at PGSIZE should free it
+	page_remove(boot_pgdir, (void*) PGSIZE);
+	assert(check_va2pa(boot_pgdir, 0x0) == ~0);
+	assert(check_va2pa(boot_pgdir, PGSIZE) == ~0);
+	assert(pp1->pp_ref == 0);
+	assert(pp2->pp_ref == 0);
+
+	// so it should be returned by page_alloc
+	assert(page_alloc(&pp) == 0 && pp == pp1);
+
+	// should be no free memory
+	assert(page_alloc(&pp) == -E_NO_MEM);
+
+#if 0
+	// should be able to page_insert to change a page
+	// and see the new data immediately.
+	memset(page2kva(pp1), 1, PGSIZE);
+	memset(page2kva(pp2), 2, PGSIZE);
+	page_insert(boot_pgdir, pp1, 0x0, 0);
+	assert(pp1->pp_ref == 1);
+	assert(*(int*)0 == 0x01010101);
+	page_insert(boot_pgdir, pp2, 0x0, 0);
+	assert(*(int*)0 == 0x02020202);
+	assert(pp2->pp_ref == 1);
+	assert(pp1->pp_ref == 0);
+	page_remove(boot_pgdir, 0x0);
+	assert(pp2->pp_ref == 0);
+#endif
+
+	// forcibly take pp0 back
+	// (it is still in use as the page table installed at boot_pgdir[0])
+	assert(PTE_ADDR(boot_pgdir[0]) == page2pa(pp0));
+	boot_pgdir[0] = 0;
+	assert(pp0->pp_ref == 1);
+	pp0->pp_ref = 0;
+
+	// check pointer arithmetic in pgdir_walk
+	page_free(pp0);
+	va = (void*)(PGSIZE * NPDENTRIES + PGSIZE);
+	ptep = pgdir_walk(boot_pgdir, va, 1);
+	ptep1 = KADDR(PTE_ADDR(boot_pgdir[PDX(va)]));
+	assert(ptep == ptep1 + PTX(va));
+	boot_pgdir[PDX(va)] = 0;
+	pp0->pp_ref = 0;
+
+	// check that new page tables get cleared
+	// (fill pp0 with junk, free it so pgdir_walk reuses it, then make
+	// sure no junk PTE looks present)
+	memset(page2kva(pp0), 0xFF, PGSIZE);
+	page_free(pp0);
+	pgdir_walk(boot_pgdir, 0x0, 1);
+	ptep = page2kva(pp0);
+	for(i=0; i<NPTENTRIES; i++)
+		assert((ptep[i] & PTE_P) == 0);
+	boot_pgdir[0] = 0;
+	pp0->pp_ref = 0;
+
+	// give free list back
+	page_free_list = fl;
+
+	// free the pages we took
+	page_free(pp0);
+	page_free(pp1);
+	page_free(pp2);
+
+	cprintf("page_check() succeeded!\n");
+}
+
View
91 kern/pmap.h
@@ -0,0 +1,91 @@
+/* See COPYRIGHT for copyright information. */
+
+#ifndef JOS_KERN_PMAP_H
+#define JOS_KERN_PMAP_H
+#ifndef JOS_KERNEL
+# error "This is a JOS kernel header; user programs should not #include it"
+#endif
+
+#include <inc/memlayout.h>
+#include <inc/assert.h>
+
+
+/* This macro takes a kernel virtual address -- an address that points above
+ * KERNBASE, where the machine's maximum 256MB of physical memory is mapped --
+ * and returns the corresponding physical address.  It panics if you pass it a
+ * non-kernel virtual address.
+ *
+ * Implemented as a GCC statement expression so the argument is evaluated
+ * exactly once and the check can panic inline.
+ */
+#define PADDR(kva)						\
+({								\
+	physaddr_t __m_kva = (physaddr_t) (kva);		\
+	if (__m_kva < KERNBASE)					\
+		panic("PADDR called with invalid kva %08lx", __m_kva);\
+	__m_kva - KERNBASE;					\
+})
+
+/* This macro takes a physical address and returns the corresponding kernel
+ * virtual address.  It panics if you pass an invalid physical address
+ * (one whose page number is >= npage). */
+#define KADDR(pa)						\
+({								\
+	physaddr_t __m_pa = (pa);				\
+	uint32_t __m_ppn = PPN(__m_pa);				\
+	if (__m_ppn >= npage)					\
+		panic("KADDR called with invalid pa %08lx", __m_pa);\
+	(void*) (__m_pa + KERNBASE);				\
+})
+
+
+
+extern char bootstacktop[], bootstack[];
+
+extern struct Page *pages;
+extern size_t npage;
+
+extern physaddr_t boot_cr3;
+extern pde_t *boot_pgdir;
+
+extern struct Segdesc gdt[];
+extern struct Pseudodesc gdt_pd;
+
+void i386_vm_init();
+void i386_detect_memory();
+
+void page_init(void);
+int page_alloc(struct Page **pp_store);
+void page_free(struct Page *pp);
+int page_insert(pde_t *pgdir, struct Page *pp, void *va, int perm);
+void page_remove(pde_t *pgdir, void *va);
+struct Page *page_lookup(pde_t *pgdir, void *va, pte_t **pte_store);
+void page_decref(struct Page *pp);
+
+void tlb_invalidate(pde_t *pgdir, void *va);
+
+// Page structure -> physical page number: the index of pp in pages[].
+static inline ppn_t
+page2ppn(struct Page *pp)
+{
+	return (ppn_t) (pp - pages);
+}
+
+// Page structure -> physical address of the page it describes.
+static inline physaddr_t
+page2pa(struct Page *pp)
+{
+	ppn_t ppn = page2ppn(pp);
+	return ppn << PGSHIFT;
+}
+
+// Physical address -> Page structure; panics on an out-of-range address.
+static inline struct Page*
+pa2page(physaddr_t pa)
+{
+	ppn_t ppn = PPN(pa);
+
+	if (ppn >= npage)
+		panic("pa2page called with invalid pa");
+	return pages + ppn;
+}
+
+// Page structure -> kernel virtual address of the page it describes.
+static inline void*
+page2kva(struct Page *pp)
+{
+	physaddr_t pa = page2pa(pp);
+	return KADDR(pa);
+}
+
+pte_t *pgdir_walk(pde_t *pgdir, const void *va, int create);
+
+#endif /* !JOS_KERN_PMAP_H */
View
21 kern/trap.h
@@ -0,0 +1,21 @@
+/* See COPYRIGHT for copyright information. */
+
+#ifndef JOS_KERN_TRAP_H
+#define JOS_KERN_TRAP_H
+#ifndef JOS_KERNEL
+# error "This is a JOS kernel header; user programs should not #include it"
+#endif
+
+#include <inc/trap.h>
+#include <inc/mmu.h>
+
+/* The kernel's interrupt descriptor table */
+extern struct Gatedesc idt[];
+
+void idt_init(void);
+void print_regs(struct PushRegs *regs);
+void print_trapframe(struct Trapframe *tf);
+void page_fault_handler(struct Trapframe *);
+void backtrace(struct Trapframe *);
+
+#endif /* JOS_KERN_TRAP_H */

0 comments on commit b9a9356

Please sign in to comment.
Something went wrong with that request. Please try again.