
bring in latest KAME (as of 19991130, KAME/NetBSD141) into kame branch

just for reference purposes.
This commit includes 1.4 -> 1.4.1 sync for kame branch.

The branch does not compile at all (due to the lack of ALTQ and some other
source code).  Please do not try to modify the branch; it is just for
reference purposes.

Synchronization to the latest KAME will take place on the HEAD branch soon.
Commit b649bedd55a27812059a2ee34a3bbfc5cefcf3aa (parent 44941d3), itojun committed Nov 30, 1999
Showing with 140 additions and 258 deletions.
  1. +140 −258 sys/arch/pc532/pc532/pmap.new.c
sys/arch/pc532/pc532/pmap.new.c (398 lines changed)
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.new.c,v 1.1.1.3 1998/08/31 21:17:43 matthias Exp $ */
+/* $NetBSD: pmap.new.c,v 1.10.2.2.4.1 1999/11/30 13:32:48 itojun Exp $ */
/*
*
@@ -33,7 +33,7 @@
*/
/*
- * pmap.c: i386 pmap module rewrite
+ * pmap.c: i386 pmap module rewrite and ported to the ns532
* Chuck Cranor <chuck@ccrc.wustl.edu>
* 11-Aug-97
*
@@ -59,8 +59,8 @@
* (NetBSD/alpha).
*/
-#include "opt_cputype.h"
#include "opt_lockdebug.h"
+#include "opt_multiprocessor.h"
#include <sys/param.h>
#include <sys/systm.h>
@@ -76,15 +76,11 @@
#include <uvm/uvm.h>
#include <machine/cpu.h>
-#include <machine/specialreg.h>
-
-#include <dev/isa/isareg.h>
-#include <i386/isa/isa_machdep.h>
/*
* general info:
*
- * - for an explanation of how the i386 MMU hardware works see
+ * - for an explanation of how the ns532 MMU hardware works see
* the comments in <machine/pte.h>.
*
* - for an explanation of the general memory structure used by
@@ -253,7 +249,7 @@
* locking data structures
*/
-#if NCPU > 1 || defined(LOCKDEBUG)
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
struct lock pmap_main_lock;
simple_lock_data_t pvalloc_lock;
simple_lock_data_t pmaps_lock;
@@ -300,28 +296,18 @@ int nkpde = NKPTP;
#endif
/*
- * pmap_pg_g: if our processor supports PG_G in the PTE then we
- * set pmap_pg_g to PG_G (otherwise it is zero).
- */
-
-int pmap_pg_g = 0;
-
-/*
- * i386 physical memory comes in a big contig chunk with a small
- * hole toward the front of it... the following 4 paddr_t's
- * (shared with machdep.c) describe the physical address space
- * of this machine.
+ * pc532 physical memory comes in a big contig chunk. The following 2
+ * paddr_t's (shared with machdep.c) describe the physical address
+ * space of this machine.
*/
paddr_t avail_start; /* PA of first available physical page */
paddr_t avail_end; /* PA of last available physical page */
-paddr_t hole_start; /* PA of start of "hole" */
-paddr_t hole_end; /* PA of end of "hole" */
/*
* other data structures
*/
-static pt_entry_t protection_codes[8]; /* maps MI prot to i386 prot code */
+static pt_entry_t protection_codes[8]; /* maps MI prot to ns532 prot code */
static boolean_t pmap_initialized = FALSE; /* pmap_init done yet? */
/*
@@ -374,15 +360,6 @@ caddr_t vmmap; /* XXX: used by mem.c... it should really uvm_map_reserve it */
extern vaddr_t msgbuf_vaddr;
extern paddr_t msgbuf_paddr;
-extern vaddr_t idt_vaddr; /* we allocate IDT early */
-extern paddr_t idt_paddr;
-
-#if defined(I586_CPU)
-/* stuff to fix the pentium f00f bug */
-extern vaddr_t pentium_idt_vaddr;
-#endif
-
-
/*
* local prototypes
*/
@@ -446,7 +423,13 @@ __inline static boolean_t pmap_is_curpmap(pmap)
struct pmap *pmap;
{
- return((pmap == pmap_kernel()) || (pmap->pm_pdirpa == (paddr_t) rcr3()));
+ paddr_t ptb;
+ if (pmap == pmap_kernel())
+ return(1);
+ smr(ptb0, ptb);
+ if (pmap->pm_pdirpa == ptb)
+ return(1);
+ return(0);
}
/*
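
For readers coming from the i386 code: the rewritten test above replaces
the i386 rcr3() read of %cr3 with an smr(ptb0, ...) read of the ns32532
PTB0 (page table base) register. A minimal standalone sketch of the same
logic, assuming smr() is this port's move-from-special-register wrapper:

	/*
	 * Sketch only, not part of the diff: "is this pmap the one
	 * currently loaded in the MMU?"  The kernel pmap is always
	 * live; otherwise compare the pmap's page directory PA
	 * against the hardware page table base.
	 */
	static boolean_t
	is_current_pmap(struct pmap *pmap)
	{
		paddr_t ptb;

		if (pmap == pmap_kernel())
			return(TRUE);
		smr(ptb0, ptb);		/* ptb = current PTB0 contents */
		return(pmap->pm_pdirpa == ptb);
	}
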
@@ -646,18 +629,10 @@ vsize_t len;
}
*pte = 0; /* zap! */
-#if defined(I386_CPU)
- if (cpu_class != CPUCLASS_386)
-#endif
- pmap_update_pg(va);
+ pmap_update_pg(va);
}
-#if defined(I386_CPU)
- if (cpu_class == CPUCLASS_386)
- pmap_update();
-#endif
-
}
/*
@@ -689,31 +664,16 @@ int npgs;
pt_entry_t *pte, opte;
int lcv;
vaddr_t tva;
-#if defined(I386_CPU)
- boolean_t need_update = FALSE;
-#endif
for (lcv = 0 ; lcv < npgs ; lcv++) {
tva = va + lcv * NBPG;
pte = vtopte(tva);
opte = *pte;
- *pte = VM_PAGE_TO_PHYS(pgs[lcv]) | PG_RW | PG_V | pmap_pg_g; /* zap! */
-#if defined(I386_CPU)
- if (cpu_class == CPUCLASS_386) {
- if (pmap_valid_entry(opte))
- need_update = TRUE;
- continue;
- }
-#endif
+ *pte = VM_PAGE_TO_PHYS(pgs[lcv]) | PG_RW | PG_V; /* zap! */
if (pmap_valid_entry(opte))
pmap_update_pg(tva);
}
-#if defined(I386_CPU)
- if (need_update && cpu_class == CPUCLASS_386)
- pmap_update();
-#endif
-
}
/*
@@ -729,11 +689,11 @@ int npgs;
* properly enabled (called before main()). the VM system is
* fully init'd later...
*
- * => on i386, locore.s has already enabled the MMU by allocating
+ * => on ns532, machdep.c has already enabled the MMU by allocating
* a PDP for the kernel, and nkpde PTP's for the kernel.
* => kva_start is the first free virtual address in kernel space
* => we make use of the global vars from machdep.c:
- * avail_start, avail_end, hole_start, hole_end
+ * avail_start, avail_end
*/
void pmap_bootstrap(kva_start)
@@ -742,9 +702,7 @@ vaddr_t kva_start;
{
struct pmap *kpm;
- vaddr_t kva;
pt_entry_t *pte;
- int first16q;
/*
* set the page size (default value is 4K which is ok)
@@ -763,7 +721,7 @@ vaddr_t kva_start;
* use the very last page of physical memory for the message buffer
*/
- avail_end -= i386_round_page(MSGBUFSIZE);
+ avail_end -= ns532_round_page(MSGBUFSIZE);
msgbuf_paddr = avail_end;
/*
@@ -777,7 +735,7 @@ vaddr_t kva_start;
/*
* set up protection_codes: we need to be able to convert from
* a MI protection code (some combo of VM_PROT...) to something
- * we can jam into a i386 PTE.
+ * we can jam into a ns532 PTE.
*/
protection_codes[VM_PROT_NONE] = 0; /* --- */
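
The table initialized here is indexed by the eight MI VM_PROT combinations
and yields PTE permission bits. An illustration of the idea (the bit
choices below are assumptions; the port's actual encodings are in the
unchanged lines following this hunk):

	/* Illustration only; these values are assumed, not the port's
	 * real ones.  Read access needs no bit beyond validity;
	 * write access adds PG_RW. */
	protection_codes[VM_PROT_READ] = 0;				/* r-- */
	protection_codes[VM_PROT_READ | VM_PROT_WRITE] = PG_RW;	/* rw- */
	protection_codes[VM_PROT_ALL] = PG_RW;				/* rwx */
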
@@ -807,39 +765,24 @@ vaddr_t kva_start;
kpm->pm_obj.uo_npages = 0;
kpm->pm_obj.uo_refs = 1;
memset(&kpm->pm_list, 0, sizeof(kpm->pm_list)); /* pm_list not used */
- kpm->pm_pdir = (pd_entry_t *)(proc0.p_addr->u_pcb.pcb_cr3 + KERNBASE);
- kpm->pm_pdirpa = (u_int32_t) proc0.p_addr->u_pcb.pcb_cr3;
+ kpm->pm_pdir = (pd_entry_t *)(proc0.p_addr->u_pcb.pcb_ptb + KERNBASE);
+ kpm->pm_pdirpa = (u_int32_t) proc0.p_addr->u_pcb.pcb_ptb;
kpm->pm_stats.wired_count = kpm->pm_stats.resident_count =
- i386_btop(kva_start - VM_MIN_KERNEL_ADDRESS);
+ ns532_btop(kva_start - VM_MIN_KERNEL_ADDRESS);
/*
* the above is just a rough estimate and not critical to the proper
* operation of the system.
*/
curpcb->pcb_pmap = kpm; /* proc0's pcb */
/*
- * enable global TLB entries if they are supported
- */
-
- if (cpu_feature & CPUID_PGE) {
-
- lcr4(rcr4() | CR4_PGE); /* enable hardware (via %cr4) */
- pmap_pg_g = PG_G; /* enable software */
-
- /* add PG_G attribute to already mapped kernel pages */
- for (kva = VM_MIN_KERNEL_ADDRESS ; kva < virtual_avail ; kva += NBPG)
- if (pmap_valid_entry(PTE_BASE[i386_btop(kva)]))
- PTE_BASE[i386_btop(kva)] |= PG_G;
- }
-
- /*
* now we allocate the "special" VAs which are used for tmp mappings
* by the pmap (and other modules). we allocate the VAs by advancing
* virtual_avail (note that there are no pages mapped at these VAs).
* we find the PTE that maps the allocated VA via the linear PTE mapping.
*/
- pte = PTE_BASE + i386_btop(virtual_avail);
+ pte = PTE_BASE + ns532_btop(virtual_avail);
csrcp = (caddr_t) virtual_avail; csrc_pte = pte; /* allocate */
virtual_avail += NBPG; pte++; /* advance */
@@ -860,18 +803,6 @@ vaddr_t kva_start;
msgbuf_vaddr = virtual_avail; /* don't need pte */
virtual_avail += round_page(MSGBUFSIZE); pte++;
- idt_vaddr = virtual_avail; /* don't need pte */
- virtual_avail += NBPG; pte++;
- avail_end -= NBPG;
- idt_paddr = avail_end;
-
-#if defined(I586_CPU)
- /* pentium f00f bug stuff */
- pentium_idt_vaddr = virtual_avail; /* don't need pte */
- virtual_avail += NBPG; pte++;
-#endif
-
-
/*
* now we reserve some VM for mapping pages when doing a crash dump
*/
@@ -882,7 +813,7 @@ vaddr_t kva_start;
* init the static-global locks and global lists.
*/
-#if NCPU > 1 || defined(LOCKDEBUG)
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
lockinit(&pmap_main_lock, PVM, "pmaplk", 0, 0);
simple_lock_init(&pvalloc_lock);
simple_lock_init(&pmaps_lock);
@@ -905,32 +836,13 @@ vaddr_t kva_start;
* we must call uvm_page_physload() after we are done playing with
* virtual_avail but before we call pmap_steal_memory. [i.e. here]
* this call tells the VM system how much physical memory it
- * controls. If we have 16M of RAM or less, just put it all on
- * the default free list. Otherwise, put the first 16M of RAM
- * on a lower priority free list (so that all of the ISA DMA'able
- * memory won't be eaten up first-off).
+ * controls.
*/
- if (avail_end <= (16 * 1024 * 1024))
- first16q = VM_FREELIST_DEFAULT;
- else
- first16q = VM_FREELIST_FIRST16;
-
- if (avail_start < hole_start) /* any free memory before the hole? */
- uvm_page_physload(atop(avail_start), atop(hole_start),
- atop(avail_start), atop(hole_start), first16q);
-
- if (first16q == VM_FREELIST_FIRST16) {
- uvm_page_physload(atop(hole_end), atop(16 * 1024 * 1024),
- atop(hole_end), atop(16 * 1024 * 1024), first16q);
- uvm_page_physload(atop(16 * 1024 * 1024), atop(avail_end),
- atop(16 * 1024 * 1024), atop(avail_end),
- VM_FREELIST_DEFAULT);
- } else {
- uvm_page_physload(atop(hole_end), atop(avail_end),
- atop(hole_end), atop(avail_end), first16q);
- }
-
+ uvm_page_physload(atop(avail_start), atop(avail_end),
+ atop(avail_start), atop(avail_end),
+ VM_FREELIST_DEFAULT);
+
/*
* ensure the TLB is sync'd with reality by flushing it...
*/
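
A note on the physload simplification above: atop() converts a byte
address to a page frame number, so the single call registers the whole
contiguous segment with UVM. A worked example with hypothetical numbers,
assuming the pc532's 4 KB page size:

	/* Hypothetical numbers, illustration only: with 4KB pages,
	 * atop(pa) == pa >> 12, so RAM spanning 0x00002000..0x00800000
	 * would be registered as page frames 2 through 2048:
	 *
	 *	uvm_page_physload(2, 2048, 2, 2048, VM_FREELIST_DEFAULT);
	 *
	 * The removed i386 first-16M split exists only to keep
	 * ISA-DMA'able memory in reserve; the pc532 has no ISA bus,
	 * so one free list suffices.
	 */
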
@@ -963,8 +875,8 @@ void pmap_init()
npages = 0;
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
npages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
- s = (vsize_t) (sizeof(struct pv_head) * npages +
- sizeof(char) * npages);
+ s = (vsize_t) (sizeof(struct pv_head) * npages +
+ sizeof(*(vm_physmem[0].pmseg.attrs)) * npages);
s = round_page(s); /* round up */
addr = (vaddr_t) uvm_km_zalloc(kernel_map, s);
if (addr == NULL)
@@ -983,7 +895,7 @@ void pmap_init()
/* now allocate attrs */
for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
- vm_physmem[lcv].pmseg.attrs = (char *) addr;
+ vm_physmem[lcv].pmseg.attrs = (short *) addr;
addr = (vaddr_t)(vm_physmem[lcv].pmseg.attrs +
(vm_physmem[lcv].end - vm_physmem[lcv].start));
}
@@ -1162,7 +1074,7 @@ int mode;
}
pg = uvm_pagealloc(uvmexp.kmem_object, pv_cachedva - vm_map_min(kernel_map),
- NULL);
+ NULL, UVM_PGA_USERESERVE);
if (pg)
pg->flags &= ~PG_BUSY; /* never busy */
@@ -1351,7 +1263,7 @@ struct pv_entry *pv;
{
struct pv_page *pvp;
- pvp = (struct pv_page *) i386_trunc_page(pv);
+ pvp = (struct pv_page *) ns532_trunc_page(pv);
pv_nfpvents++;
pvp->pvinfo.pvpi_nfree++;
@@ -1460,7 +1372,7 @@ static void pmap_free_pvpage()
/* unmap the page */
dead_entries = NULL;
(void)uvm_unmap_remove(map, (vaddr_t) pvp,
- ((vaddr_t) pvp) + NBPG, TRUE, &dead_entries);
+ ((vaddr_t) pvp) + NBPG, &dead_entries);
vm_map_unlock(map);
if (dead_entries != NULL)
@@ -1572,8 +1484,8 @@ boolean_t just_try;
{
struct vm_page *ptp;
- ptp = uvm_pagealloc(&pmap->pm_obj, ptp_i2o(pde_index), NULL);
-
+ ptp = uvm_pagealloc(&pmap->pm_obj, ptp_i2o(pde_index), NULL,
+ UVM_PGA_USERESERVE);
if (ptp == NULL) {
if (just_try)
return(NULL);
@@ -1770,6 +1682,7 @@ struct pmap *pmap;
memset(pmap->pm_pdir, 0, PDSLOT_PTE * sizeof(pd_entry_t));
/* put in recursive PDE to map the PTEs */
pmap->pm_pdir[PDSLOT_PTE] = pmap->pm_pdirpa | PG_V | PG_KW;
+
/*
* we need to lock pmaps_lock to prevent nkpde from changing on
@@ -1779,11 +1692,13 @@ struct pmap *pmap;
*/
simple_lock(&pmaps_lock);
/* put in kernel VM PDEs */
- memcpy(&pmap->pm_pdir[PDSLOT_KERN], &PDP_BASE[PDSLOT_KERN],
- nkpde * sizeof(pd_entry_t));
+ movsdnu(&PDP_BASE[PDSLOT_KERN], &pmap->pm_pdir[PDSLOT_KERN],
+ nkpde * sizeof(pd_entry_t) / 4);
/* zero the rest */
memset(&pmap->pm_pdir[PDSLOT_KERN + nkpde], 0,
NBPG - ((PDSLOT_KERN + nkpde) * sizeof(pd_entry_t)));
+ /* wire in i/o page */
+ pmap->pm_pdir[PDSLOT_APTE + 1] = PDP_BASE[PDSLOT_APTE + 1];
LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
if (pmaps_hand == NULL)
pmaps_hand = pmap;
@@ -1900,10 +1815,10 @@ struct proc *p;
struct pcb *pcb = &p->p_addr->u_pcb;
struct pmap *pmap = p->p_vmspace->vm_map.pmap;
- pcb->pcb_cr3 = pmap->pm_pdirpa;
+ pcb->pcb_ptb = pmap->pm_pdirpa;
pcb->pcb_pmap = pmap;
if (p == curproc) {
- lcr3(pcb->pcb_cr3);
+ load_ptb(pcb->pcb_ptb);
}
}
@@ -1946,7 +1861,7 @@ vaddr_t va;
if (pmap->pm_pdir[pdei(va)]) {
ptes = pmap_map_ptes(pmap);
- retval = (paddr_t) (ptes[i386_btop(va)] & PG_FRAME);
+ retval = (paddr_t) (ptes[ns532_btop(va)] & PG_FRAME);
pmap_unmap_ptes(pmap);
return(retval | (va & ~PG_FRAME));
}
@@ -1984,7 +1899,7 @@ int prot;
{
while (spa < epa) {
- pmap_enter(pmap_kernel(), va, spa, prot, FALSE);
+ pmap_enter(pmap_kernel(), va, spa, prot, FALSE, 0);
va += NBPG;
spa += NBPG;
}
@@ -2030,7 +1945,7 @@ paddr_t srcpa, dstpa;
*csrc_pte = (srcpa & PG_FRAME) | PG_V | PG_RW;
*cdst_pte = (dstpa & PG_FRAME) | PG_V | PG_RW;
- memcpy(cdstp, csrcp, NBPG);
+ movsdnu(csrcp, cdstp, NBPG / 4);
*csrc_pte = *cdst_pte = 0; /* zap! */
pmap_update_2pg((vaddr_t)csrcp, (vaddr_t)cdstp);
simple_unlock(&pmap_copy_page_lock);
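
On movsdnu(), used here and in the pmap_pinit() hunk above: it is assumed
to be this port's wrapper around the ns32532 MOVSD string instruction,
copying a count of 32-bit words (hence the byte length divided by 4, with
the source before the destination in the argument list). A portable sketch
of the assumed semantics:

	/* Sketch of the assumed semantics only; the real movsdnu() is
	 * presumably a machine-dependent inline around MOVSD. */
	static void
	movsdnu_sketch(const void *src, void *dst, size_t nwords)
	{
		const u_int32_t *s = src;
		u_int32_t *d = dst;

		while (nwords--)
			*d++ = *s++;
	}
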
@@ -2089,15 +2004,11 @@ vaddr_t startva, endva;
if (pmap_rr) { /* worried about tlb flushing? */
- if (opte & PG_G) {
- pmap_update_pg(startva); /* PG_G requires this */
+ if (pmap_rr->prr_npages < PMAP_RR_MAX) {
+ pmap_rr->prr_vas[pmap_rr->prr_npages++] = startva;
} else {
- if (pmap_rr->prr_npages < PMAP_RR_MAX) {
- pmap_rr->prr_vas[pmap_rr->prr_npages++] = startva;
- } else {
- if (pmap_rr->prr_npages == PMAP_RR_MAX)
- pmap_rr->prr_npages++; /* signal an overflow */
- }
+ if (pmap_rr->prr_npages == PMAP_RR_MAX)
+ pmap_rr->prr_npages++; /* signal an overflow */
}
}
@@ -2111,7 +2022,7 @@ vaddr_t startva, endva;
if ((opte & PG_PVLIST) == 0)
continue;
- bank = vm_physseg_find(i386_btop(opte & PG_FRAME), &off);
+ bank = vm_physseg_find(ns532_btop(opte & PG_FRAME), &off);
if (bank == -1)
panic("pmap_remove_ptes: unmanaged page marked PG_PVLIST");
@@ -2185,7 +2096,7 @@ vaddr_t va;
if ((opte & PG_PVLIST) == 0)
return(TRUE);
- bank = vm_physseg_find(i386_btop(opte & PG_FRAME), &off);
+ bank = vm_physseg_find(ns532_btop(opte & PG_FRAME), &off);
if (bank == -1)
panic("pmap_remove_pte: unmanaged page marked PG_PVLIST");
@@ -2266,17 +2177,12 @@ vaddr_t sva, eva;
}
/* do it! */
- result = pmap_remove_pte(pmap, ptp, &ptes[i386_btop(sva)], sva);
+ result = pmap_remove_pte(pmap, ptp, &ptes[ns532_btop(sva)], sva);
/* if mapping removed and the PTP is no longer being used, free it! */
if (result && ptp && ptp->wire_count <= 1) {
pmap->pm_pdir[pdei(sva)] = 0; /* zap! */
-#if defined(I386_CPU)
- if (cpu_class != CPUCLASS_386) /* already dumped whole TLB on i386 */
-#endif
- {
- pmap_update_pg( ((vaddr_t) ptes) + ptp->offset );
- }
+ pmap_update_pg( ((vaddr_t) ptes) + ptp->offset );
pmap->pm_stats.resident_count--;
if (pmap->pm_ptphint == ptp) /* update hint? */
pmap->pm_ptphint = pmap->pm_obj.memq.tqh_first;
@@ -2311,7 +2217,7 @@ vaddr_t sva, eva;
for (/* null */ ; sva < eva ; sva = blkendva) {
/* determine range of block */
- blkendva = i386_round_pdr(sva+1);
+ blkendva = ns532_round_pdr(sva+1);
if (blkendva > eva)
blkendva = eva;
@@ -2354,18 +2260,13 @@ vaddr_t sva, eva;
}
}
- pmap_remove_ptes(pmap, prr, ptp, (vaddr_t)&ptes[i386_btop(sva)],
+ pmap_remove_ptes(pmap, prr, ptp, (vaddr_t)&ptes[ns532_btop(sva)],
sva, blkendva);
/* if PTP is no longer being used, free it! */
if (ptp && ptp->wire_count <= 1) {
pmap->pm_pdir[pdei(sva)] = 0; /* zap! */
pmap_update_pg( ((vaddr_t) ptes) + ptp->offset);
-#if defined(I386_CPU)
- /* cancel possible pending pmap update on i386 */
- if (cpu_class == CPUCLASS_386 && prr)
- prr->prr_npages = 0;
-#endif
pmap->pm_stats.resident_count--;
if (pmap->pm_ptphint == ptp) /* update hint? */
pmap->pm_ptphint = pmap->pm_obj.memq.tqh_first;
@@ -2379,20 +2280,13 @@ vaddr_t sva, eva;
*/
if (prr && prr->prr_npages) {
-#if defined(I386_CPU)
- if (cpu_class == CPUCLASS_386) {
+ if (prr->prr_npages > PMAP_RR_MAX) {
pmap_update();
- } else
-#endif
- { /* not I386 */
- if (prr->prr_npages > PMAP_RR_MAX) {
- pmap_update();
- } else {
- while (prr->prr_npages) {
- pmap_update_pg(prr->prr_vas[--prr->prr_npages]);
- }
- }
- } /* not I386 */
+ } else {
+ while (prr->prr_npages) {
+ pmap_update_pg(prr->prr_vas[--prr->prr_npages]);
+ }
+ }
}
/*
@@ -2420,9 +2314,6 @@ struct vm_page *pg;
struct pv_head *pvh;
struct pv_entry *pve;
pt_entry_t *ptes, opte;
-#if defined(I386_CPU)
- boolean_t needs_update = FALSE;
-#endif
/* XXX: vm_page should either contain pv_head or have a pointer to it */
bank = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), &off);
@@ -2456,20 +2347,15 @@ struct vm_page *pg;
}
#endif
- opte = ptes[i386_btop(pve->pv_va)];
- ptes[i386_btop(pve->pv_va)] = 0; /* zap! */
+ opte = ptes[ns532_btop(pve->pv_va)];
+ ptes[ns532_btop(pve->pv_va)] = 0; /* zap! */
if (opte & PG_W)
pve->pv_pmap->pm_stats.wired_count--;
pve->pv_pmap->pm_stats.resident_count--;
if (pmap_is_curpmap(pve->pv_pmap)) {
-#if defined(I386_CPU)
- if (cpu_class == CPUCLASS_386)
- needs_update = TRUE;
- else
-#endif
- pmap_update_pg(pve->pv_va);
+ pmap_update_pg(pve->pv_va);
}
/* sync R/M bits */
@@ -2481,9 +2367,6 @@ struct vm_page *pg;
if (pve->pv_ptp->wire_count <= 1) {
pve->pv_pmap->pm_pdir[pdei(pve->pv_va)] = 0; /* zap! */
pmap_update_pg( ((vaddr_t) ptes) + pve->pv_ptp->offset);
-#if defined(I386_CPU)
- needs_update = FALSE;
-#endif
pve->pv_pmap->pm_stats.resident_count--;
if (pve->pv_pmap->pm_ptphint == pve->pv_ptp) /* update hint? */
pve->pv_pmap->pm_ptphint = pve->pv_pmap->pm_obj.memq.tqh_first;
@@ -2500,11 +2383,6 @@ struct vm_page *pg;
simple_unlock(&pvh->pvh_lock);
PMAP_HEAD_TO_MAP_UNLOCK();
-#if defined(I386_CPU)
- if (needs_update)
- pmap_update();
-#endif
-
return;
}
@@ -2528,7 +2406,7 @@ int testbits;
{
int bank, off;
- char *myattrs;
+ short *myattrs;
struct pv_head *pvh;
struct pv_entry *pve;
pt_entry_t *ptes, pte;
@@ -2559,7 +2437,7 @@ int testbits;
pve = pve->pv_next) {
ptes = pmap_map_ptes(pve->pv_pmap);
- pte = ptes[i386_btop(pve->pv_va)];
+ pte = ptes[ns532_btop(pve->pv_va)];
pmap_unmap_ptes(pve->pv_pmap);
*myattrs |= pte;
@@ -2593,10 +2471,7 @@ int setbits, clearbits;
struct pv_head *pvh;
struct pv_entry *pve;
pt_entry_t *ptes, npte;
- char *myattrs;
-#if defined(I386_CPU)
- boolean_t needs_update = FALSE;
-#endif
+ short *myattrs;
/* XXX: vm_page should either contain pv_head or have a pointer to it */
bank = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), &off);
@@ -2624,19 +2499,14 @@ int setbits, clearbits;
#endif
ptes = pmap_map_ptes(pve->pv_pmap); /* locks pmap */
- npte = ptes[i386_btop(pve->pv_va)];
+ npte = ptes[ns532_btop(pve->pv_va)];
result |= (npte & clearbits);
npte = (npte | setbits) & ~clearbits;
- if (ptes[i386_btop(pve->pv_va)] != npte) {
- ptes[i386_btop(pve->pv_va)] = npte; /* zap! */
+ if (ptes[ns532_btop(pve->pv_va)] != npte) {
+ ptes[ns532_btop(pve->pv_va)] = npte; /* zap! */
if (pmap_is_curpmap(pve->pv_pmap)) {
-#if defined(I386_CPU)
- if (cpu_class == CPUCLASS_386)
- needs_update = TRUE;
- else
-#endif
- pmap_update_pg(pve->pv_va);
+ pmap_update_pg(pve->pv_va);
}
}
pmap_unmap_ptes(pve->pv_pmap); /* unlocks pmap */
@@ -2646,11 +2516,6 @@ int setbits, clearbits;
simple_unlock(&pvh->pvh_lock);
PMAP_HEAD_TO_MAP_UNLOCK();
-#if defined(I386_CPU)
- if (needs_update)
- pmap_update();
-#endif
-
return(result != 0);
}
@@ -2731,8 +2596,8 @@ vm_prot_t prot;
else if (sva < VM_MAX_ADDRESS)
md_prot |= (PG_u | PG_RW); /* XXX: write-prot our PTES? never! */
- spte = &ptes[i386_btop(sva)];
- epte = &ptes[i386_btop(blockend)];
+ spte = &ptes[ns532_btop(sva)];
+ epte = &ptes[ns532_btop(blockend)];
for (/*null */; spte < epte ; spte++) {
@@ -2746,16 +2611,12 @@ vm_prot_t prot;
if (prr) { /* worried about tlb flushing? */
- va = i386_ptob(spte - ptes);
- if (npte & PG_G) {
- pmap_update_pg(va);/* PG_G requires this */
+ va = ns532_ptob(spte - ptes);
+ if (prr->prr_npages < PMAP_RR_MAX) {
+ prr->prr_vas[prr->prr_npages++] = va;
} else {
- if (prr->prr_npages < PMAP_RR_MAX) {
- prr->prr_vas[prr->prr_npages++] = va;
- } else {
- if (prr->prr_npages == PMAP_RR_MAX)
- prr->prr_npages++; /* signal an overflow */
- }
+ if (prr->prr_npages == PMAP_RR_MAX)
+ prr->prr_npages++; /* signal an overflow */
}
} /* if (prr) */
@@ -2771,20 +2632,13 @@ vm_prot_t prot;
*/
if (prr && prr->prr_npages) {
-#if defined(I386_CPU)
- if (cpu_class == CPUCLASS_386) {
+ if (prr->prr_npages > PMAP_RR_MAX) {
pmap_update();
- } else
-#endif
- { /* not I386 */
- if (prr->prr_npages > PMAP_RR_MAX) {
- pmap_update();
- } else {
- while (prr->prr_npages) {
- pmap_update_pg(prr->prr_vas[--prr->prr_npages]);
- }
- }
- } /* not I386 */
+ } else {
+ while (prr->prr_npages) {
+ pmap_update_pg(prr->prr_vas[--prr->prr_npages]);
+ }
+ }
}
/*
@@ -2820,13 +2674,13 @@ boolean_t wired;
ptes = pmap_map_ptes(pmap); /* locks pmap */
- if (!pmap_valid_entry(ptes[i386_btop(va)]))
+ if (!pmap_valid_entry(ptes[ns532_btop(va)]))
panic("pmap_change_wiring: invalid (unmapped) va");
- if (!wired && (ptes[i386_btop(va)] & PG_W) != 0) {
- ptes[i386_btop(va)] &= ~PG_W;
+ if (!wired && (ptes[ns532_btop(va)] & PG_W) != 0) {
+ ptes[ns532_btop(va)] &= ~PG_W;
pmap->pm_stats.wired_count--;
- } else if (wired && (ptes[i386_btop(va)] & PG_W) == 0) {
- ptes[i386_btop(va)] |= PG_W;
+ } else if (wired && (ptes[ns532_btop(va)] & PG_W) == 0) {
+ ptes[ns532_btop(va)] |= PG_W;
pmap->pm_stats.wired_count++;
}
@@ -2964,11 +2818,11 @@ boolean_t move;
*/
srcl.addr = saddr;
- srcl.pte = &srcptes[i386_btop(srcl.addr)];
+ srcl.pte = &srcptes[ns532_btop(srcl.addr)];
srcl.ptp = NULL;
dstl.addr = daddr;
if (dstptes)
- dstl.pte = &dstptes[i386_btop(dstl.addr)];
+ dstl.pte = &dstptes[ns532_btop(dstl.addr)];
else
dstl.pte = NULL; /* we map page at a time */
dstl.ptp = NULL;
@@ -2980,10 +2834,10 @@ boolean_t move;
* compute the size of this block.
*/
- blklen = i386_round_pdr(srcl.addr+1) - srcl.addr; /* length in bytes */
+ blklen = ns532_round_pdr(srcl.addr+1) - srcl.addr; /* length in bytes */
if (blklen > len)
blklen = len;
- blkpgs = i386_btop(blklen);
+ blkpgs = ns532_btop(blklen);
/*
* if the block is not valid in the src pmap, then we can skip it!
@@ -3027,7 +2881,7 @@ boolean_t move;
} else {
dstl.ptp = NULL;
}
- dstvalid = i386_btop(i386_round_pdr(dstl.addr+1) - dstl.addr);
+ dstvalid = ns532_btop(ns532_round_pdr(dstl.addr+1) - dstl.addr);
if (dstptes == NULL) {
dstl.pte = (pt_entry_t *)
pmap_tmpmap_pa(dstpmap->pm_pdir[pdei(dstl.addr)] & PG_FRAME);
@@ -3055,7 +2909,7 @@ boolean_t move;
dstvalid -= toxfer;
blkpgs -= toxfer;
- len -= i386_ptob(toxfer);
+ len -= ns532_ptob(toxfer);
if (blkpgs == 0) /* ran out of src PTEs? restart loop */
continue;
}
@@ -3123,7 +2977,7 @@ boolean_t move;
dstvalid -= toxfer;
blkpgs -= toxfer;
- len -= i386_ptob(toxfer);
+ len -= ns532_ptob(toxfer);
/*
* done src pte block
@@ -3168,7 +3022,7 @@ boolean_t move;
if (dstl->addr < VM_MAX_ADDRESS)
dstproto = PG_u; /* "user" page */
else
- dstproto = pmap_pg_g; /* kernel page */
+ dstproto = 0;
/*
* ensure we have dst PTP for user addresses.
@@ -3287,7 +3141,7 @@ boolean_t move;
if (opte & PG_W)
srcpmap->pm_stats.wired_count--;
}
- *dstl->pte = (opte & ~(PG_u|PG_U|PG_M|PG_G|PG_W)) | dstproto;/* install */
+ *dstl->pte = (opte & ~(PG_u|PG_U|PG_M|PG_W)) | dstproto;/* install */
dstpmap->pm_stats.resident_count++;
if (dstl->ptp)
dstl->ptp->wire_count++;
@@ -3329,13 +3183,14 @@ boolean_t move;
* => we set pmap => pv_head locking
*/
-void pmap_enter(pmap, va, pa, prot, wired)
+void pmap_enter(pmap, va, pa, prot, wired, access_type)
struct pmap *pmap;
vaddr_t va;
paddr_t pa;
vm_prot_t prot;
boolean_t wired;
+vm_prot_t access_type;
{
pt_entry_t *ptes, opte, npte;
@@ -3372,7 +3227,7 @@ boolean_t wired;
else
ptp = pmap_get_ptp(pmap, pdei(va), FALSE);
- opte = ptes[i386_btop(va)]; /* old PTE */
+ opte = ptes[ns532_btop(va)]; /* old PTE */
/*
@@ -3485,16 +3340,25 @@ boolean_t wired;
npte |= PG_u;
else if (va < VM_MAX_ADDRESS)
npte |= (PG_u | PG_RW); /* XXXCDC: no longer needed? */
- if (pmap == pmap_kernel())
- npte |= pmap_pg_g;
- ptes[i386_btop(va)] = npte; /* zap! */
+ ptes[ns532_btop(va)] = npte; /* zap! */
if ((opte & ~(PG_M|PG_U)) != npte && pmap_is_curpmap(pmap))
pmap_update_pg(va);
pmap_unmap_ptes(pmap);
PMAP_MAP_TO_HEAD_UNLOCK();
+
+ /*
+ * The only time we need to flush the cache is if we
+ * execute from a physical address and then change the data.
+ * This is the best place to do this.
+ * pmap_protect() and pmap_remove() are mostly used to switch
+ * between R/W and R/O pages.
+ * NOTE: we only support cache flush for read only text.
+ */
+ if (prot == (VM_PROT_READ | VM_PROT_EXECUTE))
+ cinv(ia, 0);
}
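
On the cache flush added above: as the new comment says, the instruction
cache is not coherent with stores, so text written through a writable
mapping must be invalidated before it is executed; cinv(ia, 0) is taken
here to invalidate the entire instruction cache. A hedged caller-side
sketch using the new six-argument pmap_enter() signature from this diff
(text_va and text_pa are hypothetical names):

	/* Hypothetical caller: after copying program text to text_pa,
	 * map it read-only and executable; per the hunk above,
	 * pmap_enter() then flushes the icache on our behalf. */
	pmap_enter(pmap_kernel(), text_va, text_pa,
	    VM_PROT_READ | VM_PROT_EXECUTE, FALSE, 0);
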
/*
@@ -3504,7 +3368,7 @@ boolean_t wired;
* the pmaps on the system.
*/
-void pmap_growkernel(maxkvaddr)
+vaddr_t pmap_growkernel(maxkvaddr)
vaddr_t maxkvaddr;
@@ -3515,7 +3379,7 @@ vaddr_t maxkvaddr;
needed_kpde = (int)(maxkvaddr - VM_MIN_KERNEL_ADDRESS + (NBPD-1)) / NBPD;
if (needed_kpde <= nkpde)
- return; /* we are OK */
+ goto out; /* we are OK */
/*
* whoops! we need to add kernel PTPs
@@ -3526,6 +3390,21 @@ vaddr_t maxkvaddr;
for (/*null*/ ; nkpde < needed_kpde ; nkpde++) {
+ if (pmap_initialized == FALSE) {
+ /*
+ * we're growing the kernel pmap early (from uvm_pageboot_alloc()).
+ * this case must be handled a little differently.
+ */
+ paddr_t ptaddr;
+
+ if (uvm_page_physget(&ptaddr) == FALSE)
+ panic("pmap_growkernel: out of memory");
+
+ kpm->pm_pdir[PDSLOT_KERN + nkpde] = ptaddr | PG_RW | PG_V;
+ kpm->pm_stats.resident_count++; /* count PTP as resident */
+ continue;
+ }
+
pmap_alloc_ptp(kpm, PDSLOT_KERN + nkpde, FALSE);
kpm->pm_pdir[PDSLOT_KERN + nkpde] &= ~PG_u; /* PG_u not for kernel */
@@ -3539,6 +3418,9 @@ vaddr_t maxkvaddr;
simple_unlock(&kpm->pm_obj.vmobjlock);
splx(s);
+
+ out:
+ return (VM_MIN_KERNEL_ADDRESS + (nkpde * NBPD));
}
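
Two related changes in this hunk: pmap_growkernel() now returns the
highest kernel VA it guarantees is backed by PTPs (VM_MIN_KERNEL_ADDRESS
plus nkpde * NBPD), and it can grow the pmap before pmap_init() by
stealing a PTP page with uvm_page_physget(). A hedged sketch of how a
caller might use the new return value (new_limit is a hypothetical name):

	/* Hypothetical caller sketch: request growth up to new_limit
	 * and record how far kernel VA is actually mapped. */
	vaddr_t mapped_end;

	mapped_end = pmap_growkernel(new_limit);
	if (mapped_end < new_limit)
		panic("kernel VA grow failed");	/* can't happen: it panics first */
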
#ifdef DEBUG
@@ -3581,14 +3463,14 @@ vaddr_t sva, eva;
for (/* null */ ; sva < eva ; sva = blkendva) {
/* determine range of block */
- blkendva = i386_round_pdr(sva+1);
+ blkendva = ns532_round_pdr(sva+1);
if (blkendva > eva)
blkendva = eva;
if (!pmap_valid_entry(pmap->pm_pdir[pdei(sva)])) /* valid block? */
continue;
- pte = &ptes[i386_btop(sva)];
+ pte = &ptes[ns532_btop(sva)];
for (/* null */; sva < blkendva ; sva += NBPG, pte++) {
if (!pmap_valid_entry(*pte)) continue;
printf("va %#lx -> pa %#x (pte=%#x)\n", sva, *pte, *pte & PG_FRAME);
