--- version 1.11, 2004/09/04 13:43:11
+++ version 1.12, 2005/04/01 11:59:33
@@ -91 +91 @@ extern caddr_t msgbufaddr;
 void
 pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
 {
-	paddr_t kstpa, kptpa, iiopa, eiopa, kptmpa, p0upa;
+	paddr_t kstpa, kptpa, kptmpa, p0upa;
 	u_int nptpages, kstsize;
 	st_entry_t protoste, *ste;
 	pt_entry_t protopte, *pte, *epte;
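A note on the RELOC() accesses that appear throughout both revisions: pmap_bootstrap() runs before the MMU is enabled, so the code executes from physical addresses while its globals are linked at virtual addresses, and firstpa is the physical address the kernel was loaded at. The helper below sketches the conventional shape of RELOC() in these m68k pmap_bootstrap files; treat the exact definition as an assumption and check the macro near the top of the real file.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only: the usual shape of RELOC() in m68k pmap_bootstrap.c files.
 * It turns the link-time (virtual) address of a kernel global into the
 * physical address it currently lives at, by adding the load offset firstpa.
 */
#define	RELOC(v, t)	(*(t *)((uintptr_t)&(v) + firstpa))

int Sysptsize = 2;		/* stands in for a kernel global */
uintptr_t firstpa = 0;		/* load offset; zero so this hosted demo runs */

int
main(void)
{
	/* In the kernel this access would go through the physical alias. */
	printf("Sysptsize = %d\n", RELOC(Sysptsize, int));
	return 0;
}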
@@ -106 +106 @@ pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
  *	kptpa		statically allocated
  *			kernel PT pages		Sysptsize+ pages
  *
- *	iiopa		internal IO space
- *			PT pages		iiomapsize pages
- *
- *	eiopa		external IO space
- *			PT pages		eiomapsize pages
- *
  * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
  *   EIOMAPSIZE are the number of PTEs, hence we need to round
  *   the total to a page boundary with IO maps at the end. ]
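For concreteness, the bracketed note above works out as follows. This is an illustration only: NPTEPG assumes 4KB pages, and the Sysptsize, IIOMAPSIZE and EIOMAPSIZE values are made-up examples rather than any port's real configuration.

/*
 * Illustrative only: how the PT-page rounding described above works out.
 * Sysptsize counts whole PT pages; IIOMAPSIZE/EIOMAPSIZE count PTEs.
 */
#include <stdio.h>

#define NPTEPG		1024	/* PTEs per PT page, assuming 4KB pages */
#define SYSPTSIZE	2	/* example: two pages of kernel PTEs */
#define IIOMAPSIZE	1024	/* example: 4MB of internal IO space, in PTEs */
#define EIOMAPSIZE	128	/* example: 512KB of external IO space, in PTEs */

int
main(void)
{
	/* Round the IO PTEs up to whole PT pages, then add Sysptsize. */
	int nptpages = SYSPTSIZE +
	    (IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;

	printf("nptpages = %d\n", nptpages);	/* 2 + 2 = 4 */
	return 0;
}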
@@ -142 +136 @@ pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
 #endif
 	kstpa = nextpa;
 	nextpa += kstsize * PAGE_SIZE;
-	kptpa = nextpa;
-	nptpages = RELOC(Sysptsize, int) +
-	    (iiomapsize + eiomapsize + NPTEPG - 1) / NPTEPG;
-	nextpa += nptpages * PAGE_SIZE;
-	eiopa = nextpa - eiomapsize * sizeof(pt_entry_t);
-	iiopa = eiopa - iiomapsize * sizeof(pt_entry_t);
 	kptmpa = nextpa;
 	nextpa += PAGE_SIZE;
 	p0upa = nextpa;
 	nextpa += USPACE;
+	kptpa = nextpa;
+	nptpages = RELOC(Sysptsize, int) +
+	    (iiomapsize + eiomapsize + NPTEPG - 1) / NPTEPG;
+	nextpa += nptpages * PAGE_SIZE;

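The net effect of the moved lines is that in 1.12 the kernel PT pages become the last region carved out of nextpa, after the segment table, the Sysptmap page and the proc0 u-area. A hosted sketch of the 1.12 ordering; kstsize, USPACE, nptpages and the start address are placeholder example values, not taken from any real configuration.

/*
 * Sketch of the rev 1.12 physical carve-out order, using assumed sizes.
 * Each region is allocated by bumping nextpa, exactly as in the code above.
 */
#include <stdio.h>

#define PAGE_SIZE	4096u
#define USPACE		(2 * PAGE_SIZE)		/* example u-area size */

int
main(void)
{
	unsigned kstsize = 2;			/* example: 68040 segment table */
	unsigned nptpages = 4;			/* example from the sketch above */
	unsigned nextpa = 0x100000;		/* example starting physical address */

	unsigned kstpa = nextpa;  nextpa += kstsize * PAGE_SIZE;	/* segment table */
	unsigned kptmpa = nextpa; nextpa += PAGE_SIZE;			/* Sysptmap page */
	unsigned p0upa = nextpa;  nextpa += USPACE;			/* proc0 u-area */
	unsigned kptpa = nextpa;  nextpa += nptpages * PAGE_SIZE;	/* kernel PT pages */

	printf("kstpa  = 0x%08x\n", kstpa);
	printf("kptmpa = 0x%08x\n", kptmpa);
	printf("p0upa  = 0x%08x\n", p0upa);
	printf("kptpa  = 0x%08x\n", kptpa);
	printf("nextpa = 0x%08x\n", nextpa);
	return 0;
}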
 	/*
 	 * Clear all PTEs to zero
@@ -205 +197 @@ pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
 		 * Initialize level 2 descriptors (which immediately
 		 * follow the level 1 table).  We need:
 		 *	NPTEPG / SG4_LEV3SIZE
-		 * level 2 descriptors to map each of the nptpages+1
+		 * level 2 descriptors to map each of the nptpages
 		 * pages of PTEs.  Note that we set the "used" bit
 		 * now to save the HW the expense of doing it.
 		 */
-		num = (nptpages + 1) * (NPTEPG / SG4_LEV3SIZE);
+		num = nptpages * (NPTEPG / SG4_LEV3SIZE);
 		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
 		epte = &pte[num];
 		protoste = kptpa | SG_U | SG_RW | SG_V;
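The arithmetic behind that comment, assuming 4KB pages and the usual NetBSD/m68k sizes for the 68040 MMU tables (7-bit root index, 7-bit pointer index, 6-bit page index). The constants are assumptions here; the port's pte.h has the authoritative values.

/*
 * Back-of-the-envelope version of the comment above, assuming 4KB pages.
 */
#include <stdio.h>

#define NPTEPG		1024	/* PTEs in one PT page (4KB / 4 bytes) */
#define SG4_LEV1SIZE	128	/* level 1 (root) descriptors */
#define SG4_LEV2SIZE	128	/* descriptors per level 2 table */
#define SG4_LEV3SIZE	64	/* PTEs per level 3 table */

int
main(void)
{
	int nptpages = 4;				/* example PT page count */
	int lev2_per_pt = NPTEPG / SG4_LEV3SIZE;	/* 16 level 2 descriptors */
	int num = nptpages * lev2_per_pt;		/* 64, as in `num' above */

	/* One PT page holds 1024 PTEs, i.e. sixteen 64-entry level 3 tables,
	 * so each PT page consumes 16 level 2 descriptors. */
	printf("num = %d (out of %d per level 2 table)\n", num, SG4_LEV2SIZE);
	return 0;
}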
@@ -237 +229 @@ pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
 		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
 		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
 		/*
+		 * Now initialize the final portion of that block of
+		 * descriptors to map Sysmap.
+		 */
+		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
+		epte = &pte[NPTEPG/SG4_LEV3SIZE];
+		protoste = kptmpa | SG_U | SG_RW | SG_V;
+		while (pte < epte) {
+			*pte++ = protoste;
+			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
+		}
+		/*
 		 * Initialize Sysptmap
 		 */
 		pte = (u_int *)kptmpa;
-		epte = &pte[nptpages+1];
+		epte = &pte[nptpages];
 		protopte = kptpa | PG_RW | PG_CI | PG_V;
 		while (pte < epte) {
 			*pte++ = protopte;
@@ -249 +252 @@ pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
 		/*
 		 * Invalidate all but the last remaining entry.
 		 */
-		epte = &((u_int *)kptmpa)[NPTEPG];
+		epte = &((u_int *)kptmpa)[NPTEPG-1];
 		while (pte < epte) {
 			*pte++ = PG_NV;
 		}
+		/*
+		 * Initialize the last one to point to Sysptmap.
+		 */
+		*pte = kptmpa | PG_RW | PG_CI | PG_V;
 	} else
 #endif
 	{
 		/*
 		 * Map the page table pages in both the HW segment table
-		 * and the software Sysptmap.  Note that Sysptmap is also
-		 * considered a PT page hence the +1.
+		 * and the software Sysptmap.
 		 */
 		ste = (u_int *)kstpa;
 		pte = (u_int *)kptmpa;
-		epte = &pte[nptpages+1];
+		epte = &pte[nptpages];
 		protoste = kptpa | SG_RW | SG_V;
 		protopte = kptpa | PG_RW | PG_CI | PG_V;
 		while (pte < epte) {
@@ -275 +281 @@ pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
 		/*
 		 * Invalidate all but the last remaining entries in both.
 		 */
-		epte = &((u_int *)kptmpa)[NPTEPG];
+		epte = &((u_int *)kptmpa)[NPTEPG-1];
 		while (pte < epte) {
 			*ste++ = SG_NV;
 			*pte++ = PG_NV;
 		}
+		/*
+		 * Initialize the last one to point to Sysptmap.
+		 */
+		*ste = kptmpa | SG_RW | SG_V;
+		*pte = kptmpa | PG_RW | PG_CI | PG_V;
 	}

 	/*
 	 * Initialize kernel page table.
 	 * Start by invalidating the `nptpages' that we have allocated.
@@ -321 +333 @@ pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
 	/*
 	 * Finally, validate the internal IO space PTEs (RW+CI).
 	 */
-	pte = (u_int *)iiopa;
-	epte = (u_int *)eiopa;
+#define	PTE2VA(pte)	m68k_ptob(pte - ((pt_entry_t *)kptpa))

 	protopte = RELOC(intiobase_phys, u_int) | PG_RW | PG_CI | PG_V;
+	epte = &pte[iiomapsize];
+	RELOC(intiobase, char *) = (char *)PTE2VA(pte);
+	RELOC(intiolimit, char *) = (char *)PTE2VA(epte);
 	while (pte < epte) {
 		*pte++ = protopte;
 		protopte += PAGE_SIZE;
 	}
+	RELOC(extiobase, char *) = (char *)PTE2VA(pte);
+	pte += eiomapsize;
+	RELOC(virtual_avail, vaddr_t) = PTE2VA(pte);

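PTE2VA() converts a pointer into the static kernel page table at kptpa into the virtual address that PTE maps. It relies on two things: m68k_ptob() being a plain page-number-to-byte-address conversion, and the PTEs at kptpa mapping the address space starting at virtual address 0, so that PTE index n corresponds to VA n * PAGE_SIZE. A hosted sketch with made-up numbers; PGSHIFT == 12 assumes 4KB pages.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t pt_entry_t;

#define PGSHIFT		12				/* 4KB pages */
#define m68k_ptob(x)	((uint32_t)(x) << PGSHIFT)	/* page number -> byte address */
#define PTE2VA(pte)	m68k_ptob((pte) - ((pt_entry_t *)kptpa))

int
main(void)
{
	static pt_entry_t fake_kpt[4 * 1024];		/* stands in for the PT pages at kptpa */
	uintptr_t kptpa = (uintptr_t)fake_kpt;
	pt_entry_t *pte = &fake_kpt[3 * 1024 + 512];	/* PTE index 3584 */

	/* Index 3584 * 4096 == 0x00e00000, the VA that this PTE controls. */
	printf("PTE2VA = 0x%08x\n", (unsigned)PTE2VA(pte));
	return 0;
}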
 	/*
 	 * Calculate important exported kernel virtual addresses
@@ -347 +366 @@ pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
 	 * Immediately follows `nptpages' of static kernel page table.
 	 */
 	RELOC(Sysmap, pt_entry_t *) =
-	    (pt_entry_t *)m68k_ptob(nptpages * NPTEPG);
-	/*
-	 * intiobase, intiolimit: base and end of internal IO space.
-	 */
-	RELOC(intiobase, char *) =
-	    (char *)m68k_ptob(nptpages*NPTEPG - (iiomapsize + eiomapsize));
-	RELOC(intiolimit, char *) =
-	    (char *)m68k_ptob(nptpages*NPTEPG - eiomapsize);
-	/*
-	 * extiobase: base of external IO space.
-	 * eiomapsize pages at the end of the static kernel page table.
-	 */
-	RELOC(extiobase, char *) =
-	    (char *)m68k_ptob(nptpages*NPTEPG - eiomapsize);
+	    (pt_entry_t *)m68k_ptob((NPTEPG - 1) * NPTEPG);

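The new Sysmap value follows from the "Initialize the last one to point to Sysptmap" hunks above: the last segment table entry (on the 68040, the final block of level 2 descriptors) now points back at the Sysptmap page, so the kernel PT pages become visible as Sysmap in the topmost 4MB segment instead of right after the address range covered by the static kernel PT. The arithmetic, assuming 4KB pages and a 32-bit kernel address space:

/*
 * Where the self-mapping puts things, assuming 4KB pages (NPTEPG == 1024)
 * and a 32-bit kernel VA space.  The last Sysptmap entry maps the Sysptmap
 * page itself, so that page shows up as the very last page of the segment.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define NPTEPG		1024UL
#define m68k_ptob(x)	((x) * PAGE_SIZE)	/* page number to byte address */

int
main(void)
{
	unsigned long sysmap = m68k_ptob((NPTEPG - 1) * NPTEPG);

	printf("Sysmap           = 0x%08lx\n", sysmap);		/* 0xffc00000 */
	printf("Sysptmap visible = 0x%08lx\n",
	    sysmap + (NPTEPG - 1) * PAGE_SIZE);			/* 0xfffff000 */
	return 0;
}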
 	/*
 	 * Setup u-area for process 0.
@@ -391 +397 @@ pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
 	    (m68k_round_page(MSGBUFSIZE));
 	RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));

-	RELOC(virtual_avail, vaddr_t) =
-	    VM_MIN_KERNEL_ADDRESS + (vaddr_t)(nextpa - firstpa);
 	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

 #if 0
@@ -450 +454 @@ pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
 	 * descriptor mask noting that we have used:
 	 *	0:		level 1 table
 	 *	1 to `num':	map page tables
-	 *	MAXKL2SIZE-1:	maps last-page page table
+	 *	MAXKL2SIZE-1:	maps kptmpa
 	 */
 #ifdef M68040
 	if (RELOC(mmutype, int) == MMU_68040) {
 		int num;

 		kpm->pm_stfree = ~l2tobm(0);
-		num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
+		num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE),
 		    SG4_LEV2SIZE) / SG4_LEV2SIZE;
 		while (num)
 			kpm->pm_stfree &= ~l2tobm(num--);
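num here counts how many level 2 blocks of the 68040 segment table the kernel PT mappings occupy, rounded up to whole SG4_LEV2SIZE blocks, so they can be marked used in the pm_stfree bitmap. A worked version with the same example sizes as before; roundup() is spelled out the way <sys/param.h> defines it.

/*
 * Worked example of the pm_stfree accounting above, with assumed sizes.
 */
#include <stdio.h>

#define NPTEPG		1024
#define SG4_LEV2SIZE	128
#define SG4_LEV3SIZE	64
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	int nptpages = 4;	/* example PT page count, as before */
	int num = roundup(nptpages * (NPTEPG / SG4_LEV3SIZE),
	    SG4_LEV2SIZE) / SG4_LEV2SIZE;

	/* 4 PT pages need 64 level 2 descriptors; that rounds up to one
	 * 128-descriptor block, so only block 1 (after block 0, the level 1
	 * table) is cleared from the free-block bitmap. */
	printf("level 2 blocks in use: %d\n", num);	/* 1 */
	return 0;
}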
@@ -487 +491 @@ pmap_bootstrap(paddr_t nextpa, paddr_t firstpa)
 		RELOC(virtual_avail, vaddr_t) = va;
 	}
 }

-void
-pmap_init_md(void)
-{
-	vaddr_t addr;
-
-	addr = (vaddr_t) intiobase;
-	if (uvm_map(kernel_map, &addr,
-	    intiotop_phys - intiobase_phys,
-	    NULL, UVM_UNKNOWN_OFFSET, 0,
-	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
-	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
-		panic("pmap_init_md: uvm_map failed");
-}