--- version 1.55, 2000/06/15 15:51:07
+++ version 1.56, 2000/06/19 23:30:36
@@ -181 +181 @@ typedef struct pv_entry {

  * and collect/clear all the ref/mod information and copy it into the pv_entry.
  */

+#ifdef NO_VCACHE
+#define FORCE_ALIAS 1
+#else
+#define FORCE_ALIAS 0
+#endif
+
 #define PV_ALIAS 0x1LL
 #define PV_REF 0x2LL
 #define PV_MOD 0x4LL
@@ -219 +225 @@ int tsbsize; /* tsbents = 512 * 2^^tsbs

 struct pmap kernel_pmap_;

 int physmem;
-u_long ksegv;		/* vaddr start of kernel */
-u_int64_t ksegp;	/* paddr of start of kernel */
-u_long ksegend;
-u_int64_t ksegpend;
+/*
+ * Virtual and physical addresses of the start and end of kernel text
+ * and data segments.
+ */
+vaddr_t ktext;
+paddr_t ktextp;
+vaddr_t ektext;
+paddr_t ektextp;
+vaddr_t kdata;
+paddr_t kdatap;
+vaddr_t ekdata;
+paddr_t ekdatap;

 static int npgs;
 static u_int nextavail;
 static struct mem_region memlist[8]; /* Pick a random size here */

-caddr_t vmmap;		/* one reserved MI vpage for /dev/mem */
+vaddr_t vmmap;		/* one reserved MI vpage for /dev/mem */

 struct mem_region *mem, *avail, *orig;
 int memsize;

 #define CTXSIZE (numctx*CTXENTRY)

 #define pmap_get_page(p) uvm_page_physget((p));

+/*
+ * Enter a TTE into the kernel pmap only.  Don't do anything else.
+ */
+static void pmap_enter_kpage __P((vaddr_t, int64_t));
+static void
+pmap_enter_kpage(va, data)
+        vaddr_t va;
+        int64_t data;
+{
+        paddr_t newp;
+
+        newp = NULL;
+        while (pseg_set(pmap_kernel(), va, data, newp) != NULL) {
+                pmap_get_page(&newp);
+                pmap_zero_page(newp);
+#ifdef DEBUG
+                enter_stats.ptpneeded ++;
+#endif
+#ifdef BOOT1_DEBUG
+                prom_printf(
+                        "pseg_set: pm=%p va=%p data=%lx newp %lx\r\n",
+                        pmap_kernel(), va, (long)data, (long)newp);
+                {int i; for (i=0; i<140000000; i++) ;}
+#endif
+        }
+}
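pmap_enter_kpage() loops on pseg_set(), allocating and zeroing a fresh
page-table page each time pseg_set() reports that one is needed.  A minimal
sketch of the calling pattern the new code uses throughout, with pa and va
standing in for a physical/virtual pair:

        int64_t data;

        data = TSB_DATA(0 /* global */, TLB_8K, pa,
            1 /* priv */, 1 /* Write */, 1 /* Cacheable */,
            FORCE_ALIAS /* ALIAS -- Disable D$ */, 1 /* valid */, 0 /* IE */);
        pmap_enter_kpage(va, data);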
 /*
  * This is called during bootstrap, before the system is really initialized.
  *
- * It's called with the start and end virtual addresses of the kernel.
- * We bootstrap the pmap allocator now. We will allocate the basic
- * structures we need to bootstrap the VM system here: the page frame
- * tables, the TSB, and the free memory lists.
+ * It's called with the start and end virtual addresses of the kernel. We
+ * bootstrap the pmap allocator now. We will allocate the basic structures we
+ * need to bootstrap the VM system here: the page frame tables, the TSB, and
+ * the free memory lists.
+ *
+ * Now all this is becoming a bit obsolete. maxctx is still important, but by
+ * separating the kernel text and data segments we really would need to
+ * provide the start and end of each segment. But we can't. The rodata
+ * segment is attached to the end of the kernel segment and has nothing to
+ * delimit its end. We could still pass in the beginning of the kernel and
+ * the beginning and end of the data segment but we could also just as easily
+ * calculate that all in here.
+ *
+ * To handle the kernel text, we need to do a reverse mapping of the start of
+ * the kernel, then traverse the free memory lists to find out how big it is.
  */
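The reverse mapping mentioned above reduces to scanning the PROM
"translations" list for the entry whose vstart equals ktext; condensed from
the code added later in this diff:

        for (i = 0; i < prom_map_size; i++)
                if (prom_map[i].vstart == ktext)
                        break;
        if (i == prom_map_size)
                panic("No kernel text segment!");
        ktsize = prom_map[i].vsize;     /* size of the locked text mapping */
        ektext = ktext + ktsize;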
 void
 pmap_bootstrap(kernelstart, kernelend, maxctx)
         u_long kernelstart, kernelend;
         u_int maxctx;
 {
+        extern int data_start[], end[]; /* start of data segment */
         extern int msgbufmapped;
         struct mem_region *mp, *mp1;
         int msgbufsiz;
         int pcnt;
         size_t s, sz;
         int i, j;
+        int64_t data;
+        vaddr_t va;
         u_int64_t phys_msgbuf;
-        u_long firstaddr, newkp, ksize;
-        u_long *newkv;
+        paddr_t newkp;
+        vaddr_t newkv, firstaddr;
+        vsize_t kdsize, ktsize;
 #ifdef DEBUG
         int opmapdebug = pmapdebug;
         pmapdebug = 0;

@@ -414 +474 @@ pmap_bootstrap(kernelstart, kernelend, m

          * Get hold of the message buffer.
          */
         msgbufp = (struct kern_msgbuf *)MSGBUF_VA;
-        /* XXXXX -- for uvmhist printing */
+        /* XXXXX -- increase msgbufsiz for uvmhist printing */
         msgbufsiz = 4*NBPG /* round_page(sizeof(struct msgbuf)) */;
 #ifdef BOOT_DEBUG
         prom_printf("Trying to allocate msgbuf at %lx, size %lx\r\n",

@@ -441 +501 @@ pmap_bootstrap(kernelstart, kernelend, m

         initmsgbuf((caddr_t)msgbufp, msgbufsiz);

         /*
-         * Record kernel mapping -- we will map this with a permanent 4MB
+         * Record kernel mapping -- we will map these with a permanent 4MB
          * TLB entry when we initialize the CPU later.
          */
 #ifdef BOOT_DEBUG
         prom_printf("translating kernelstart %p\r\n", (void *)kernelstart);
 #endif
-        ksegv = kernelstart;
-        ksegp = prom_vtop(kernelstart);
+        ktext = kernelstart;
+        ktextp = prom_vtop(kernelstart);
+
+        kdata = (vaddr_t)data_start;
+        kdatap = prom_vtop(kdata);
+        ekdata = (vaddr_t)end;

         /*
          * Find the real size of the kernel. Locate the smallest starting

@@ -458 +522 @@ pmap_bootstrap(kernelstart, kernelend, m

                 /*
                  * Check whether this region is at the end of the kernel.
                  */
-                if (mp->start > kernelstart && (mp1->start < kernelstart ||
+                if (mp->start >= ekdata && (mp1->start < ekdata ||
                     mp1->start > mp->start))
                         mp1 = mp;
         }
-        if (mp1->start < kernelstart)
+        if (mp1->start < kdata)
                 prom_printf("Kernel at end of vmem???\r\n");

 #ifdef BOOT1_DEBUG
-        prom_printf("The kernel is mapped at %lx, next free seg: %lx, %lx\r\n",
-            (long)ksegp, (u_long)mp1->start, (u_long)mp1->size);
+        prom_printf("The kernel data is mapped at %lx, next free seg: %lx, %lx\r\n",
+            (long)kdata, (u_long)mp1->start, (u_long)mp1->size);
 #endif
         /*
          * This is bogus and will be changed when the kernel is rounded to 4MB.
          */
-        firstaddr = (kernelend + 07) & ~ 07;    /* Longword align */
+        firstaddr = (ekdata + 07) & ~ 07;       /* Longword align */

 #if 1
 #define valloc(name, type, num) (name) = (type *)firstaddr; firstaddr += (num)

@@ -482 +547 @@ pmap_bootstrap(kernelstart, kernelend, m

 #define MEG (1<<20) /* 1MB */

         /*
-         * Since we can't give the loader the hint to align us on a 4MB
+         * Since we can't always give the loader the hint to align us on a 4MB
          * boundary, we will need to do the alignment ourselves. First
          * allocate a new 4MB aligned segment for the kernel, then map it
          * in, copy the kernel over, swap mappings, then finally, free the
          * old kernel. Then we can continue with this.
+         *
+         * We'll do the data segment up here since we know how big it is.
+         * We'll do the text segment after we've read in the PROM translations
+         * so we can figure out its size.
          */
-        ksize = round_page(kernelend - kernelstart);
+        kdsize = round_page(ekdata - kdata);
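The alignment test that follows is mask arithmetic on the low bits; a small
illustration (the IS_4M_ALIGNED name is ours, not the file's):

        /* Hypothetical helper: true when pa already sits on a 4MB boundary. */
        #define IS_4M_ALIGNED(pa)       (((pa) & (4*MEG - 1)) == 0)

kdatap & (4*MEG-1) below is the same test written inline: any of the low
bits set means the data segment has to be relocated.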
-        if (ksegp & (4*MEG-1)) {
+        if (kdatap & (4*MEG-1)) {
 #ifdef BOOT1_DEBUG
-                prom_printf("Allocating new %lx kernel at 4MB boundary\r\n",
-                    (u_long)ksize);
+                prom_printf("Allocating new %lx kernel data at 4MB boundary\r\n",
+                    (u_long)kdsize);
 #endif
-                if ((newkp = prom_alloc_phys(ksize, 4*MEG)) == 0 ) {
+                if ((newkp = prom_alloc_phys(4*MEG, 4*MEG)) == 0 ) {
                         prom_printf("Cannot allocate new kernel\r\n");
                         OF_exit();
                 }

@@ -503 +572 @@ pmap_bootstrap(kernelstart, kernelend, m

                 prom_printf("Allocating new va for buffer at %p\r\n",
                     (void *)newkp);
 #endif
-                if ((newkv = (u_long*)prom_alloc_virt(ksize, 8)) ==
-                    (u_long*)-1) {
+                if ((newkv = (vaddr_t)prom_alloc_virt(4*MEG, 8)) ==
+                    (vaddr_t)-1) {
                         prom_printf("Cannot allocate new kernel va\r\n");
                         OF_exit();
                 }

@@ -514 +583 @@ pmap_bootstrap(kernelstart, kernelend, m

 #endif
                 prom_map_phys(newkp, 4*MEG, (vaddr_t)newkv, -1);
 #ifdef BOOT1_DEBUG
-                prom_printf("Copying kernel...");
+                prom_printf("Copying %ld bytes kernel data...", kdsize);
 #endif
-                bzero(newkv, 4*MEG);
-                bcopy((void *)kernelstart, (void *)newkv,
-                    kernelend - kernelstart);
+                bzero((void *)newkv, 4*MEG);
+                bcopy((void *)kdata, (void *)newkv,
+                    kdsize);
 #ifdef BOOT1_DEBUG
                 prom_printf("done. Swapping maps..unmap new\r\n");
 #endif

@@ -526 +595 @@ pmap_bootstrap(kernelstart, kernelend, m

 #ifdef BOOT_DEBUG
                 prom_printf("remap old ");
 #endif
-                prom_map_phys(newkp, 4*MEG, kernelstart, -1);
+#if 0
+                /*
+                 * calling the prom will probably require reading part of the
+                 * data segment so we can't do this.
+                 */
+                prom_unmap_virt((vaddr_t)kdatap, kdsize);
+#endif
+                prom_map_phys(newkp, 4*MEG, kdata, -1);
                 /*
                  * we will map in 4MB, more than we allocated, to allow
                  * further allocation

@@ -534 +610 @@ pmap_bootstrap(kernelstart, kernelend, m

 #ifdef BOOT1_DEBUG
                 prom_printf("free old\r\n");
 #endif
-                prom_free_phys(ksegp, ksize);
-                ksegp = newkp;
+                prom_free_phys(kdatap, kdsize);
+                kdatap = newkp;

 #ifdef BOOT1_DEBUG
                 prom_printf("pmap_bootstrap: firstaddr is %lx virt (%lx phys)"

@@ -544 +620 @@ pmap_bootstrap(kernelstart, kernelend, m

 #endif
         } else {
                 /* We were at a 4MB boundary after all! */
-                newkp = ksegp;
-                /* Make sure all 4MB are mapped */
-                prom_map_phys(ksegp, 4*MEG, kernelstart, -1);
+#if 1
+                /*
+                 * XXXX should we not overlap?
+                 */
+                prom_map_phys(kdatap, 4*MEG, kdata, -1);
+#else
+                psize_t szdiff = 4*MEG - kdsize;
+
+                /* Claim the rest of the physical page. */
+                newkp = kdatap + kdsize;
+                newkv = kdata + kdsize;
+                if (kdatap != prom_claim_phys(newkp, szdiff))
+                        prom_printf("pmap_bootstrap: could not claim physical "
+                            "dseg extension at %lx size %lx\r\n", newkp, szdiff);
+                /* And the rest of the virtual page. */
+                if (prom_claim_virt(newkv, szdiff) != newkv)
+                        prom_printf("pmap_bootstrap: could not claim virtual "
+                            "dseg extension at %lx size %lx\r\n", newkv, szdiff);
+
+                /* Make sure all 4MB are mapped */
+                prom_map_phys(newkp, szdiff, newkv, -1);
+#endif
         }
-        /*
-         * Allocate a 64MB page for the cpu_info structure now.
-         */
-        if ((cpu0paddr = prom_alloc_phys(8*NBPG, 8*NBPG)) == 0 ) {
-                prom_printf("Cannot allocate new cpu_info\r\n");
-                OF_exit();
-        }

@@ -604 +690 @@ pmap_bootstrap(kernelstart, kernelend, m

         tsbsize = 2;

         /*
-         * Count the number of available entries. And make an extra
-         * copy to fiddle with.
-         */
-        sz = OF_getproplen(memh, "available") + sizeof(struct mem_region);
-        valloc(orig, struct mem_region, sz);
-        bzero((caddr_t)orig, sz);
-        if (OF_getprop(memh, "available", orig, sz) <= 0) {
-                prom_printf("no available RAM?");
-                OF_exit();
-        }
-
-#ifdef BOOT1_DEBUG
-        /* print out mem list */
-        prom_printf("Available physical memory:\r\n");
-        for (mp = orig; mp->size; mp++) {
-                prom_printf("memlist start %lx size %lx\r\n",
-                    (u_long)mp->start, (u_long)mp->size);
-        }
-        prom_printf("End of available physical memory\r\n");
-#endif
-        valloc(avail, struct mem_region, sz);
-        bzero((caddr_t)avail, sz);
-        for (pcnt = 0, mp = orig, mp1 = avail; (mp1->size = mp->size);
-            mp++, mp1++) {
-                mp1->start = mp->start;
-                pcnt++;
-        }
-
-        /*
          * Save the prom translations
          */
         sz = OF_getproplen(vmemh, "translations");

@@ -653 +710 @@ pmap_bootstrap(kernelstart, kernelend, m

         prom_printf("End of prom xlations\r\n");
 #endif
         /*
+         * Hunt for the kernel text segment and figure out its size and
+         * alignment.
+         */
+        for (i = 0; i < prom_map_size; i++)
+                if (prom_map[i].vstart == ktext)
+                        break;
+        if (i == prom_map_size)
+                panic("No kernel text segment!\r\n");
+        ktsize = prom_map[i].vsize;
+        ektext = ktext + ktsize;
+
+        if (ktextp & (4*MEG-1)) {
+#ifdef BOOT1_DEBUG
+                prom_printf("Allocating new %lx kernel text at 4MB boundary\r\n",
+                    (u_long)ktsize);
+#endif
+                if ((newkp = prom_alloc_phys(ktsize, 4*MEG)) == 0 ) {
+                        prom_printf("Cannot allocate new kernel text\r\n");
+                        OF_exit();
+                }
+#ifdef BOOT1_DEBUG
+                prom_printf("Allocating new va for buffer at %p\r\n",
+                    (void *)newkp);
+#endif
+                if ((newkv = (vaddr_t)prom_alloc_virt(ktsize, 8)) ==
+                    (vaddr_t)-1) {
+                        prom_printf("Cannot allocate new kernel text va\r\n");
+                        OF_exit();
+                }
+#ifdef BOOT1_DEBUG
+                prom_printf("Mapping in buffer %lx at %lx\r\n",
+                    (u_long)newkp, (u_long)newkv);
+#endif
+                prom_map_phys(newkp, ktsize, (vaddr_t)newkv, -1);
+#ifdef BOOT1_DEBUG
+                prom_printf("Copying %ld bytes kernel text...", ktsize);
+#endif
+                bcopy((void *)ktext, (void *)newkv,
+                    ktsize);
+#ifdef BOOT1_DEBUG
+                prom_printf("done. Swapping maps..unmap new\r\n");
+#endif
+                prom_unmap_virt((vaddr_t)newkv, 4*MEG);
+#ifdef BOOT_DEBUG
+                prom_printf("remap old ");
+#endif
+#if 0
+                /*
+                 * calling the prom will probably require reading part of the
+                 * text segment so we can't do this.
+                 */
+                prom_unmap_virt((vaddr_t)ktextp, ktsize);
+#endif
+                prom_map_phys(newkp, ktsize, ktext, -1);
+                /*
+                 * we will map in 4MB, more than we allocated, to allow
+                 * further allocation
+                 */
+#ifdef BOOT1_DEBUG
+                prom_printf("free old\r\n");
+#endif
+                prom_free_phys(ktextp, ktsize);
+                ktextp = newkp;
+
+#ifdef BOOT1_DEBUG
+                prom_printf("pmap_bootstrap: firstaddr is %lx virt (%lx phys)"
+                    "avail for kernel\r\n", (u_long)firstaddr,
+                    (u_long)prom_vtop(firstaddr));
+#endif
+
+                /*
+                 * Re-fetch translations -- they've certainly changed.
+                 */
+                if (OF_getprop(vmemh, "translations", (void*)prom_map, sz) <=
+                    0) {
+                        prom_printf("no translations installed?");
+                        OF_exit();
+                }
+#ifdef BOOT_DEBUG
+                /* print out mem list */
+                prom_printf("New prom xlations:\r\n");
+                for (i = 0; i < prom_map_size; i++) {
+                        prom_printf("start %016lx size %016lx tte %016lx\r\n",
+                            (u_long)prom_map[i].vstart,
+                            (u_long)prom_map[i].vsize,
+                            (u_long)prom_map[i].tte);
+                }
+                prom_printf("End of prom xlations\r\n");
+#endif
+        }
+        ektextp = ktextp + ktsize;
+
+        /*
          * Here's a quick in-lined reverse bubble sort. It gets rid of
-         * any translations inside the kernel VA range.
+         * any translations inside the kernel data VA range.
          */
         for(i = 0; i < prom_map_size; i++) {
-                if (prom_map[i].vstart >= ksegv &&
+                if (prom_map[i].vstart >= kdata &&
                     prom_map[i].vstart <= firstaddr) {
                         prom_map[i].vstart = 0;
                         prom_map[i].vsize = 0;
                 }
+                if (prom_map[i].vstart >= ktext &&
+                    prom_map[i].vstart <= ektext) {
+                        prom_map[i].vstart = 0;
+                        prom_map[i].vsize = 0;
+                }
                 for(j = i; j < prom_map_size; j++) {
-                        if (prom_map[j].vstart >= ksegv &&
+                        if (prom_map[j].vstart >= kdata &&
                             prom_map[j].vstart <= firstaddr)
                                 continue;       /* this is inside the kernel */
+                        if (prom_map[j].vstart >= ktext &&
+                            prom_map[j].vstart <= ektext)
+                                continue;       /* this is inside the kernel */
                         if (prom_map[j].vstart > prom_map[i].vstart) {
                                 struct prom_map tmp;
                                 tmp = prom_map[i];
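Stripped of the kernel-range filtering, the sort itself is an in-place
exchange pass that leaves the largest vstart first; a self-contained sketch
(the function name and plain array are ours):

        void
        sort_desc(u_long *a, int n)
        {
                int i, j;

                for (i = 0; i < n; i++)
                        for (j = i; j < n; j++)
                                if (a[j] > a[i]) {
                                        /* bigger entry moves up to slot i */
                                        u_long t = a[i];

                                        a[i] = a[j];
                                        a[j] = t;
                                }
        }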
@@ -686 +844 @@ pmap_bootstrap(kernelstart, kernelend, m

 #endif

         /*
+         * Allocate a 64MB page for the cpu_info structure now.
+         */
+        if ((cpu0paddr = prom_alloc_phys(8*NBPG, 8*NBPG)) == 0 ) {
+                prom_printf("Cannot allocate new cpu_info\r\n");
+                OF_exit();
+        }
+
+        /*
+         * Now that the kernel text segment is in its final location, we can
+         * try to find out how much memory really is free.
+         */
+        sz = OF_getproplen(memh, "available") + sizeof(struct mem_region);
+        valloc(orig, struct mem_region, sz);
+        bzero((caddr_t)orig, sz);
+        if (OF_getprop(memh, "available", orig, sz) <= 0) {
+                prom_printf("no available RAM?");
+                OF_exit();
+        }
+#ifdef BOOT1_DEBUG
+        /* print out mem list */
+        prom_printf("Available physical memory:\r\n");
+        for (mp = orig; mp->size; mp++) {
+                prom_printf("memlist start %lx size %lx\r\n",
+                    (u_long)mp->start, (u_long)mp->size);
+        }
+        prom_printf("End of available physical memory\r\n");
+#endif
+        valloc(avail, struct mem_region, sz);
+        bzero((caddr_t)avail, sz);
+        for (pcnt = 0, mp = orig, mp1 = avail; (mp1->size = mp->size);
+            mp++, mp1++) {
+                mp1->start = mp->start;
+                pcnt++;
+        }
+
+        /*
          * Allocate and initialize a context table
          */
         numctx = maxctx;

@@ -749 +944 @@ pmap_bootstrap(kernelstart, kernelend, m

         prom_printf("kernel virtual size %08lx - %08lx\r\n",
             (u_long)kernelstart, (u_long)firstaddr);
 #endif
-        kernelstart = kernelstart & ~PGOFSET;
-        kernelend = firstaddr;
-        kernelend = (kernelend + PGOFSET) & ~PGOFSET;
+        kdata = kdata & ~PGOFSET;
+        ekdata = firstaddr;
+        ekdata = (ekdata + PGOFSET) & ~PGOFSET;
 #ifdef BOOT1_DEBUG
         prom_printf("kernel virtual size %08lx - %08lx\r\n",
             (u_long)kernelstart, (u_long)kernelend);
 #endif
-        ksegend = kernelend;
-        ksegpend = kernelend - kernelstart + ksegp;
+        ekdatap = ekdata - kdata + kdatap;
         /* Switch from vaddrs to paddrs */
-        kernelstart = ksegp & ~PGOFSET;
-        kernelend = ksegpend;
-        if(kernelend > (kernelstart + 4*MEG)) {
+        if(ekdatap > (kdatap + 4*MEG)) {
                 prom_printf("Kernel size exceeds 4MB\r\n");
                 panic("kernel segment size exceeded\n");
                 OF_exit();

@@ -777 +969 @@ pmap_bootstrap(kernelstart, kernelend, m

                     (u_long)mp->size);
         }
         prom_printf("End of available physical memory before cleanup\r\n");
-        prom_printf("kernel physical size %08lx - %08lx\r\n",
-            (u_long)kernelstart, (u_long)kernelend);
+        prom_printf("kernel physical text size %08lx - %08lx\r\n",
+            (u_long)ktextp, (u_long)ektextp);
+        prom_printf("kernel physical data size %08lx - %08lx\r\n",
+            (u_long)kdatap, (u_long)ekdatap);
 #endif
         /*
          * Here's another quick in-lined bubble sort.
          */
         for (i = 0; i < pcnt; i++) {
-                /* XXXMRG: why add up npgs when it is set to zero below? */
-                npgs += btoc(avail[i].size);
                 for (j = i; j < pcnt; j++) {
                         if (avail[j].start < avail[i].start) {
                                 struct mem_region tmp;

@@ -796 +988 @@ pmap_bootstrap(kernelstart, kernelend, m

                         }
                 }
         }

+        /* Throw away page zero if we have it. */
+        if (avail->start == 0) {
+                avail->start += NBPG;
+                avail->size -= NBPG;
+        }
+
+        /*
+         * Now we need to remove the area we valloc'ed from the available
+         * memory lists. (NB: we may have already alloc'ed the entire space).
+         */
         npgs = 0;
         for (mp = avail; mp->size; mp++) {
                 /*
                  * Check whether this region holds all of the kernel.
                  */
                 s = mp->start + mp->size;
-                if (mp->start < kernelstart && s > kernelend) {
-                        avail[pcnt].start = kernelend;
-                        avail[pcnt++].size = s - kernelend;
-                        mp->size = kernelstart - mp->start;
+                if (mp->start < kdatap && s > (kdatap + 4*MEG)) {
+                        avail[pcnt].start = kdatap + 4*MEG;
+                        avail[pcnt++].size = s - kdatap;
+                        mp->size = kdatap - mp->start;
                 }
                 /*
                  * Look whether this region starts within the kernel.
                  */
-                if (mp->start >= kernelstart && mp->start < kernelend) {
-                        s = kernelend - mp->start;
+                if (mp->start >= kdatap && mp->start < (kdatap + 4*MEG)) {
+                        s = ekdatap - mp->start;
                         if (mp->size > s)
                                 mp->size -= s;
                         else
                                 mp->size = 0;
-                        mp->start = kernelend;
+                        mp->start = (kdatap + 4*MEG);
                 }
                 /*
                  * Now look whether this region ends within the kernel.
                  */
                 s = mp->start + mp->size;
-                if (s > kernelstart && s < kernelend)
-                        mp->size -= s - kernelstart;
+                if (s > kdatap && s < (kdatap + 4*MEG))
+                        mp->size -= s - kdatap;
                 /*
                  * Now page align the start of the region.
                  */

@@ -869 +1070 @@ pmap_bootstrap(kernelstart, kernelend, m

                     VM_FREELIST_DEFAULT);
         }
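The cleanup loop above distinguishes three ways an "available" region can
overlap the kernel's locked range: it spans the range, starts inside it, or
ends inside it. A self-contained sketch of the same case analysis (the names
and the exact tail arithmetic are ours; the file works in place on avail[]):

        struct region { u_int64_t start, size; };

        void
        carve(struct region *r, struct region *tail, u_int64_t ks, u_int64_t ke)
        {
                u_int64_t end = r->start + r->size;

                if (r->start < ks && end > ke) {
                        /* Region spans the kernel: keep head, split off tail. */
                        tail->start = ke;
                        tail->size = end - ke;
                        r->size = ks - r->start;
                } else if (r->start >= ks && r->start < ke) {
                        /* Region starts inside the kernel: trim its head. */
                        u_int64_t skip = ke - r->start;

                        r->size = (r->size > skip) ? r->size - skip : 0;
                        r->start = ke;
                } else if (end > ks && end < ke) {
                        /* Region ends inside the kernel: trim its tail. */
                        r->size -= end - ks;
                }
        }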
-        /* Throw away page zero if we have it. */
-        if (avail->start == 0) {
-                avail->start += NBPG;
-                avail->size -= NBPG;
-        }
+        /* finally, free up any space that valloc did not use */
+        if (ekdatap < (kdatap + (4*MEG))) {
+                uvm_page_physload(atop(ekdatap), atop(kdatap + (4*MEG)),
+                    atop(ekdatap), atop(kdatap + (4*MEG)),
+                    VM_FREELIST_DEFAULT);
+        }

 #ifdef BOOT_DEBUG
         /* print out mem list */
         prom_printf("Available physical memory after cleanup:\r\n");
         for (mp = avail; mp->size; mp++) {

@@ -915 +1118 @@ pmap_bootstrap(kernelstart, kernelend, m

         prom_printf("Inserting mesgbuf into pmap_kernel()\r\n");
 #endif
         /* it's not safe to call pmap_enter so we need to do this ourselves */
-        {
-                pte_t tte;
-                vaddr_t va = (vaddr_t)msgbufp;
-                paddr_t newp;
-
-                prom_map_phys(phys_msgbuf, NBPG, (vaddr_t)msgbufp, -1);
-#ifdef NO_VCACHE
-                tte.data.data = TSB_DATA(0 /* global */,
-                        TLB_8K,
-                        phys_msgbuf,
-                        1 /* priv */,
-                        1 /* Write */,
-                        1 /* Cacheable */,
-                        1 /* ALIAS -- Disable D$ */,
-                        1 /* valid */,
-                        0 /* IE */);
-#else
-                tte.data.data = TSB_DATA(0 /* global */,
-                        TLB_8K,
-                        phys_msgbuf,
-                        1 /* priv */,
-                        1 /* Write */,
-                        1 /* Cacheable */,
-                        0 /* No ALIAS */,
-                        1 /* valid */,
-                        0 /* IE */);
-#endif
-                newp = NULL;
-                while (pseg_set(pmap_kernel(), va, tte.data.data, newp)
-                    != NULL) {
-                        pmap_get_page(&newp);
-                        pmap_zero_page(newp);
-#ifdef DEBUG
-                        enter_stats.ptpneeded ++;
-#endif
-#ifdef BOOT1_DEBUG
-                        prom_printf(
-                            "pseg_set: pm=%p va=%p data=%lx newp %lx\r\n",
-                            pmap_kernel(), va, (long)tte.data.data, (long)newp);
-                        {int i; for (i=0; i<140000000; i++) ;}
-#endif
-                }
-
-                /*
-                 * Also add a global NFO mapping for page zero.
-                 */
-                tte.data.data = TSB_DATA(0 /* global */,
-                        TLB_8K,
-                        0 /* Physaddr */,
-                        1 /* priv */,
-                        0 /* Write */,
-                        1 /* Cacheable */,
-                        0 /* No ALIAS */,
-                        1 /* valid */,
-                        0 /* IE */);
-                tte.data.data |= TLB_L|TLB_NFO;
-                newp = NULL;
-                while(pseg_set(pmap_kernel(), va, tte.data.data, newp)
-                    != NULL) {
-                        pmap_get_page(&newp);
-                        pmap_zero_page(newp);
-#ifdef DEBUG
-                        enter_stats.ptpneeded ++;
-#endif
-                }
-        }
+        va = (vaddr_t)msgbufp;
+        while (msgbufsiz) {
+                prom_map_phys(phys_msgbuf, NBPG, (vaddr_t)msgbufp, -1);
+                data = TSB_DATA(0 /* global */,
+                        TLB_8K,
+                        phys_msgbuf,
+                        1 /* priv */,
+                        1 /* Write */,
+                        1 /* Cacheable */,
+                        FORCE_ALIAS /* ALIAS -- Disable D$ */,
+                        1 /* valid */,
+                        0 /* IE */);
+                pmap_enter_kpage(va, data);
+                va += NBPG;
+                msgbufsiz -= NBPG;
+                phys_msgbuf += NBPG;
+        }
+
+        /*
+         * Also add a global NFO mapping for page zero.
+         */
+        data = TSB_DATA(0 /* global */,
+                TLB_8K,
+                0 /* Physaddr */,
+                1 /* priv */,
+                0 /* Write */,
+                1 /* Cacheable */,
+                0 /* No ALIAS */,
+                1 /* valid */,
+                0 /* IE */);
+        data |= TLB_L|TLB_NFO;
+        pmap_enter_kpage(NULL, data);
 #ifdef BOOT1_DEBUG
         prom_printf("Done inserting mesgbuf into pmap_kernel()\r\n");
 #endif

@@ -993 +1164 @@ pmap_bootstrap(kernelstart, kernelend, m

         if (prom_map[i].vstart && ((prom_map[i].vstart>>32) == 0))
                 for (j = 0; j < prom_map[i].vsize; j += NBPG) {
                         int k;
-                        paddr_t newp;

                         for (k = 0; page_size_map[k].mask; k++) {
                                 if (((prom_map[i].vstart |

@@ -1007 +1177 @@ pmap_bootstrap(kernelstart, kernelend, m

                         page_size_map[k].use++;
 #endif
                         /* Enter PROM map into pmap_kernel() */
-                        newp = NULL;
-                        while (pseg_set(pmap_kernel(),
-                            prom_map[i].vstart + j,
-                            (prom_map[i].tte + j) |
-                            page_size_map[k].code, newp) != NULL) {
-                                pmap_get_page(&newp);
-                                pmap_zero_page(newp);
-#ifdef DEBUG
-                                enter_stats.ptpneeded++;
-#endif
-                        }
+                        pmap_enter_kpage(prom_map[i].vstart + j,
+                            (prom_map[i].tte + j)|
+                            page_size_map[k].code);
                 }
 #ifdef BOOT1_DEBUG
         prom_printf("Done inserting PROM mappings into pmap_kernel()\r\n");

@@ -1026 +1188 @@ pmap_bootstrap(kernelstart, kernelend, m

         /*
          * Fix up start of kernel heap.
          */
-        vmmap = (caddr_t)(ksegv + 4*MEG); /* Start after our locked TLB entry */
+        vmmap = (vaddr_t)(kdata + 4*MEG); /* Start after our locked TLB entry */
         /* Let's keep 1 page of redzone after the kernel */
         vmmap += NBPG;
-        /* Allocate some VAs for u0 */
         {
                 extern vaddr_t u0[2];
+                extern struct pcb* proc0paddr;
+                extern void main __P((void));
                 paddr_t pa;

+                /* Initialize all the pointers to u0 */
+                cpcb = (struct pcb *)vmmap;
+                proc0paddr = cpcb;
                 u0[0] = vmmap;
+                /* Allocate some VAs for u0 */
                 u0[1] = vmmap + 2*USPACE;

+#ifdef BOOT1_DEBUG
+                prom_printf("Inserting stack 0 into pmap_kernel() at %p\r\n", vmmap);
+#endif
+
                 while (vmmap < u0[1]) {
-                        pte_t tte;
+                        int64_t data;
                         vaddr_t va = (vaddr_t)vmmap;
-                        paddr_t newp;

                         pmap_get_page(&pa);
                         prom_map_phys(pa, NBPG, va, -1);
-#ifdef NO_VCACHE
-                        tte.data.data = TSB_DATA(0 /* global */,
-                                TLB_8K,
-                                pa,
-                                1 /* priv */,
-                                1 /* Write */,
-                                1 /* Cacheable */,
-                                1 /* ALIAS -- Disable D$ */,
-                                1 /* valid */,
-                                0 /* IE */);
-#else
-                        tte.data.data = TSB_DATA(0 /* global */,
-                                TLB_8K,
-                                pa,
-                                1 /* priv */,
-                                1 /* Write */,
-                                1 /* Cacheable */,
-                                0 /* No ALIAS */,
-                                1 /* valid */,
-                                0 /* IE */);
-#endif
-                        newp = NULL;
-                        while (pseg_set(pmap_kernel(), va, tte.data.data, newp)
-                            != NULL) {
-                                pmap_get_page(&newp);
-                                pmap_zero_page(newp);
-#ifdef DEBUG
-                                enter_stats.ptpneeded ++;
-#endif
-#ifdef BOOT1_DEBUG
-                                prom_printf(
-                                    "pseg_set: pm=%p va=%p data=%lx newp %lx\r\n",
-                                    pmap_kernel(), va, (long)tte.data.
-                                    data, (long)newp);
-                                {int i; for (i=0; i<140000000; i++) ;}
-#endif
-                        }
+                        data = TSB_DATA(0 /* global */,
+                                TLB_8K,
+                                pa,
+                                1 /* priv */,
+                                1 /* Write */,
+                                1 /* Cacheable */,
+                                FORCE_ALIAS /* ALIAS -- Disable D$ */,
+                                1 /* valid */,
+                                0 /* IE */);
+                        pmap_enter_kpage(va, data);
                         vmmap += NBPG;
                 }
+#ifdef BOOT1_DEBUG
+                prom_printf("Done inserting stack 0 into pmap_kernel()\r\n");
+#endif
+
+                /* Now map in and initialize our cpu_info structure */
+#ifdef DIAGNOSTIC
+                vmmap += NBPG; /* redzone -- XXXX do we need one? */
+#endif
+                if ((vmmap ^ CPUINFO_VA) & VA_ALIAS_MASK)
+                        vmmap += NBPG; /* Matchup virtual color for D$ */
+                cpus = (struct cpu_info *)vmmap;
+
+#ifdef BOOT1_DEBUG
+                prom_printf("Inserting cpu_info into pmap_kernel() at %p\r\n", cpus);
+#endif
+                /* Now map in all 8 pages of cpu_info */
+                pa = cpu0paddr;
+                for (i=0; i<8; i++) {
+                        int64_t data;
+                        vaddr_t va = (vaddr_t)vmmap;
+
+                        prom_map_phys(pa, NBPG, va, -1);
+                        data = TSB_DATA(0 /* global */,
+                                TLB_8K,
+                                pa,
+                                1 /* priv */,
+                                1 /* Write */,
+                                1 /* Cacheable */,
+                                FORCE_ALIAS /* ALIAS -- Disable D$ */,
+                                1 /* valid */,
+                                0 /* IE */);
+                        pmap_enter_kpage(va, data);
+                        vmmap += NBPG;
+                        pa += NBPG;
+                }
+#ifdef BOOT1_DEBUG
+                prom_printf("Initializing cpu_info\r\n");
+#endif
+
+                /* Initialize our cpu_info structure */
+                bzero(cpus, 8*NBPG);
+                cpus->ci_next = NULL; /* Redundant, I know. */
+                cpus->ci_curproc = &proc0;
+                cpus->ci_cpcb = (struct pcb *)u0[0]; /* Need better source */
+                cpus->ci_upaid = CPU_UPAID;
+                cpus->ci_number = cpus->ci_upaid; /* How do we figure this out? */
+                cpus->ci_fpproc = NULL;
+                cpus->ci_spinup = main; /* Call main when we're running. */
+                cpus->ci_initstack = (void *)u0[1];
+                cpus->ci_paddr = cpu0paddr;
+                /* The rest will be done at CPU attach time. */
+#ifdef BOOT1_DEBUG
+                prom_printf("Done inserting cpu_info into pmap_kernel()\r\n");
+#endif
         }
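The VA_ALIAS_MASK test above keeps the temporary vmmap mapping on the same
data-cache color as the permanent CPUINFO_VA mapping, so the two views of
cpu_info cannot create an illegal alias in the virtually indexed D$.
Expressed as a predicate (the macro name is ours):

        /* Hypothetical helper: nonzero when two VAs differ in cache color. */
        #define DCACHE_COLOR_MISMATCH(va1, va2) \
                (((va1) ^ (va2)) & VA_ALIAS_MASK)

        if (DCACHE_COLOR_MISMATCH(vmmap, CPUINFO_VA))
                vmmap += NBPG;  /* advance one page to equalize the color */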
         /*
          * Set up bounds of allocatable memory for vmstat et al.

@@ -1094 +1291 @@ pmap_bootstrap(kernelstart, kernelend, m

 #ifdef DEBUG
         pmapdebug = opmapdebug;
 #endif
+#ifdef BOOT1_DEBUG
+        prom_printf("Finished pmap_bootstrap()\r\n");
+#endif
 }

         if (va == 0)
                 panic("cpu_start: no memory");

-        pv_table = va;
+        pv_table = (struct pv_entry *)va;
         m = TAILQ_FIRST(&mlist);
-        pa = VM_PAGE_TO_PHYS(m);
-        pte = TSB_DATA(0 /* global */,
-                pagesize,
-                pa,
-                1 /* priv */,
-                1 /* Write */,
-                1 /* Cacheable */,
-                1 /* ALIAS -- Disable D$ */,
-                1 /* valid */,
-                0 /* IE */);

         /* Map the pages */
         for (; m != NULL; m = TAILQ_NEXT(m,pageq)) {
-                paddr_t newp;
                 u_int64_t data;

                 pa = VM_PAGE_TO_PHYS(m);
                 pmap_zero_page(pa);
-#ifdef NO_VCACHE
-                data = TSB_DATA(0 /* global */,
-                        TLB_8K,
-                        pa,
-                        1 /* priv */,
-                        1 /* Write */,
-                        1 /* Cacheable */,
-                        1 /* ALIAS -- Disable D$ */,
-                        1 /* valid */,
-                        0 /* IE */);
-#else
                 data = TSB_DATA(0 /* global */,
                         TLB_8K,
                         pa,
                         1 /* priv */,
                         1 /* Write */,
                         1 /* Cacheable */,
-                        0 /* No ALIAS */,
+                        FORCE_ALIAS /* ALIAS -- Disable D$ */,
                         1 /* valid */,
                         0 /* IE */);
-#endif
-                newp = NULL;
-                while (pseg_set(pmap_kernel(), va, data, newp)
-                    != NULL) {
-                        pmap_get_page(&newp);
-                        pmap_zero_page(newp);
-#ifdef DEBUG
-                        enter_stats.ptpneeded ++;
-#endif
-#ifdef BOOT1_DEBUG
-                        prom_printf(
-                            "pseg_set: pm=%p va=%p data=%lx newp %lx\r\n",
-                            pmap_kernel(), va, (long)data, (long)newp);
-                        {int i; for (i=0; i<140000000; i++) ;}
-#endif
-                }
+                pmap_enter_kpage(va, data);
                 va += NBPG;
         }
         pmap_initialized = 1;

@@ -1680 +1843 @@ pmap_kremove(va, size)

          * Is this part of the permanent 4MB mapping?
          */
 #ifdef DIAGNOSTIC
-        if( pm == pmap_kernel() && va >= ksegv && va < ksegv+4*MEG )
+        if (pm == pmap_kernel() && (va >= ktext && va < kdata+4*MEG))
                 panic("pmap_kremove: va=%08x in locked TLB\r\n", va);
 #endif
         /* Shouldn't need to do this if the entry's not valid. */

@@ -1766 +1929 @@ pmap_enter(pm, va, pa, prot, flags)

          * Is this part of the permanent 4MB mapping?
          */
 #ifdef DIAGNOSTIC
-        if (pm == pmap_kernel() && va >= ksegv && va < ksegv+4*MEG) {
+        if (pm == pmap_kernel() && va >= ktext && va < kdata+4*MEG) {
                 prom_printf("pmap_enter: va=%08x pa=%x:%08x in locked TLB\r\n",
                     va, (int)(pa>>32), (int)pa);
                 OF_enter();

@@ -2084 +2247 @@ pmap_remove(pm, va, endva)

          * Is this part of the permanent 4MB mapping?
          */
 #ifdef DIAGNOSTIC
-        if( pm == pmap_kernel() && va >= ksegv && va < ksegv+4*MEG )
+        if( pm == pmap_kernel() && va >= ktext && va < kdata+4*MEG )
                 panic("pmap_remove: va=%08x in locked TLB\r\n", va);
 #endif
         /* We don't really need to do this if the valid bit is not set... */

@@ -2189 +2352 @@ pmap_protect(pm, sva, eva, prot)

         /*
          * Is this part of the permanent 4MB mapping?
          */
-        if( pm == pmap_kernel() && sva >= ksegv && sva < ksegv+4*MEG ) {
+        if( pm == pmap_kernel() && sva >= ktext && sva < kdata+4*MEG ) {
                 prom_printf("pmap_protect: va=%08x in locked TLB\r\n", sva);
                 OF_enter();
                 return;

@@ -2251 +2414 @@ pmap_extract(pm, va, pap)

 {
         paddr_t pa;

-        if( pm == pmap_kernel() && va >= ksegv && va < ksegv+4*MEG ) {
+        if( pm == pmap_kernel() && va >= kdata && va < kdata+4*MEG ) {
                 /* Need to deal w/locked TLB entry specially. */
-                pa = (paddr_t) (ksegp - ksegv + va);
+                pa = (paddr_t) (kdatap - kdata + va);
+#ifdef DEBUG
+                if (pmapdebug & PDB_EXTRACT) {
+                        printf("pmap_extract: va=%x pa=%lx\n", va, (long)pa);
+                }
+#endif
+        } else if( pm == pmap_kernel() && va >= ktext && va < ektext ) {
+                /* Need to deal w/locked TLB entry specially. */
+                pa = (paddr_t) (ktextp - ktext + va);
 #ifdef DEBUG
                 if (pmapdebug & PDB_EXTRACT) {
                         printf("pmap_extract: va=%x pa=%lx\n", va, (long)pa);
         /*
          * Is this part of the permanent 4MB mapping?
          */
-        if( pm == pmap_kernel() && sva >= ksegv && sva < ksegv+4*MEG ) {
+        if( pm == pmap_kernel() && sva >= ktext && sva < kdata+4*MEG ) {
                 prom_printf("pmap_changeprot: va=%08x in locked TLB\r\n", sva);
                 OF_enter();
                 return;

@@ -2463 +2634 @@ pmap_dumpmmu(dump, blkno)

         kcpu = (cpu_kcore_hdr_t *)((long)bp + ALIGN(sizeof(kcore_seg_t)));
         kcpu->cputype = CPU_SUN4U;
         kcpu->kernbase = KERNBASE;
-        kcpu->kphys = (paddr_t)ksegp;
+        kcpu->kphys = (paddr_t)ktextp;
         kcpu->nmemseg = memsize;
         kcpu->memsegoffset = memsegoffset = ALIGN(sizeof(cpu_kcore_hdr_t));
         kcpu->nsegmap = STSZ;

@@ -2935 +3106 @@ pmap_unwire(pmap, va)

         /*
          * Is this part of the permanent 4MB mapping?
          */
-        if( pmap == pmap_kernel() && va >= ksegv && va < ksegv+4*MEG ) {
+        if( pmap == pmap_kernel() && va >= ktext && va < kdata+4*MEG ) {
                 prom_printf("pmap_unwire: va=%08x in locked TLB\r\n", va);
                 OF_enter();
                 return;