
File: src/sys/arch/atari/include/pmap.h

Revision 1.17, Wed Feb 18 02:05:33 1998 UTC by cgd
Branch: MAIN
Changes since 1.16: +3 -1 lines

Move pmap_map() function definition to MD headers, as appropriate.  It's
an internal function, and the VM system shouldn't try to prototype it.
(Note that some ports _don't_ prototype it.)

/*	$NetBSD: pmap.h,v 1.17 1998/02/18 02:05:33 cgd Exp $	*/

/* 
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Pmap stuff
 */
struct pmap {
	pt_entry_t 		*pm_ptab;	/* KVA of page table */
	st_entry_t		*pm_stab;	/* KVA of segment table */
	int			pm_stfree;	/* 040: free lev2 blocks */
	u_int			*pm_stpa;	/* 040: ST phys. address */
	short			pm_sref;	/* segment table ref count */
	short			pm_count;	/* pmap reference count */
	long			pm_ptpages;	/* more stats: PT pages */
	simple_lock_data_t	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
};

typedef struct pmap *pmap_t;

/*
 * On the 040 we keep track of which level 2 blocks are already in use
 * with the pm_stfree mask.  Bits are arranged from LSB (block 0) to MSB
 * (block 31).  For convenience, the level 1 table is considered to be
 * block 0.
 *
 * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
 * for the kernel and users.  16 implies only the initial "segment table"
 * page is used.  WARNING: don't change MAXUL2SIZE unless you can allocate
 * physically contiguous pages for the ST in pmap.c!
 */
#define	MAXKL2SIZE	32
#define MAXUL2SIZE	16
#define l2tobm(n)	(1 << (n))
#define	bmtol2(n)	(ffs(n) - 1)
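
/*
 * Illustrative sketch (not part of the original header, kept under
 * #if 0): how the pm_stfree mask and the l2tobm()/bmtol2() macros
 * might be used to hand out the lowest-numbered free level 2 block.
 * "example_alloc_l2_block" is a hypothetical helper name.
 */
#if 0
static int
example_alloc_l2_block(struct pmap *pm)
{
	int blk;

	if (pm->pm_stfree == 0)
		return (-1);		/* no free level 2 blocks left */
	blk = bmtol2(pm->pm_stfree);	/* lowest set bit -> block number */
	pm->pm_stfree &= ~l2tobm(blk);	/* clear the bit: block now in use */
	return (blk);
}
#endif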

/*
 * Macros for speed
 */
#define	PMAP_ACTIVATE(pmap, loadhw)					\
{									\
	if ((loadhw))							\
		loadustp(m68k_btop((vm_offset_t)(pmap)->pm_stpa));	\
}
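
/*
 * Illustrative sketch (not part of the original header, kept under
 * #if 0): how a context-switch path might use PMAP_ACTIVATE().  The
 * hardware root pointer is only reloaded when "loadhw" is true, i.e.
 * when the pmap belongs to the process actually being resumed.
 * "example_switch_to" is a hypothetical caller.
 */
#if 0
static void
example_switch_to(struct proc *p)
{
	struct pmap *pm = p->p_vmspace->vm_map.pmap;

	PMAP_ACTIVATE(pm, p == curproc);
}
#endif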

/*
 * Description of the memory segments.  Built in atari_init/start_c().
 * This gives a better separation between machine-dependent code and
 * the pmap module.
 */
#define	NMEM_SEGS	8
struct memseg {
	vm_offset_t	start;		/* PA of first page in segment	*/
	vm_offset_t	end;		/* PA of last  page in segment	*/
	int		first_page;	/* relative page# of 'start'	*/
};
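
/*
 * Illustrative sketch (not part of the original header, kept under
 * #if 0): translating a physical address to its relative page number
 * using a memseg array.  It assumes 'end' is inclusive (the PA of the
 * last page, per the field comment above) and that unused trailing
 * slots are zero-filled.  "example_pa_to_pageno" is hypothetical.
 */
#if 0
static int
example_pa_to_pageno(struct memseg *segs, vm_offset_t pa)
{
	int i;

	for (i = 0; i < NMEM_SEGS; i++) {
		if (segs[i].end == 0)
			break;		/* assumed end of the used slots */
		if (pa >= segs[i].start && pa <= segs[i].end)
			return (segs[i].first_page + atop(pa - segs[i].start));
	}
	return (-1);			/* pa not in any described segment */
}
#endif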

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	u_int		*pv_ptste;	/* non-zero if VA maps a PT page */
	struct pmap	*pv_ptpmap;	/* if pv_ptste, pmap for PT page */
	int		pv_flags;	/* flags */
} *pv_entry_t;

#define	PV_CI		0x01	/* all entries must be cache inhibited */
#define PV_PTPAGE	0x02	/* entry maps a page table page */

struct pv_page;

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pgi_list;
	struct pv_entry *pgi_freelist;
	int pgi_nfree;
};

/*
 * This is basically:
 * ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
 */
#define	NPVPPG	340

struct pv_page {
	struct pv_page_info pvp_pgi;
	struct pv_entry pvp_pv[NPVPPG];
};
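
/*
 * Illustrative sketch (not part of the original header, kept under
 * #if 0): turning a freshly allocated page into a pv_page whose
 * NPVPPG entries are chained on its private free list.  A real
 * allocator would also queue pvp_pgi.pgi_list on a global TAILQ of
 * pv pages; this hypothetical helper only shows the in-page layout.
 */
#if 0
static void
example_init_pv_page(struct pv_page *pg)
{
	int i;

	pg->pvp_pgi.pgi_nfree = NPVPPG;
	pg->pvp_pgi.pgi_freelist = &pg->pvp_pv[0];
	for (i = 0; i < NPVPPG - 1; i++)
		pg->pvp_pv[i].pv_next = &pg->pvp_pv[i + 1];
	pg->pvp_pv[NPVPPG - 1].pv_next = NULL;
}
#endif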

#ifdef	_KERNEL
/*
 * Memory segment descriptors.
 *  - boot_segs
 *	describes the segments obtained from the bootcode.
 *  - usable_segs
 *	describes the segments available after system requirements are
 *	subtracted (reserved pages, etc...).
 */
struct memseg	boot_segs[NMEM_SEGS];
struct memseg	usable_segs[NMEM_SEGS];

pv_entry_t	pv_table;	/* array of entries, one per page */
u_int		*Sysmap;
char		*vmmap;		/* map for mem, dumps, etc. */
struct pmap	kernel_pmap_store;

#ifdef MACHINE_NONCONTIG
#define	pa_index(pa)			pmap_page_index(pa)
#else
#define pa_index(pa)			atop(pa - vm_first_phys)
#endif /* MACHINE_NONCONTIG */

#define pa_to_pvh(pa)			(&pv_table[pa_index(pa)])
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)

#define	active_user_pmap(pm) \
	(curproc && \
	 (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)
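
/*
 * Illustrative sketch (not part of the original header, kept under
 * #if 0): counting the current virtual mappings of a managed physical
 * page by walking its pv chain through pa_to_pvh().  It assumes the
 * hp300-style convention that a head entry with a NULL pv_pmap means
 * "no mappings", and it omits the locking a real caller would need.
 */
#if 0
static int
example_count_mappings(vm_offset_t pa)
{
	pv_entry_t pv;
	int n = 0;

	for (pv = pa_to_pvh(pa); pv != NULL; pv = pv->pv_next)
		if (pv->pv_pmap != NULL)
			n++;
	return (n);
}
#endif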

void	pmap_bootstrap __P((vm_offset_t, u_int, u_int));
void	pmap_changebit __P((vm_offset_t, int, boolean_t));

vm_offset_t	pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));

#endif	/* _KERNEL */

#endif	/* !_MACHINE_PMAP_H_ */