Annotation of src/sys/arch/arm/include/cpufunc.h, Revision 1.79
1.45 matt 1: /* cpufunc.h,v 1.40.22.4 2007/11/08 10:59:33 matt Exp */
1.1 reinoud 2:
3: /*
4: * Copyright (c) 1997 Mark Brinicombe.
5: * Copyright (c) 1997 Causality Limited
6: * All rights reserved.
7: *
8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * 2. Redistributions in binary form must reproduce the above copyright
14: * notice, this list of conditions and the following disclaimer in the
15: * documentation and/or other materials provided with the distribution.
16: * 3. All advertising materials mentioning features or use of this software
17: * must display the following acknowledgement:
18: * This product includes software developed by Causality Limited.
19: * 4. The name of Causality Limited may not be used to endorse or promote
20: * products derived from this software without specific prior written
21: * permission.
22: *
23: * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
24: * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25: * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26: * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
27: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29: * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33: * SUCH DAMAGE.
34: *
35: * RiscBSD kernel project
36: *
37: * cpufunc.h
38: *
 39: * Prototypes for CPU, MMU and TLB related functions.
40: */
41:
1.65 matt 42: #ifndef _ARM_CPUFUNC_H_
43: #define _ARM_CPUFUNC_H_
1.1 reinoud 44:
1.79 ! ryo 45: #ifdef __arm__
! 46:
1.21 thorpej 47: #ifdef _KERNEL
48:
1.79 ! ryo 49: #if !defined(_RUMPKERNEL)
! 50:
1.1 reinoud 51: #include <sys/types.h>
1.79 ! ryo 52:
1.45 matt 53: #include <arm/armreg.h>
1.21 thorpej 54: #include <arm/cpuconf.h>
1.74 christos 55: #include <arm/cpufunc_proto.h>
1.1 reinoud 56:
57: struct cpu_functions {
58:
59: /* CPU functions */
1.32 uwe 60:
1.39 bjh21 61: u_int (*cf_id) (void);
62: void (*cf_cpwait) (void);
1.1 reinoud 63:
64: /* MMU functions */
65:
1.39 bjh21 66: u_int (*cf_control) (u_int, u_int);
67: void (*cf_domains) (u_int);
1.69 matt 68: #if defined(ARM_MMU_EXTENDED)
69: void (*cf_setttb) (u_int, tlb_asid_t);
70: #else
1.60 matt 71: void (*cf_setttb) (u_int, bool);
1.69 matt 72: #endif
1.39 bjh21 73: u_int (*cf_faultstatus) (void);
74: u_int (*cf_faultaddress) (void);
1.1 reinoud 75:
76: /* TLB functions */
77:
1.39 bjh21 78: void (*cf_tlb_flushID) (void);
1.66 matt 79: void (*cf_tlb_flushID_SE) (vaddr_t);
1.39 bjh21 80: void (*cf_tlb_flushI) (void);
1.66 matt 81: void (*cf_tlb_flushI_SE) (vaddr_t);
1.39 bjh21 82: void (*cf_tlb_flushD) (void);
1.66 matt 83: void (*cf_tlb_flushD_SE) (vaddr_t);
1.1 reinoud 84:
1.17 thorpej 85: /*
86: * Cache operations:
87: *
88: * We define the following primitives:
89: *
90: * icache_sync_all Synchronize I-cache
91: * icache_sync_range Synchronize I-cache range
92: *
93: * dcache_wbinv_all Write-back and Invalidate D-cache
94: * dcache_wbinv_range Write-back and Invalidate D-cache range
95: * dcache_inv_range Invalidate D-cache range
96: * dcache_wb_range Write-back D-cache range
97: *
98: * idcache_wbinv_all Write-back and Invalidate D-cache,
99: * Invalidate I-cache
100: * idcache_wbinv_range Write-back and Invalidate D-cache,
101: * Invalidate I-cache range
102: *
103: * Note that the ARM term for "write-back" is "clean". We use
104: * the term "write-back" since it's a more common way to describe
105: * the operation.
106: *
107: * There are some rules that must be followed:
108: *
109: * I-cache Synch (all or range):
110: * The goal is to synchronize the instruction stream,
 111: * so you may need to write back dirty D-cache blocks
112: * first. If a range is requested, and you can't
113: * synchronize just a range, you have to hit the whole
114: * thing.
115: *
116: * D-cache Write-Back and Invalidate range:
117: * If you can't WB-Inv a range, you must WB-Inv the
118: * entire D-cache.
119: *
120: * D-cache Invalidate:
121: * If you can't Inv the D-cache, you must Write-Back
122: * and Invalidate. Code that uses this operation
123: * MUST NOT assume that the D-cache will not be written
124: * back to memory.
125: *
126: * D-cache Write-Back:
127: * If you can't Write-back without doing an Inv,
128: * that's fine. Then treat this as a WB-Inv.
129: * Skipping the invalidate is merely an optimization.
130: *
131: * All operations:
132: * Valid virtual addresses must be passed to each
 133: * cache operation.  (See the usage sketch after the cache macros below.)
134: */
1.39 bjh21 135: void (*cf_icache_sync_all) (void);
136: void (*cf_icache_sync_range) (vaddr_t, vsize_t);
1.17 thorpej 137:
1.39 bjh21 138: void (*cf_dcache_wbinv_all) (void);
139: void (*cf_dcache_wbinv_range)(vaddr_t, vsize_t);
140: void (*cf_dcache_inv_range) (vaddr_t, vsize_t);
141: void (*cf_dcache_wb_range) (vaddr_t, vsize_t);
1.1 reinoud 142:
1.59 matt 143: void (*cf_sdcache_wbinv_range)(vaddr_t, paddr_t, psize_t);
144: void (*cf_sdcache_inv_range) (vaddr_t, paddr_t, psize_t);
145: void (*cf_sdcache_wb_range) (vaddr_t, paddr_t, psize_t);
146:
1.39 bjh21 147: void (*cf_idcache_wbinv_all) (void);
148: void (*cf_idcache_wbinv_range)(vaddr_t, vsize_t);
1.1 reinoud 149:
150: /* Other functions */
151:
1.39 bjh21 152: void (*cf_flush_prefetchbuf) (void);
153: void (*cf_drain_writebuf) (void);
154: void (*cf_flush_brnchtgt_C) (void);
155: void (*cf_flush_brnchtgt_E) (u_int);
1.1 reinoud 156:
1.39 bjh21 157: void (*cf_sleep) (int mode);
1.1 reinoud 158:
159: /* Soft functions */
160:
1.39 bjh21 161: int (*cf_dataabt_fixup) (void *);
162: int (*cf_prefetchabt_fixup) (void *);
1.1 reinoud 163:
1.69 matt 164: #if defined(ARM_MMU_EXTENDED)
165: void (*cf_context_switch) (u_int, tlb_asid_t);
166: #else
1.41 scw 167: void (*cf_context_switch) (u_int);
1.69 matt 168: #endif
1.1 reinoud 169:
1.39 bjh21 170: void (*cf_setup) (char *);
1.1 reinoud 171: };
172:
173: extern struct cpu_functions cpufuncs;
174: extern u_int cputype;
175:
1.76 christos 176: #define cpu_idnum() cpufuncs.cf_id()
1.1 reinoud 177:
178: #define cpu_control(c, e) cpufuncs.cf_control(c, e)
179: #define cpu_domains(d) cpufuncs.cf_domains(d)
1.60 matt 180: #define cpu_setttb(t, f) cpufuncs.cf_setttb(t, f)
1.1 reinoud 181: #define cpu_faultstatus() cpufuncs.cf_faultstatus()
182: #define cpu_faultaddress() cpufuncs.cf_faultaddress()
183:
184: #define cpu_tlb_flushID() cpufuncs.cf_tlb_flushID()
185: #define cpu_tlb_flushID_SE(e) cpufuncs.cf_tlb_flushID_SE(e)
186: #define cpu_tlb_flushI() cpufuncs.cf_tlb_flushI()
187: #define cpu_tlb_flushI_SE(e) cpufuncs.cf_tlb_flushI_SE(e)
188: #define cpu_tlb_flushD() cpufuncs.cf_tlb_flushD()
189: #define cpu_tlb_flushD_SE(e) cpufuncs.cf_tlb_flushD_SE(e)
190:
1.17 thorpej 191: #define cpu_icache_sync_all() cpufuncs.cf_icache_sync_all()
192: #define cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))
193:
194: #define cpu_dcache_wbinv_all() cpufuncs.cf_dcache_wbinv_all()
195: #define cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
196: #define cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
197: #define cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))
198:
1.59 matt 199: #define cpu_sdcache_wbinv_range(a, b, s) cpufuncs.cf_sdcache_wbinv_range((a), (b), (s))
200: #define cpu_sdcache_inv_range(a, b, s) cpufuncs.cf_sdcache_inv_range((a), (b), (s))
201: #define cpu_sdcache_wb_range(a, b, s) cpufuncs.cf_sdcache_wb_range((a), (b), (s))
202:
1.17 thorpej 203: #define cpu_idcache_wbinv_all() cpufuncs.cf_idcache_wbinv_all()
204: #define cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
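
/*
 * A minimal, hypothetical usage sketch of the cache wrappers above
 * (the buffer and length names are illustrative, not part of this
 * interface): a driver handing a buffer to a device typically writes
 * dirty lines back before the device reads memory, and invalidates
 * before the CPU reads what the device wrote.
 *
 *	cpu_dcache_wb_range(buf_va, buf_len);	before the device reads
 *	... device fills the buffer ...
 *	cpu_dcache_inv_range(buf_va, buf_len);	before the CPU reads it
 *
 * Per the rules above, cpu_dcache_inv_range() may in fact write back
 * as well, so callers must not depend on dirty data being discarded.
 */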
1.1 reinoud 205:
206: #define cpu_flush_prefetchbuf() cpufuncs.cf_flush_prefetchbuf()
207: #define cpu_drain_writebuf() cpufuncs.cf_drain_writebuf()
208: #define cpu_flush_brnchtgt_C() cpufuncs.cf_flush_brnchtgt_C()
209: #define cpu_flush_brnchtgt_E(e) cpufuncs.cf_flush_brnchtgt_E(e)
210:
211: #define cpu_sleep(m) cpufuncs.cf_sleep(m)
212:
213: #define cpu_dataabt_fixup(a) cpufuncs.cf_dataabt_fixup(a)
214: #define cpu_prefetchabt_fixup(a) cpufuncs.cf_prefetchabt_fixup(a)
1.7 wiz 215: #define ABORT_FIXUP_OK 0 /* fixup succeeded */
1.1 reinoud 216: #define ABORT_FIXUP_FAILED 1 /* fixup failed */
217: #define ABORT_FIXUP_RETURN 2 /* abort handler should return */
218:
1.41 scw 219: #define cpu_context_switch(a) cpufuncs.cf_context_switch(a)
1.1 reinoud 220: #define cpu_setup(a) cpufuncs.cf_setup(a)
221:
1.39 bjh21 222: int set_cpufuncs (void);
1.40 bjh21 223: int set_cpufuncs_id (u_int);
1.1 reinoud 224: #define ARCHITECTURE_NOT_PRESENT 1 /* known but not configured */
225: #define ARCHITECTURE_NOT_SUPPORTED 2 /* not known */
226:
1.39 bjh21 227: void cpufunc_nullop (void);
228: int cpufunc_null_fixup (void *);
229: int early_abort_fixup (void *);
230: int late_abort_fixup (void *);
231: u_int cpufunc_id (void);
232: u_int cpufunc_control (u_int, u_int);
233: void cpufunc_domains (u_int);
234: u_int cpufunc_faultstatus (void);
235: u_int cpufunc_faultaddress (void);
1.3 bjh21 236:
1.1 reinoud 237: #define setttb cpu_setttb
238: #define drain_writebuf cpu_drain_writebuf
239:
1.73 matt 240:
241: #if defined(CPU_XSCALE)
242: #define cpu_cpwait() cpufuncs.cf_cpwait()
243: #endif
244:
1.43 chris 245: #ifndef cpu_cpwait
246: #define cpu_cpwait()
247: #endif
248:
1.1 reinoud 249: /*
250: * Macros for manipulating CPU interrupts
251: */
1.62 skrll 252: static __inline uint32_t __set_cpsr_c(uint32_t bic, uint32_t eor) __attribute__((__unused__));
253: static __inline uint32_t disable_interrupts(uint32_t mask) __attribute__((__unused__));
254: static __inline uint32_t enable_interrupts(uint32_t mask) __attribute__((__unused__));
1.25 briggs 255:
1.43 chris 256: static __inline uint32_t
257: __set_cpsr_c(uint32_t bic, uint32_t eor)
1.25 briggs 258: {
1.43 chris 259: uint32_t tmp, ret;
1.25 briggs 260:
1.36 perry 261: __asm volatile(
1.25 briggs 262: "mrs %0, cpsr\n" /* Get the CPSR */
263: "bic %1, %0, %2\n" /* Clear bits */
264: "eor %1, %1, %3\n" /* XOR bits */
265: "msr cpsr_c, %1\n" /* Set the control field of CPSR */
266: : "=&r" (ret), "=&r" (tmp)
1.31 rearnsha 267: : "r" (bic), "r" (eor) : "memory");
1.25 briggs 268:
269: return ret;
270: }
271:
1.43 chris 272: static __inline uint32_t
273: disable_interrupts(uint32_t mask)
274: {
275: uint32_t tmp, ret;
276: mask &= (I32_bit | F32_bit);
277:
278: __asm volatile(
279: "mrs %0, cpsr\n" /* Get the CPSR */
280: "orr %1, %0, %2\n" /* set bits */
281: "msr cpsr_c, %1\n" /* Set the control field of CPSR */
282: : "=&r" (ret), "=&r" (tmp)
283: : "r" (mask)
284: : "memory");
285:
286: return ret;
287: }
288:
289: static __inline uint32_t
290: enable_interrupts(uint32_t mask)
291: {
1.69 matt 292: uint32_t ret;
1.43 chris 293: mask &= (I32_bit | F32_bit);
294:
1.69 matt 295: /* Get the CPSR */
296: __asm __volatile("mrs\t%0, cpsr\n" : "=r"(ret));
297: #ifdef _ARM_ARCH_6
298: if (__builtin_constant_p(mask)) {
299: switch (mask) {
300: case I32_bit | F32_bit:
301: __asm __volatile("cpsie\tif");
302: break;
303: case I32_bit:
304: __asm __volatile("cpsie\ti");
305: break;
306: case F32_bit:
307: __asm __volatile("cpsie\tf");
308: break;
309: default:
310: break;
311: }
312: return ret;
313: }
314: #endif /* _ARM_ARCH_6 */
315:
316: /* Set the control field of CPSR */
317: __asm volatile("msr\tcpsr_c, %0" :: "r"(ret & ~mask));
1.1 reinoud 318:
1.43 chris 319: return ret;
320: }
1.1 reinoud 321:
1.15 thorpej 322: #define restore_interrupts(old_cpsr) \
1.25 briggs 323: (__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
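
/*
 * A minimal, hypothetical sketch of how the primitives above are
 * normally paired: save-and-disable around a critical section, then
 * restore the saved state (the variable name is illustrative only).
 *
 *	uint32_t savedpsr = disable_interrupts(I32_bit | F32_bit);
 *	... critical section ...
 *	restore_interrupts(savedpsr);
 *
 * disable_interrupts() returns the previous CPSR, and
 * restore_interrupts() re-applies only its I32_bit/F32_bit state.
 */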
1.45 matt 324:
325: static inline void cpsie(register_t psw) __attribute__((__unused__));
326: static inline register_t cpsid(register_t psw) __attribute__((__unused__));
327:
328: static inline void
329: cpsie(register_t psw)
330: {
1.49 matt 331: #ifdef _ARM_ARCH_6
1.45 matt 332: if (!__builtin_constant_p(psw)) {
333: enable_interrupts(psw);
334: return;
335: }
336: switch (psw & (I32_bit|F32_bit)) {
337: case I32_bit: __asm("cpsie\ti"); break;
338: case F32_bit: __asm("cpsie\tf"); break;
339: case I32_bit|F32_bit: __asm("cpsie\tif"); break;
340: }
1.48 cliff 341: #else
342: enable_interrupts(psw);
343: #endif
1.45 matt 344: }
345:
346: static inline register_t
347: cpsid(register_t psw)
348: {
1.49 matt 349: #ifdef _ARM_ARCH_6
1.45 matt 350: register_t oldpsw;
351: if (!__builtin_constant_p(psw))
352: return disable_interrupts(psw);
353:
354: __asm("mrs %0, cpsr" : "=r"(oldpsw));
355: switch (psw & (I32_bit|F32_bit)) {
356: case I32_bit: __asm("cpsid\ti"); break;
357: case F32_bit: __asm("cpsid\tf"); break;
358: case I32_bit|F32_bit: __asm("cpsid\tif"); break;
359: }
360: return oldpsw;
1.48 cliff 361: #else
362: return disable_interrupts(psw);
363: #endif
1.45 matt 364: }
365:
1.15 thorpej 366:
367: /* Functions to manipulate the CPSR. */
1.32 uwe 368: u_int SetCPSR(u_int, u_int);
1.15 thorpej 369: u_int GetCPSR(void);
1.1 reinoud 370:
371:
372: /*
373: * CPU functions from locore.S
374: */
375:
1.58 matt 376: void cpu_reset (void) __dead;
1.14 thorpej 377:
378: /*
379: * Cache info variables.
380: */
1.67 matt 381: #define CACHE_TYPE_VIVT 0
382: #define CACHE_TYPE_xxPT 1
383: #define CACHE_TYPE_VIPT 1
384: #define CACHE_TYPE_PIxx 2
385: #define CACHE_TYPE_PIPT 3
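
/*
 * Read as a two-bit encoding this appears to mean: bit 0 set when the
 * cache is physically tagged (xxPT), bit 1 set when it is physically
 * indexed (PIxx); hence VIVT (virtually indexed, virtually tagged) is
 * 0, VIPT (virtually indexed, physically tagged) is 1 and PIPT
 * (physically indexed, physically tagged) is 3.  This reading is
 * inferred from the names, not stated elsewhere in this file.
 */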
1.14 thorpej 386:
387: /* PRIMARY CACHE VARIABLES */
1.58 matt 388: struct arm_cache_info {
389: u_int icache_size;
390: u_int icache_line_size;
391: u_int icache_ways;
1.68 matt 392: u_int icache_way_size;
1.58 matt 393: u_int icache_sets;
394:
395: u_int dcache_size;
396: u_int dcache_line_size;
397: u_int dcache_ways;
1.68 matt 398: u_int dcache_way_size;
1.58 matt 399: u_int dcache_sets;
400:
1.69 matt 401: uint8_t cache_type;
1.58 matt 402: bool cache_unified;
1.67 matt 403: uint8_t icache_type;
404: uint8_t dcache_type;
1.58 matt 405: };
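
/*
 * Assumed relationship between the geometry fields above, based on the
 * usual set-associative cache arithmetic rather than on anything in
 * this file:
 *
 *	way_size == line_size * sets
 *	size     == way_size  * ways
 *
 * e.g. dcache_size == dcache_way_size * dcache_ways.
 */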
406:
1.77 mrg 407: #if (ARM_MMU_V6 + ARM_MMU_V7) != 0
1.58 matt 408: extern u_int arm_cache_prefer_mask;
1.77 mrg 409: #endif
1.58 matt 410: extern u_int arm_dcache_align;
411: extern u_int arm_dcache_align_mask;
1.1 reinoud 412:
1.58 matt 413: extern struct arm_cache_info arm_pcache;
414: extern struct arm_cache_info arm_scache;
1.79 ! ryo 415:
 ! 416: #endif /* !_RUMPKERNEL */
! 417:
1.1 reinoud 418: #endif /* _KERNEL */
1.55 christos 419:
420: #if defined(_KERNEL) || defined(_KMEMUSER)
421: /*
422: * Miscellany
423: */
424:
425: int get_pc_str_offset (void);
426:
1.79 ! ryo 427: bool cpu_gtmr_exists_p(void);
! 428: u_int cpu_clusterid(void);
! 429: bool cpu_earlydevice_va_p(void);
! 430:
1.55 christos 431: /*
432: * Functions to manipulate cpu r13
433: * (in arm/arm32/setstack.S)
434: */
435:
436: void set_stackptr (u_int, u_int);
437: u_int get_stackptr (u_int);
438:
439: #endif /* _KERNEL || _KMEMUSER */
440:
1.79 ! ryo 441: #elif defined(__aarch64__)
! 442:
! 443: #include <aarch64/cpufunc.h>
! 444:
! 445: #endif /* __arm__/__aarch64__ */
! 446:
1.65 matt 447: #endif /* _ARM_CPUFUNC_H_ */
1.1 reinoud 448:
449: /* End of cpufunc.h */