    // We can achieve atomicity by disabling interrupts
    uint32_t intr_level;
    __asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
                          : "=r"(intr_level));

    // Compare and set
    uint32_t old_value;
    old_value = *addr;
    if (old_value == compare_value) {
        *addr = new_value;
    }

    // Restore interrupts
    __asm__ __volatile__ ("memw \n"
                          "wsr %0, ps\n"
                          :: "r"(intr_level));

    return (old_value == compare_value);
#endif // XCHAL_HAVE_S32C1I
}

#ifdef __cplusplus
}
#endif
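A primitive like this is typically used as the building block for a lock. The sketch below shows one common pattern; the function name compare_and_set() and its exact signature are assumptions for illustration (the fragment above only shows the function tail), not the header's declared API.

/* Minimal usage sketch: building a spinlock from a compare-and-set
   primitive. compare_and_set() is a hypothetical name standing in for
   the function whose body appears above. */
#include <stdint.h>
#include <stdbool.h>

extern bool compare_and_set(volatile uint32_t *addr, uint32_t compare, uint32_t set);

#define LOCK_FREE 0u
#define LOCK_HELD 1u

static void spin_lock(volatile uint32_t *lock)
{
    /* Retry until the FREE -> HELD transition succeeds atomically. */
    while (!compare_and_set(lock, LOCK_FREE, LOCK_HELD)) {
        /* busy-wait */
    }
}

static void spin_unlock(volatile uint32_t *lock)
{
    *lock = LOCK_FREE; /* a plain word-sized store suffices to release */
}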
/*
 */
#ifndef XTENSA_CONFIG_H
#define XTENSA_CONFIG_H

#ifdef __cplusplus
extern "C" {
#endif

#include <xtensa/hal.h>
#include <xtensa/config/core.h>
#include <xtensa/config/system.h>   /* required for XSHAL_CLIB */

#include "xtensa_context.h"

/* Extra space required for interrupt/exception hooks. */
#ifdef XT_INTEXC_HOOKS
#ifdef __XTENSA_CALL0_ABI__
#define STK_INTEXC_EXTRA        0x200
#else
#define STK_INTEXC_EXTRA        0x180
#endif
#else
#define STK_INTEXC_EXTRA        0
#endif

/* Check C library thread safety support and compute size of C library save
   area. For the supported libraries, we enable thread safety by default,
   and this can be overridden from the compiler/make command line. */
#if (XSHAL_CLIB == XTHAL_CLIB_NEWLIB) || (XSHAL_CLIB == XTHAL_CLIB_XCLIB)
#ifndef XT_USE_THREAD_SAFE_CLIB
#define XT_USE_THREAD_SAFE_CLIB 1
#endif
#else
#define XT_USE_THREAD_SAFE_CLIB 0
#endif

#if XT_USE_THREAD_SAFE_CLIB > 0u
#if XSHAL_CLIB == XTHAL_CLIB_XCLIB
#define XT_HAVE_THREAD_SAFE_CLIB 1
#if !defined __ASSEMBLER__
#include <sys/reent.h>
#define XT_CLIB_CONTEXT_AREA_SIZE   ((sizeof(struct _reent) + 15) & (-16))
#define XT_CLIB_GLOBAL_PTR          _reent_ptr
#define _REENT_INIT_PTR             _init_reent
#define _impure_ptr                 _reent_ptr

void _reclaim_reent(void * ptr);
#endif
#elif XSHAL_CLIB == XTHAL_CLIB_NEWLIB
#define XT_HAVE_THREAD_SAFE_CLIB 1
#if !defined __ASSEMBLER__
#include <sys/reent.h>
#define XT_CLIB_CONTEXT_AREA_SIZE   ((sizeof(struct _reent) + 15) & (-16))
#define XT_CLIB_GLOBAL_PTR          _impure_ptr
#endif
#else
#define XT_HAVE_THREAD_SAFE_CLIB 0
#error The selected C runtime library is not thread safe.
#endif
#else
#define XT_CLIB_CONTEXT_AREA_SIZE   0
#endif

/* Extra size -- interrupt frame plus coprocessor save area plus hook space.
   NOTE: Make sure XT_INTEXC_HOOKS is undefined unless you really need the
   hooks. */
#ifdef __XTENSA_CALL0_ABI__
#define XT_XTRA_SIZE    (XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x10 + XT_CP_SIZE)
#else
#define XT_XTRA_SIZE    (XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x20 + XT_CP_SIZE)
#endif

/* Space allocated for user code -- function calls and local variables.
   NOTE: This number can be adjusted to suit your needs. You must verify that
   the amount of space you reserve is adequate for the worst-case conditions
   in your application.
   NOTE: The windowed ABI requires more stack, since space has to be reserved
   for spilling register windows. */
#ifdef __XTENSA_CALL0_ABI__
#define XT_USER_SIZE    0x200
#else
#define XT_USER_SIZE    0x400
#endif

/* Minimum recommended stack size. */
#define XT_STACK_MIN_SIZE   ((XT_XTRA_SIZE + XT_USER_SIZE) / sizeof(unsigned char))

/* OS overhead with and without C library thread context. */
#define XT_STACK_EXTRA          (XT_XTRA_SIZE)
#define XT_STACK_EXTRA_CLIB     (XT_XTRA_SIZE + XT_CLIB_CONTEXT_AREA_SIZE)

#ifdef __cplusplus
}
#endif

#endif /* XTENSA_CONFIG_H */
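These constants are meant to be combined into a per-task stack budget. A minimal sketch, assuming an application-specific worst case; the names APP_LOCALS_WORST_CASE and my_task_stack are illustrative, not part of the header:

/* Usage sketch: sizing a task stack from the constants above, with the
   C-library context reserved (thread-safe C library assumed). */
#include <stdint.h>
#include "xtensa_config.h"

#define APP_LOCALS_WORST_CASE 0x300 /* assumed worst-case application usage, bytes */

/* Total = application space + OS/coprocessor/C-library overhead. */
static uint8_t my_task_stack[APP_LOCALS_WORST_CASE + XT_STACK_EXTRA_CLIB]
    __attribute__((aligned(16)));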
/*
 */
/* $Id: //depot/rel/Foxhill/dot.9/Xtensa/Software/libdb/xtensa-libdb-macros.h#1 $ */

/* Copyright (c) 2004-2008 Tensilica Inc.

   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to
   deal in the Software without restriction, including without limitation the
   rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
   sell copies of the Software, and to permit persons to whom the Software is
   furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   DEALINGS IN THE SOFTWARE. */

#ifndef __H_LIBDB_MACROS
#define __H_LIBDB_MACROS

/*
 */
#ifdef __cplusplus
extern "C" {
#endif

#ifndef uint32
#define uint32  unsigned int
#endif
#ifndef int32
#define int32   int
#endif

/*
 */
#define XTENSA_DBREGN_INVALID     -1            /* not a valid target number */
#define XTENSA_DBREGN_A(n)        (0x0000+(n))  /* address registers a0..a15 */
#define XTENSA_DBREGN_B(n)        (0x0010+(n))  /* boolean bits b0..b15 */
#define XTENSA_DBREGN_PC          0x0020        /* program counter */
                                  /* 0x0021 RESERVED for use by Tensilica */
#define XTENSA_DBREGN_BO(n)       (0x0022+(n))  /* boolean octuple-bits bo0..bo1 */
#define XTENSA_DBREGN_BQ(n)       (0x0024+(n))  /* boolean quadruple-bits bq0..bq3 */
#define XTENSA_DBREGN_BD(n)       (0x0028+(n))  /* boolean double-bits bd0..bd7 */
#define XTENSA_DBREGN_F(n)        (0x0030+(n))  /* floating point registers f0..f15 */
#define XTENSA_DBREGN_VEC(n)      (0x0040+(n))  /* Vectra vec regs v0..v15 */
#define XTENSA_DBREGN_VSEL(n)     (0x0050+(n))  /* Vectra sel s0..s3 (V1) ..s7 (V2) */
#define XTENSA_DBREGN_VALIGN(n)   (0x0058+(n))  /* Vectra valign regs u0..u3 */
#define XTENSA_DBREGN_VCOEFF(n)   (0x005C+(n))  /* Vectra I vcoeff regs c0..c1 */
                                  /* 0x005E..0x005F RESERVED for use by Tensilica */
#define XTENSA_DBREGN_AEP(n)      (0x0060+(n))  /* HiFi2 Audio Engine regs aep0..aep7 */
#define XTENSA_DBREGN_AEQ(n)      (0x0068+(n))  /* HiFi2 Audio Engine regs aeq0..aeq3 */
                                  /* 0x006C..0x00FF RESERVED for use by Tensilica */
#define XTENSA_DBREGN_AR(n)       (0x0100+(n))  /* physical address regs ar0..ar63
                                                   (note: only with window option) */
                                  /* 0x0140..0x01FF RESERVED for use by Tensilica */
#define XTENSA_DBREGN_SREG(n)     (0x0200+(n))  /* special registers 0..255 (core) */
#define XTENSA_DBREGN_BR          XTENSA_DBREGN_SREG(0x04)      /* all 16 boolean bits, BR */
#define XTENSA_DBREGN_MR(n)       XTENSA_DBREGN_SREG(0x20+(n))  /* MAC16 registers m0..m3 */
#define XTENSA_DBREGN_UREG(n)     (0x0300+(n))  /* user registers 0..255 (TIE) */
                                  /* 0x0400..0x0FFF RESERVED for use by Tensilica */
                                  /* 0x1000..0x1FFF user-defined regfiles */
                                  /* 0x2000..0xEFFF other states (and regfiles) */
#define XTENSA_DBREGN_DBAGENT(n)  (0xF000+(n))  /* non-processor "registers" 0..4095 for
                                                   3rd-party debugger agent defined use */
                                  /* > 0xFFFF (32-bit) RESERVED for use by Tensilica */
/*#define XTENSA_DBREGN_CONTEXT(n)  (0x02000000+(((n)>>20)&0x1F))*/  /* 0..31 context numbers */

#ifdef __cplusplus
}
#endif

#endif /* __H_LIBDB_MACROS */
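The register numbering is a simple base-plus-index scheme, so ranges can be formed and tested with ordinary arithmetic. A small sketch (main() and the printed strings are illustrative, not part of the header):

/* Usage sketch: forming and classifying debugger target register numbers. */
#include <stdio.h>

int main(void)
{
    int a3 = XTENSA_DBREGN_A(3);   /* 0x0003: address register a3 */
    int pc = XTENSA_DBREGN_PC;     /* 0x0020 */
    int m2 = XTENSA_DBREGN_MR(2);  /* 0x0222: special register 0x22 */

    if (a3 >= XTENSA_DBREGN_A(0) && a3 <= XTENSA_DBREGN_A(15)) {
        printf("a-register index %d\n", a3 - XTENSA_DBREGN_A(0));
    }
    printf("pc=0x%04X m2=0x%04X\n", pc, m2);
    return 0;
}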
/*
 */
#ifndef XTENSA_CACHEASM_H
#define XTENSA_CACHEASM_H

#include <xtensa/coreasm.h>
#include <xtensa/corebits.h>
#include <xtensa/xtensa-xer.h>
#include <xtensa/xtensa-versions.h>

/***************************   GENERIC -- ALL CACHES   ***************************/

/*
 */
	.macro	cache_index_all		cainst, size, linesize, assoc_or1, aa, ab, loopokay, maxofs, awb=a0

	//  Number of indices in cache (lines per way):
	.set	.Lindices, (\size / (\linesize * \assoc_or1))
	//  Number of indices processed per loop iteration (max 4):
	.set	.Lperloop, .Lindices
	.ifgt	.Lperloop - 4
	.set	.Lperloop, 4
	.endif
	//  Also limit instructions per loop if cache line size exceeds immediate range:
	.set	.Lmaxperloop, (\maxofs / \linesize) + 1
	.ifgt	.Lperloop - .Lmaxperloop
	.set	.Lperloop, .Lmaxperloop
	.endif
	//  Avoid addi of 128 which takes two instructions (addmi,addi):
	.ifeq	.Lperloop*\linesize - 128
	.ifgt	.Lperloop - 1
	.set	.Lperloop, .Lperloop / 2
	.endif
	.endif

	//  \size byte cache, \linesize byte lines, \assoc_or1 way(s) affected by each \cainst.
	//  XCHAL_ERRATUM_497 - don't execute using loop, to reduce the amount of added code.
	.ifne	(\loopokay & XCHAL_HAVE_LOOPS && !XCHAL_ERRATUM_497)

	movi	\aa, .Lindices / .Lperloop	// number of loop iterations
	//  Possible improvement: need only loop if \aa > 1;
	//  however \aa == 1 is highly unlikely.
	movi	\ab, 0				// to iterate over cache
	loop	\aa, .Lend_cachex\@
	.set	.Li, 0 ;  .rept .Lperloop
	\cainst	\ab, .Li*\linesize
	.set	.Li, .Li+1 ;  .endr
	addi	\ab, \ab, .Lperloop*\linesize	// move to next line
.Lend_cachex\@:

	.else

	movi	\aa, (\size / \assoc_or1)
	//  Possible improvement: need only loop if \aa > 1;
	//  however \aa == 1 is highly unlikely.
	movi	\ab, 0				// to iterate over cache
	.ifne	((\awb != a0) & XCHAL_ERRATUM_497)	// don't use awb if set to a0
	movi	\awb, 0
	.endif
.Lstart_cachex\@:
	.set	.Li, 0 ;  .rept .Lperloop
	\cainst	\ab, .Li*\linesize
	.set	.Li, .Li+1 ;  .endr
	.ifne	((\awb != a0) & XCHAL_ERRATUM_497)	// do memw after 8 cainst wb instructions
	addi	\awb, \awb, .Lperloop
	blti	\awb, 8, .Lstart_memw\@
	memw
	movi	\awb, 0
.Lstart_memw\@:
	.endif
	addi	\ab, \ab, .Lperloop*\linesize	// move to next line
	bltu	\ab, \aa, .Lstart_cachex\@

	.endif
	.endm

/*
 */
	.macro	cache_hit_region	cainst, linesize_log2, addr, asize, askew, awb=a0

	//  Make \asize the number of iterations:
	extui	\askew, \addr, 0, \linesize_log2	// get unalignment amount of \addr
	add	\asize, \asize, \askew			// ... and add it to \asize
	addi	\asize, \asize, (1 << \linesize_log2) - 1	// round up to whole lines
	srli	\asize, \asize, \linesize_log2		// number of lines to operate on
	//  Iterate over region, one \cainst per line:
	floopgtz	\asize, cacheh\@
	\cainst	\addr, 0
	addi	\addr, \addr, (1 << \linesize_log2)	// move to next line
	floopend	\asize, cacheh\@
	.endm

/*
 */
	.macro	icache_sync	ar
#if XCHAL_ICACHE_SIZE > 0
	isync
#endif
	.endm

/*
 */
	.macro	icache_invalidate_line	ar, offset
#if XCHAL_ICACHE_SIZE > 0
	ihi	\ar, \offset		// invalidate icache line
	icache_sync	\ar
#endif
	.endm

/*
 */
	.macro	icache_invalidate_region	astart, asize, ac
#if XCHAL_ICACHE_SIZE > 0
	//  Instruction cache region invalidation:
	cache_hit_region	ihi, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
	icache_sync	\ac
	//  End of instruction cache region invalidation
#endif
	.endm

/*
 */
	.macro	icache_invalidate_all	aa, ab, loopokay=1
#if XCHAL_ICACHE_SIZE > 0
	//  Instruction cache invalidation:
	cache_index_all	iii, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, XCHAL_ICACHE_WAYS, \aa, \ab, \loopokay, 1020
	icache_sync	\aa
	//  End of instruction cache invalidation
#endif
	.endm

/*
 */
	.macro	icache_lock_line	ar, offset
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
	ipfl	\ar, \offset	/* prefetch and lock icache line */
	icache_sync	\ar
#endif
	.endm

/*
 */
	.macro	icache_lock_region	astart, asize, ac
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
	//  Instruction cache region lock:
	cache_hit_region	ipfl, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
	icache_sync	\ac
	//  End of instruction cache region lock
#endif
	.endm

/*
 */
	.macro	icache_unlock_line	ar, offset
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
	ihu	\ar, \offset	/* unlock icache line */
	icache_sync	\ar
#endif
	.endm

/*
 */
	.macro	icache_unlock_region	astart, asize, ac
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
	//  Instruction cache region unlock:
	cache_hit_region	ihu, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
	icache_sync	\ac
	//  End of instruction cache region unlock
#endif
	.endm

/*
 */
	.macro	icache_unlock_all	aa, ab, loopokay=1
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
	//  Instruction cache unlock:
	cache_index_all	iiu, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, 1, \aa, \ab, \loopokay, 240
	icache_sync	\aa
	//  End of instruction cache unlock
#endif
	.endm

/***************************   DATA CACHE   ***************************/

/*
 */
	.macro	dcache_reset	aa, ab, loopokay=0
	dcache_unlock_all	\aa, \ab, \loopokay
	dcache_invalidate_all	\aa, \ab, \loopokay
	.endm

/*
 */
	.macro	dcache_sync	ar, wbtype=0
#if XCHAL_DCACHE_SIZE > 0
	//  No synchronization is needed.
	//  (memw may be desired e.g. after writeback operation to help ensure subsequent
	//  external accesses are seen to follow that writeback, however that's outside
	//  the scope of this macro)
	//dsync
	.ifne	(\wbtype & XCHAL_ERRATUM_497)
	memw
	.endif
#endif
	.endm

/*
 */
	.macro	cache_coherence_on	ar at
#if XCHAL_DCACHE_IS_COHERENT
# if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0
	/*  Have MEMCTL.  Enable snoop responses.  */
	rsr.memctl	\ar
	movi	\at, MEMCTL_SNOOP_EN
	or	\ar, \ar, \at
	wsr.memctl	\ar
# elif XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX
	/*  Opt into coherence for MX (for backward compatibility / testing).  */
	movi	\ar, 1
	movi	\at, XER_CCON
	wer	\ar, \at
	extw
# endif
#endif
	.endm

/*
 */
	.macro	cache_coherence_off	ar at
#if XCHAL_DCACHE_IS_COHERENT
# if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0
	/*  Have MEMCTL.  Disable snoop responses.  */
	rsr.memctl	\ar
	movi	\at, ~MEMCTL_SNOOP_EN
	and	\ar, \ar, \at
	wsr.memctl	\ar
# elif XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX
	/*  Opt out of coherence, for MX (for backward compatibility / testing).  */
	extw
	movi	\at, 0
	movi	\ar, XER_CCON
	wer	\at, \ar
	extw
# endif
#endif
	.endm

/*
 */
	.macro	write_sync	ar
	memw			// ensure previous memory accesses are complete prior to subsequent memory accesses
	l32i	\ar, sp, 0	// completing this read ensures any previous write has completed, because of MEMW
	//slot
	add	\ar, \ar, \ar	// use the result of the read to help ensure the read completes (in future architectures)
	.endm

/*
 */
	.macro	dcache_invalidate_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0
	dhi	\ar, \offset
	dcache_sync	\ar
#endif
	.endm

/*
 */
	.macro	dcache_invalidate_region	astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0
	//  Data cache region invalidation:
	cache_hit_region	dhi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
	dcache_sync	\ac
	//  End of data cache region invalidation
#endif
	.endm

/*
 */
	.macro	dcache_invalidate_all	aa, ab, loopokay=1
#if XCHAL_DCACHE_SIZE > 0
	//  Data cache invalidation:
	cache_index_all	dii, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, XCHAL_DCACHE_WAYS, \aa, \ab, \loopokay, 1020
	dcache_sync	\aa
	//  End of data cache invalidation
#endif
	.endm

/*
 */
	.macro	dcache_writeback_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	\ar, \offset
	dcache_sync	\ar, wbtype=1
#endif
	.endm

/*
 */
	.macro	dcache_writeback_region		astart, asize, ac, awb
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
	//  Data cache region writeback:
	cache_hit_region	dhwb, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac, \awb
	dcache_sync	\ac, wbtype=1
	//  End of data cache region writeback
#endif
	.endm

/*
 */
	.macro	dcache_writeback_all	aa, ab, awb, loopokay=1
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
	//  Data cache writeback:
	cache_index_all	diwb, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab, \loopokay, 240, \awb
	dcache_sync	\aa, wbtype=1
	//  End of data cache writeback
#endif
	.endm

/*
 */
	.macro	dcache_writeback_inv_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0
	dhwbi	\ar, \offset	/* writeback and invalidate dcache line */
	dcache_sync	\ar, wbtype=1
#endif
	.endm

/*
 */
	.macro	dcache_writeback_inv_region	astart, asize, ac, awb
#if XCHAL_DCACHE_SIZE > 0
	//  Data cache region writeback and invalidate:
	cache_hit_region	dhwbi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac, \awb
	dcache_sync	\ac, wbtype=1
	//  End of data cache region writeback and invalidate
#endif
	.endm

/*
 */
	.macro	dcache_writeback_inv_all	aa, ab, awb, loopokay=1
#if XCHAL_DCACHE_SIZE > 0
	//  Data cache writeback and invalidate:
#if XCHAL_DCACHE_IS_WRITEBACK
	cache_index_all	diwbi, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab, \loopokay, 240, \awb
	dcache_sync	\aa, wbtype=1
#else /*writeback*/
	//  Data cache does not support writeback, so just invalidate:
	dcache_invalidate_all	\aa, \ab, \loopokay
#endif /*writeback*/
	//  End of data cache writeback and invalidate
#endif
	.endm

/*
 */
	.macro	dcache_lock_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	dpfl	\ar, \offset	/* prefetch and lock dcache line */
	dcache_sync	\ar
#endif
	.endm

/*
 */
	.macro	dcache_lock_region	astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	//  Data cache region lock:
	cache_hit_region	dpfl, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
	dcache_sync	\ac
	//  End of data cache region lock
#endif
	.endm

/*
 */
	.macro	dcache_unlock_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	dhu	\ar, \offset	/* unlock dcache line */
	dcache_sync	\ar
#endif
	.endm

/*
 */
	.macro	dcache_unlock_region	astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	//  Data cache region unlock:
	cache_hit_region	dhu, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
	dcache_sync	\ac
	//  End of data cache region unlock
#endif
	.endm

/*
 */
	.macro	dcache_unlock_all	aa, ab, loopokay=1
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	//  Data cache unlock:
	cache_index_all	diu, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab, \loopokay, 240
	dcache_sync	\aa
	//  End of data cache unlock
#endif
	.endm

/*
 */
	.macro	icache_get_ways		aa
#if XCHAL_ICACHE_SIZE > 0
#if XCHAL_HAVE_ICACHE_DYN_WAYS
	//  Read from MEMCTL and shift/mask
	rsr.memctl	\aa
	extui	\aa, \aa, MEMCTL_ICWU_SHIFT, MEMCTL_ICWU_BITS
	blti	\aa, XCHAL_ICACHE_WAYS, .Licgw
	movi	\aa, XCHAL_ICACHE_WAYS
.Licgw:
#else
	//  All ways are always enabled
	movi	\aa, XCHAL_ICACHE_WAYS
#endif
#else
	//  No icache
	movi	\aa, 0
#endif
	.endm

/*
 */
	.macro	icache_set_ways		aa, ab, ac
#if XCHAL_ICACHE_SIZE > 0
#if XCHAL_HAVE_ICACHE_DYN_WAYS
	movi	\ac, MEMCTL_ICWU_CLR_MASK	// set up to clear bits 18-22
	rsr.memctl	\ab
	and	\ab, \ab, \ac
	movi	\ac, MEMCTL_INV_EN		// set bit 23
	slli	\aa, \aa, MEMCTL_ICWU_SHIFT	// move to right spot
	or	\ab, \ab, \aa
	or	\ab, \ab, \ac
	wsr.memctl	\ab
	isync
#else
	//  All ways are always enabled
#endif
#else
	//  No icache
#endif
	.endm

/*
 */
	.macro	dcache_get_ways		aa
#if XCHAL_DCACHE_SIZE > 0
#if XCHAL_HAVE_DCACHE_DYN_WAYS
	//  Read from MEMCTL and shift/mask
	rsr.memctl	\aa
	extui	\aa, \aa, MEMCTL_DCWU_SHIFT, MEMCTL_DCWU_BITS
	blti	\aa, XCHAL_DCACHE_WAYS, .Ldcgw
	movi	\aa, XCHAL_DCACHE_WAYS
.Ldcgw:
#else
	//  All ways are always enabled
	movi	\aa, XCHAL_DCACHE_WAYS
#endif
#else
	//  No dcache
	movi	\aa, 0
#endif
	.endm

/*
 */
	.macro	dcache_set_ways		aa, ab, ac
#if (XCHAL_DCACHE_SIZE > 0) && XCHAL_HAVE_DCACHE_DYN_WAYS
	movi	\ac, MEMCTL_DCWA_CLR_MASK	// set up to clear bits 13-17
	rsr.memctl	\ab
	and	\ab, \ab, \ac			// clear ways allocatable
	slli	\ac, \aa, MEMCTL_DCWA_SHIFT
	or	\ab, \ab, \ac			// set ways allocatable
	wsr.memctl	\ab
#if XCHAL_DCACHE_IS_WRITEBACK
	//  Check if the way count is increasing or decreasing
	extui	\ac, \ab, MEMCTL_DCWU_SHIFT, MEMCTL_DCWU_BITS	// bits 8-12 - ways in use
	bge	\aa, \ac, .Ldsw3		// equal or increasing
	slli	\ab, \aa, XCHAL_DCACHE_LINEWIDTH + XCHAL_DCACHE_SETWIDTH	// start way number
	slli	\ac, \ac, XCHAL_DCACHE_LINEWIDTH + XCHAL_DCACHE_SETWIDTH	// end way number
.Ldsw1:
	diwbui.p	\ab			// auto-increments ab
	bge	\ab, \ac, .Ldsw2
	beqz	\ab, .Ldsw2
	j	.Ldsw1
.Ldsw2:
	rsr.memctl	\ab
#endif
.Ldsw3:
	//  No dirty data to write back, just set the new number of ways
	movi	\ac, MEMCTL_DCWU_CLR_MASK	// set up to clear bits 8-12
	and	\ab, \ab, \ac			// clear ways in use
	movi	\ac, MEMCTL_INV_EN
	or	\ab, \ab, \ac			// set bit 23
	slli	\aa, \aa, MEMCTL_DCWU_SHIFT
	or	\ab, \ab, \aa			// set ways in use
	wsr.memctl	\ab
#else
	//  No dcache or no way disable support
#endif
	.endm

#endif /*XTENSA_CACHEASM_H*/
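These macros are meant to be invoked from assembly routines. A minimal sketch, assuming the CALL0 ABI and a made-up symbol name (xt_icache_inv_all); under the windowed ABI the entry/retw sequence would be needed instead:

	//  Usage sketch: a C-callable routine that invalidates the entire
	//  instruction cache using the macros above (CALL0 ABI assumed).
	.text
	.align	4
	.global	xt_icache_inv_all
	.type	xt_icache_inv_all, @function
xt_icache_inv_all:
	icache_invalidate_all	a2, a3	// a2/a3 are scratch under CALL0
	ret
	.size	xt_icache_inv_all, . - xt_icache_inv_all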
/* $Id: //depot/dev/Foxhill/Xtensa/OS/include/xtensa/mpuasm.h#5 $ */
/*
 */
#ifndef _MPUASM_H_
#define _MPUASM_H_

#include <xtensa/config/core.h>

/*
 */
	.macro	mpu_write_map	a_map, a_num_entries, a_temp1, a_temp2, a_temp3, a_temp4
#if XCHAL_HAVE_MPU
	movi	\a_temp1, 0
	wsr.cacheadrdis	\a_temp1	// enable the cache in all regions
	wsr.mpuenb	\a_temp1	// disable all foreground entries

	//  Clear out the unused entries.
	//
	//  Currently we are clearing out all the entries because currently
	//  the entries must be ordered even if they are all disabled.
	//  If out of order entries were permitted when all are disabled,
	//  performance could be improved by clearing XCHAL_MPU_ENTRIES - n
	//  (n = number of entries) rather than XCHAL_MPU_ENTRIES - 1 entries.
	//
	movi	\a_temp2, 0
	movi	\a_temp3, XCHAL_MPU_ENTRIES - 1
	j	1f
	.align	16	// this alignment is done to ensure that
1:
	memw		// todo: currently wptlb must be preceded by a memw. The instructions must
			// be aligned to ensure that both are in the same cache line. These statements
			// should be properly conditionalized when that restriction is removed from the HW
	wptlb	\a_temp2, \a_temp1
	addi	\a_temp2, \a_temp2, 1
	bltu	\a_temp2, \a_temp3, 1b

	//  Write the new entries.
	//
	beqz	\a_num_entries, 4f		// if no entries, skip loop
	addx8	\a_map, \a_num_entries, \a_map	// compute end of provided map
	j	3f
	.align	16
2:
	memw		// todo: currently wptlb must be preceded by a memw. The instructions must
			// be aligned to ensure that both are in the same cache line. These statements
			// should be properly conditionalized when that restriction is removed from the HW
	wptlb	\a_temp2, \a_temp4
	addi	\a_temp3, \a_temp3, -1
	beqz	\a_num_entries, 4f		// loop until done
3:
	addi	\a_map, \a_map, -8
	l32i	\a_temp2, \a_map, 4		// get at (acc.rights, memtype)
	l32i	\a_temp4, \a_map, 0		// get as (vstart, valid)
	addi	\a_num_entries, \a_num_entries, -1
	extui	\a_temp1, \a_temp2, 0, 5	// entry index portion
	xor	\a_temp2, \a_temp2, \a_temp1	// zero it
	or	\a_temp2, \a_temp2, \a_temp3	// set index = \a_temp3
	j	2b
4:
#endif
	.endm

/*
 */
	.macro	mpu_read_map	a_map_ptr, a_temp1, a_temp2
#if XCHAL_HAVE_MPU
	movi	\a_temp1, XCHAL_MPU_ENTRIES	// set index to last entry + 1
	addx8	\a_map_ptr, \a_temp1, \a_map_ptr // set map ptr to last entry + 1
1:
	addi	\a_temp1, \a_temp1, -1		// decrement index
	addi	\a_map_ptr, \a_map_ptr, -8	// decrement map pointer
	rptlb0	\a_temp2, \a_temp1		// read 1/2 of entry
	s32i	\a_temp2, \a_map_ptr, 0		// write 1/2 of entry
	rptlb1	\a_temp2, \a_temp1
	s32i	\a_temp2, \a_map_ptr, 4
	bnez	\a_temp1, 1b			// loop until done
#endif
	.endm

#endif
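A sketch of how mpu_read_map might be exposed to C, assuming the CALL0 ABI; the wrapper name xt_mpu_read_map is made up, and the buffer must hold XCHAL_MPU_ENTRIES entries of 8 bytes each, matching the layout the macro stores:

	//  Usage sketch: void xt_mpu_read_map(void *buf) -- dumps the current
	//  MPU foreground map into buf (CALL0 ABI assumed).
	.text
	.align	4
	.global	xt_mpu_read_map
xt_mpu_read_map:
	mpu_read_map	a2, a3, a4	// a2 = buf (arg 0); a3/a4 are scratch
	ret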
/*
 */
/* $Id: //depot/rel/Foxhill/dot.9/Xtensa/OS/include/xtensa/coreasm.h#1 $ */
/*
 */
#ifndef XTENSA_COREASM_H
#define XTENSA_COREASM_H

/*
 */
#ifndef _ASMLANGUAGE	/* conditionalize to avoid cpp warnings (3rd parties might use same macro) */
#define _ASMLANGUAGE
#endif

#include <xtensa/config/core.h>
#include <xtensa/config/specreg.h>
#include <xtensa/config/system.h>

/*
 */
	.macro	find_ms_setbit	ad, as, at, base
#if XCHAL_HAVE_NSA
	movi	\at, 31+\base
	nsau	\as, \as	// get index of \as, numbered from msbit (32 if absent)
	sub	\ad, \at, \as	// get numbering from lsbit (0..31, -1 if absent)
#else /* XCHAL_HAVE_NSA */
	movi	\at, \base	// start with result of 0 (point to lsbit of 32)
	beqz	\as, 2f		// special case for zero argument: return -1
	bltui	\as, 0x10000, 1f // is it one of the 16 lsbits? (if so, check lower 16 bits)
	addi	\at, \at, 16	// no, increment result to upper 16 bits (of 32)
	//srli	\as, \as, 16	// check upper half (shift right 16 bits)
	extui	\as, \as, 16, 16 // check upper half (shift right 16 bits)
1:	bltui	\as, 0x100, 1f	// is it one of the 8 lsbits? (if so, check lower 8 bits)
	addi	\at, \at, 8	// no, increment result to upper 8 bits (of 16)
	srli	\as, \as, 8	// shift right to check upper 8 bits
1:	bltui	\as, 0x10, 1f	// is it one of the 4 lsbits? (if so, check lower 4 bits)
	addi	\at, \at, 4	// no, increment result to upper 4 bits (of 8)
	srli	\as, \as, 4	// shift right 4 bits to check upper half
1:	bltui	\as, 0x4, 1f	// is it one of the 2 lsbits? (if so, check lower 2 bits)
	addi	\at, \at, 2	// no, increment result to upper 2 bits (of 4)
	srli	\as, \as, 2	// shift right 2 bits to check upper half
1:	bltui	\as, 0x2, 1f	// is it the lsbit?
	addi	\at, \at, 2	// no, increment result to upper bit (of 2)
2:	addi	\at, \at, -1	// (from just above: add 1; from beqz: return -1)
	//srli	\as, \as, 1
1:	// done! \at contains index of msbit set (or -1 if none set)
	.if	0x\ad - 0x\at	// destination different than \at ? (works because regs are a0-a15)
	mov	\ad, \at	// then move result to \ad
	.endif
#endif /* XCHAL_HAVE_NSA */
	.endm	// find_ms_setbit

/*
 */
	.macro	find_ls_setbit	ad, as, at, base
	neg	\at, \as	// keep only the least-significant bit that is set...
	and	\as, \at, \as	// ... in \as
	find_ms_setbit	\ad, \as, \at, \base
	.endm	// find_ls_setbit

/*
 */
	.macro	find_ls_one	ad, as
	find_ls_setbit	\ad, \as, \ad, 0
	.endm	// find_ls_one

/*
 */
	/*  Named label version of the macros:  */

	.macro	floop		ar, endlabel
	floop_		\ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
	.endm

	.macro	floopnez	ar, endlabel
	floopnez_	\ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
	.endm

	.macro	floopgtz	ar, endlabel
	floopgtz_	\ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
	.endm

	.macro	floopend	ar, endlabel
	floopend_	\ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
	.endm

	/*  Numbered local label version of the macros:  */
#if 0 /*UNTESTED*/
	.macro	floop89		ar
	floop_		\ar, 8, 9f
	.endm

	.macro	floopnez89	ar
	floopnez_	\ar, 8, 9f
	.endm

	.macro	floopgtz89	ar
	floopgtz_	\ar, 8, 9f
	.endm

	.macro	floopend89	ar
	floopend_	\ar, 8b, 9
	.endm
#endif /*0*/

	/*  Underlying version of the macros:  */

	.macro	floop_	ar, startlabel, endlabelref
	.ifdef	_infloop_
	.if	_infloop_
	.err	// Error: floop cannot be nested
	.endif
	.endif
	.set	_infloop_, 1
#if XCHAL_HAVE_LOOPS
	loop	\ar, \endlabelref
#else /* XCHAL_HAVE_LOOPS */
\startlabel:
	addi	\ar, \ar, -1
#endif /* XCHAL_HAVE_LOOPS */
	.endm	// floop_

	.macro	floopnez_	ar, startlabel, endlabelref
	.ifdef	_infloop_
	.if	_infloop_
	.err	// Error: floopnez cannot be nested
	.endif
	.endif
	.set	_infloop_, 1
#if XCHAL_HAVE_LOOPS
	loopnez	\ar, \endlabelref
#else /* XCHAL_HAVE_LOOPS */
	beqz	\ar, \endlabelref
\startlabel:
	addi	\ar, \ar, -1
#endif /* XCHAL_HAVE_LOOPS */
	.endm	// floopnez_

	.macro	floopgtz_	ar, startlabel, endlabelref
	.ifdef	_infloop_
	.if	_infloop_
	.err	// Error: floopgtz cannot be nested
	.endif
	.endif
	.set	_infloop_, 1
#if XCHAL_HAVE_LOOPS
	loopgtz	\ar, \endlabelref
#else /* XCHAL_HAVE_LOOPS */
	bltz	\ar, \endlabelref
	beqz	\ar, \endlabelref
\startlabel:
	addi	\ar, \ar, -1
#endif /* XCHAL_HAVE_LOOPS */
	.endm	// floopgtz_

	.macro	floopend_	ar, startlabelref, endlabel
	.ifndef	_infloop_
	.err	// Error: floopend without matching floopXXX
	.endif
	.ifeq	_infloop_
	.err	// Error: floopend without matching floopXXX
	.endif
	.set	_infloop_, 0
#if ! XCHAL_HAVE_LOOPS
	bnez	\ar, \startlabelref
#endif /* XCHAL_HAVE_LOOPS */
\endlabel:
	.endm	// floopend_

/*
 */
	.macro	crsil	ar, newlevel
#if XCHAL_HAVE_OLD_EXC_ARCH || XCHAL_HAVE_INTERRUPTS
	rsil	\ar, \newlevel
#else
	rsr.ps	\ar
#endif
	.endm	// crsil

/*
 */
	.macro	safe_movi_a0	constant
#if XCHAL_HAVE_ABSOLUTE_LITERALS
	/*  Contort a PC-relative literal load even though we may be in litbase-relative mode:  */
	j	1f
	.begin	no-transform	// ensure what follows is assembled exactly as-is
	.align	4		// ensure constant and call0 target ...
	.byte	0		// ... are 4-byte aligned (call0 instruction is 3 bytes long)
1:	call0	2f		// read PC (that follows call0) in a0
	.long	\constant	// 32-bit constant to load into a0
2:
	.end	no-transform
	l32i	a0, a0, 0	// load constant
#else
	movi	a0, \constant	// no LITBASE, can assume PC-relative L32R
#endif
	.endm

/*
 */
	.macro	window_spill4
#if XCHAL_HAVE_WINDOWED
# if XCHAL_NUM_AREGS == 16
	movi	a15, 0			// for 16-register files, no need to call to reach the end
# elif XCHAL_NUM_AREGS == 32
	call4	.L__wdwspill_assist28	// call deep enough to clear out any live callers
# elif XCHAL_NUM_AREGS == 64
	call4	.L__wdwspill_assist60	// call deep enough to clear out any live callers
# endif
#endif
	.endm	// window_spill4

	.macro	window_spill8
#if XCHAL_HAVE_WINDOWED
# if XCHAL_NUM_AREGS == 16
	movi	a15, 0			// for 16-register files, no need to call to reach the end
# elif XCHAL_NUM_AREGS == 32
	call8	.L__wdwspill_assist24	// call deep enough to clear out any live callers
# elif XCHAL_NUM_AREGS == 64
	call8	.L__wdwspill_assist56	// call deep enough to clear out any live callers
# endif
#endif
	.endm	// window_spill8

	.macro	window_spill12
#if XCHAL_HAVE_WINDOWED
# if XCHAL_NUM_AREGS == 16
	movi	a15, 0			// for 16-register files, no need to call to reach the end
# elif XCHAL_NUM_AREGS == 32
	call12	.L__wdwspill_assist20	// call deep enough to clear out any live callers
# elif XCHAL_NUM_AREGS == 64
	call12	.L__wdwspill_assist52	// call deep enough to clear out any live callers
# endif
#endif
	.endm	// window_spill12

/*
 */
	.macro	window_spill_function
#if XCHAL_HAVE_WINDOWED
# if XCHAL_NUM_AREGS == 32
	entry	sp, 48
	bbci.l	a0, 31, 1f		// branch if called with call4
	bbsi.l	a0, 30, 2f		// branch if called with call12
	call8	.L__wdwspill_assist16	// called with call8, only need another 8
	retw
1:	call12	.L__wdwspill_assist16	// called with call4, only need another 12
	retw
2:	call4	.L__wdwspill_assist16	// called with call12, only need another 4
	retw
# elif XCHAL_NUM_AREGS == 64
	entry	sp, 48
	bbci.l	a0, 31, 1f		// branch if called with call4
	bbsi.l	a0, 30, 2f		// branch if called with call12
	call4	.L__wdwspill_assist52	// called with call8, only need a call4
	retw
1:	call8	.L__wdwspill_assist52	// called with call4, only need a call8
	retw
2:	call12	.L__wdwspill_assist40	// called with call12, can skip a call12
	retw
# elif XCHAL_NUM_AREGS == 16
	entry	sp, 16
	bbci.l	a0, 31, 1f	// branch if called with call4
	bbsi.l	a0, 30, 2f	// branch if called with call12
	movi	a7, 0		// called with call8
	retw
1:	movi	a11, 0		// called with call4
2:	retw			// if called with call12, everything already spilled
//	movi	a15, 0		// trick to spill all but the direct caller
//	j	1f
//	//  The entry instruction is magical in the assembler (gets auto-aligned)
//	//  so we have to jump to it to avoid falling through the padding.
//	//  We need entry/retw to know where to return.
//1:	entry	sp, 16
//	retw
# else
#  error "unrecognized address register file size"
# endif
#endif /* XCHAL_HAVE_WINDOWED */
	window_spill_common
	.endm	// window_spill_function

/*
 */
	.macro	window_spill_common
#if XCHAL_HAVE_WINDOWED && (XCHAL_NUM_AREGS == 32 || XCHAL_NUM_AREGS == 64)
	.ifndef	.L__wdwspill_defined
# if XCHAL_NUM_AREGS >= 64
.L__wdwspill_assist60:
	entry	sp, 32
	call8	.L__wdwspill_assist52
	retw
.L__wdwspill_assist56:
	entry	sp, 16
	call4	.L__wdwspill_assist52
	retw
.L__wdwspill_assist52:
	entry	sp, 48
	call12	.L__wdwspill_assist40
	retw
.L__wdwspill_assist40:
	entry	sp, 48
	call12	.L__wdwspill_assist28
	retw
# endif
.L__wdwspill_assist28:
	entry	sp, 48
	call12	.L__wdwspill_assist16
	retw
.L__wdwspill_assist24:
	entry	sp, 32
	call8	.L__wdwspill_assist16
	retw
.L__wdwspill_assist20:
	entry	sp, 16
	call4	.L__wdwspill_assist16
	retw
.L__wdwspill_assist16:
	entry	sp, 16
	movi	a15, 0
	retw
	.set	.L__wdwspill_defined, 1
	.endif
#endif /* XCHAL_HAVE_WINDOWED with 32 or 64 aregs */
	.endm	// window_spill_common

/*
 */
	.macro	beqi32	ax, ay, imm, label
	.ifeq	((\imm-1) & ~7)	// 1..8 ?
	beqi	\ax, \imm, \label
	.else
	.ifeq	(\imm+1)	// -1 ?
	beqi	\ax, \imm, \label
	.else
	.ifeq	(\imm)		// 0 ?
	beqz	\ax, \label
	.else
	//  We could also handle immediates 10,12,16,32,64,128,256
	//  but it would be a long macro...
	movi	\ay, \imm
	beq	\ax, \ay, \label
	.endif
	.endif
	.endif
	.endm	// beqi32

/*
 */
	.macro	isync_retw_nop
#if XCHAL_MAYHAVE_ERRATUM_XEA1KWIN
	nop
#endif
	.endm

/*
 */
	.macro	isync_return_nop
#ifdef __XTENSA_WINDOWED_ABI__
	isync_retw_nop
#endif
	.endm

/*
 */
	.macro	isync_erratum453
#if XCHAL_ERRATUM_453
	isync
#endif
	.endm

/*
 */
	.macro	readsr	reg suf ar
	rsr.\reg\suf	\ar
	.endm

/*
 */
	.macro	writesr	reg suf ar
	wsr.\reg\suf	\ar
	.endm

/*
 */
	.macro	xchgsr	reg suf ar
	xsr.\reg\suf	\ar
	.endm

/*
 */
	.macro	INDEX_SR	instr ar
	.ifeq	(_idx)
	&instr&0	\ar
	.endif
	.ifeq	(_idx-1)
	&instr&1	\ar
	.endif
	.ifeq	(_idx-2)
	&instr&2	\ar
	.endif
	.ifeq	(_idx-3)
	&instr&3	\ar
	.endif
	.ifeq	(_idx-4)
	&instr&4	\ar
	.endif
	.ifeq	(_idx-5)
	&instr&5	\ar
	.endif
	.ifeq	(_idx-6)
	&instr&6	\ar
	.endif
	.ifeq	(_idx-7)
	&instr&7	\ar
	.endif
	.endm

/*
 */
#if !XCHAL_HAVE_ABS
	.macro	abs	arr, ars
	.ifc	\arr, \ars	// src equal dest is less efficient
	bgez	\arr, 1f
	neg	\arr, \arr
1:
	.else
	neg	\arr, \ars
	movgez	\arr, \ars, \ars
	.endif
	.endm
#endif /* !XCHAL_HAVE_ABS */

/*
 */
#if !XCHAL_HAVE_ADDX
	.macro	addx2	arr, ars, art
	.ifc	\arr, \art
	.ifc	\arr, \ars	// addx2 a, a, a (not common)
	.err
	.else			// addx2 a, b, a
	add	\arr, \ars, \art
	add	\arr, \ars, \art
	.endif
	.else			// addx2 a, b, c / addx2 a, a, b / addx2 a, b, b
	slli	\arr, \ars, 1
	add	\arr, \arr, \art
	.endif
	.endm
#endif /* !XCHAL_HAVE_ADDX */

/*
 */
#if !XCHAL_HAVE_ADDX
	.macro	addx4	arr, ars, art
	.ifc	\arr, \art
	.ifc	\arr, \ars	// addx4 a, a, a (not common)
	.err
	.else			// addx4 a, b, a
	add	\arr, \ars, \art
	add	\arr, \ars, \art
	add	\arr, \ars, \art
	add	\arr, \ars, \art
	.endif
	.else			// addx4 a, b, c / addx4 a, a, b / addx4 a, b, b
	slli	\arr, \ars, 2
	add	\arr, \arr, \art
	.endif
	.endm
#endif /* !XCHAL_HAVE_ADDX */

/*
 */
#if !XCHAL_HAVE_ADDX
	.macro	addx8	arr, ars, art
	.ifc	\arr, \art
	.ifc	\arr, \ars	// addx8 a, a, a (not common)
	.err
	.else			// addx8 a, b, a
	add	\arr, \ars, \art
	add	\arr, \ars, \art
	add	\arr, \ars, \art
	add	\arr, \ars, \art
	add	\arr, \ars, \art
	add	\arr, \ars, \art
	add	\arr, \ars, \art
	add	\arr, \ars, \art
	.endif
	.else			// addx8 a, b, c / addx8 a, a, b / addx8 a, b, b
	slli	\arr, \ars, 3
	add	\arr, \arr, \art
	.endif
	.endm
#endif /* !XCHAL_HAVE_ADDX */

/*
 */
#if XCHAL_HAVE_XEA1
	.macro	rfe_rfue
	rfue
	.endm
#elif XCHAL_HAVE_XEA2
	.macro	rfe_rfue
	rfe
	.endm
#endif

/*
 */
	.macro	abi_entry_size	locsize=0, callsize=0
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
	.ifeq	\callsize
	.set	.callsz, 16
	.else
	.ifeq	\callsize-4
	.set	.callsz, 16
	.else
	.ifeq	\callsize-8
	.set	.callsz, 32
	.else
	.ifeq	\callsize-12
	.set	.callsz, 48
	.else
	.error	"abi_entry: invalid call size \callsize"
	.endif
	.endif
	.endif
	.endif
	.set	.locsz, .callsz + ((\locsize + 15) & -16)
#else
	.set	.callsz, \callsize
	.if	.callsz		/* if calls, need space for return PC */
	.set	.locsz, (\locsize + 4 + 15) & -16
	.else
	.set	.locsz, (\locsize + 15) & -16
	.endif
#endif
	.endm

	.macro	abi_entry	locsize=0, callsize=0
	.iflt	\locsize
	.error	"abi_entry: invalid negative size of locals (\locsize)"
	.endif
	abi_entry_size	\locsize, \callsize
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
# define ABI_ENTRY_MINSIZE	3	/* size of abi_entry (no arguments) instructions in bytes */
	.ifgt	.locsz - 32760	/* .locsz > 32760 (ENTRY's max range)? */
	/*  Funky computation to try to have assembler use addmi efficiently if possible:  */
	entry	sp, 0x7F00 + (.locsz & 0xF0)
	addi	a12, sp, - ((.locsz & -0x100) - 0x7F00)
	movsp	sp, a12
	.else
	entry	sp, .locsz
	.endif
#else
# define ABI_ENTRY_MINSIZE	0	/* size of abi_entry (no arguments) instructions in bytes */
	.if	.locsz
	.ifle	.locsz - 128	/* if locsz <= 128, with calls: */
	movi	a9, .locsz - 16	/* note: a9 is caller-saved */
	addi	sp, sp, -16
	s32i	a0, sp, 12
	sub	sp, sp, a9
	.else			/* locsz > 128, no calls: */
	movi	a9, .locsz
	sub	sp, sp, a9
	.endif			/* end */
	.endif
#endif
	.endm

/*
 */
	.macro	abi_return	locsize=-1, callsize=0
	.ifge	\locsize
	abi_entry_size	\locsize, \callsize
	.endif
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
	retw
#else
	.if	.locsz
	.iflt	.locsz - 128	/* if locsz < 128, with calls: */
	addi	a9, sp, .locsz - 16
	l32i	a0, a9, 12
	addi	sp, a9, 16
	.else			/* locsz >= 128, no calls: */
	movi	a9, .locsz
	add	sp, sp, a9
	.endif			/* end */
	.endif
	ret
#endif
	.endm

/*
 */
	.macro	hw_erratum_487_fix
#if defined XSHAL_ERRATUM_487_FIX
	isync
#endif
	.endm

/*
 */
#define _GBL(x)    .global x
#define _TYP(x)    .type x,@function
#define _ALN(x)    .align x
#define _SIZ(x)    .size x, . - x
#define _MKEND(x)  .purgem endfunc ; .macro endfunc ; _SIZ(x) ; .purgem endfunc ; .macro endfunc ; .endm ; .endm
#define _SYMT(x)   _GBL(x); _MKEND(x); _TYP(x); _ALN(4); x:
#define _SYM2(x)   _GBL(x); _TYP(x); x:
#define _SYM(x)    _GBL(x); _MKEND(x); _ALN(4); x:
	.macro	endfunc ; .endm

/*
 */
#if defined(__NW_FUNCTION__)
# define DECLFUNC(x)	_SYMT(x ## _nw)
#else
# if defined (__XTENSA_CALL0_ABI__)
#  define DECLFUNC(x)	_SYMT(x); _SYM2(x ## _nw)
# else
#  define DECLFUNC(x)	_SYMT(x)
# endif
#endif

#endif /*XTENSA_COREASM_H*/
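The abi_entry/abi_return pair lets one assembly source work under both the windowed and CALL0 ABIs. A minimal sketch; the function my_add3 is a made-up name for illustration:

	//  Usage sketch: a leaf function, int my_add3(int a, int b, int c),
	//  written with the ABI-independent macros above.
	.text
DECLFUNC(my_add3)
	abi_entry			// no locals, no calls
	add	a2, a2, a3		// arguments arrive in a2..a4 under either ABI
	add	a2, a2, a4		// result is returned in a2
	abi_return
	endfunc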
/*
 */
/* $Id: //depot/rel/Foxhill/dot.9/Xtensa/OS/include/xtensa/corebits.h#1 $ */
/*
 */
#ifndef XTENSA_COREBITS_H
#define XTENSA_COREBITS_H

/*  EXCCAUSE register fields:  */
#define EXCCAUSE_EXCCAUSE_SHIFT	0
#define EXCCAUSE_EXCCAUSE_MASK	0x3F
/*  EXCCAUSE register values:  */
/*
 */
#define EXCCAUSE_ILLEGAL		0	/* Illegal Instruction */
#define EXCCAUSE_SYSCALL		1	/* System Call (SYSCALL instruction) */
#define EXCCAUSE_INSTR_ERROR		2	/* Instruction Fetch Error */
# define EXCCAUSE_IFETCHERROR		2	/* (backward compatibility macro, deprecated, avoid) */
#define EXCCAUSE_LOAD_STORE_ERROR	3	/* Load Store Error */
# define EXCCAUSE_LOADSTOREERROR	3	/* (backward compatibility macro, deprecated, avoid) */
#define EXCCAUSE_LEVEL1_INTERRUPT	4	/* Level 1 Interrupt */
# define EXCCAUSE_LEVEL1INTERRUPT	4	/* (backward compatibility macro, deprecated, avoid) */
#define EXCCAUSE_ALLOCA			5	/* Stack Extension Assist (MOVSP instruction) for alloca */
#define EXCCAUSE_DIVIDE_BY_ZERO		6	/* Integer Divide by Zero */
# define EXCCAUSE_SPECULATION		7	/* Use of Failed Speculative Access (deprecated) */
#define EXCCAUSE_PC_ERROR		7	/* Next PC Value Illegal */
#define EXCCAUSE_PRIVILEGED		8	/* Privileged Instruction */
#define EXCCAUSE_UNALIGNED		9	/* Unaligned Load or Store */
#define EXCCAUSE_EXTREG_PRIVILEGE	10	/* External Register Privilege Error */
#define EXCCAUSE_EXCLUSIVE_ERROR	11	/* Load exclusive to unsupported memory type or unaligned address */
#define EXCCAUSE_INSTR_DATA_ERROR	12	/* PIF Data Error on Instruction Fetch (RB-200x and later) */
#define EXCCAUSE_LOAD_STORE_DATA_ERROR	13	/* PIF Data Error on Load or Store (RB-200x and later) */
#define EXCCAUSE_INSTR_ADDR_ERROR	14	/* PIF Address Error on Instruction Fetch (RB-200x and later) */
#define EXCCAUSE_LOAD_STORE_ADDR_ERROR	15	/* PIF Address Error on Load or Store (RB-200x and later) */
#define EXCCAUSE_ITLB_MISS		16	/* ITLB Miss (no ITLB entry matches, hw refill also missed) */
#define EXCCAUSE_ITLB_MULTIHIT		17	/* ITLB Multihit (multiple ITLB entries match) */
#define EXCCAUSE_INSTR_RING		18	/* Ring Privilege Violation on Instruction Fetch */
	/*  Reserved			19 */	/* Size Restriction on IFetch (not implemented) */
#define EXCCAUSE_INSTR_PROHIBITED	20	/* Cache Attribute does not allow Instruction Fetch */
	/*  Reserved			21..23 */
#define EXCCAUSE_DTLB_MISS		24	/* DTLB Miss (no DTLB entry matches, hw refill also missed) */
#define EXCCAUSE_DTLB_MULTIHIT		25	/* DTLB Multihit (multiple DTLB entries match) */
#define EXCCAUSE_LOAD_STORE_RING	26	/* Ring Privilege Violation on Load or Store */
	/*  Reserved			27 */	/* Size Restriction on Load/Store (not implemented) */
#define EXCCAUSE_LOAD_PROHIBITED	28	/* Cache Attribute does not allow Load */
#define EXCCAUSE_STORE_PROHIBITED	29	/* Cache Attribute does not allow Store */
	/*  Reserved			30..31 */
#define EXCCAUSE_CP_DISABLED(n)		(32+(n))	/* Access to Coprocessor 'n' when disabled */
#define EXCCAUSE_CP0_DISABLED		32	/* Access to Coprocessor 0 when disabled */
#define EXCCAUSE_CP1_DISABLED		33	/* Access to Coprocessor 1 when disabled */
#define EXCCAUSE_CP2_DISABLED		34	/* Access to Coprocessor 2 when disabled */
#define EXCCAUSE_CP3_DISABLED		35	/* Access to Coprocessor 3 when disabled */
#define EXCCAUSE_CP4_DISABLED		36	/* Access to Coprocessor 4 when disabled */
#define EXCCAUSE_CP5_DISABLED		37	/* Access to Coprocessor 5 when disabled */
#define EXCCAUSE_CP6_DISABLED		38	/* Access to Coprocessor 6 when disabled */
#define EXCCAUSE_CP7_DISABLED		39	/* Access to Coprocessor 7 when disabled */
	/*  Reserved			40..63 */

/*  PS register fields:  */
#define PS_WOE_SHIFT		18
#define PS_WOE_MASK		0x00040000
#define PS_WOE			PS_WOE_MASK
#define PS_CALLINC_SHIFT	16
#define PS_CALLINC_MASK		0x00030000
#define PS_CALLINC(n)		(((n)&3)<<PS_CALLINC_SHIFT)	/* n = 0..3 */
#define PS_OWB_SHIFT		8
#define PS_OWB_MASK		0x00000F00
#define PS_OWB(n)		(((n)&15)<<PS_OWB_SHIFT)	/* n = 0..15 (or 0..7) */
#define PS_RING_SHIFT		6
#define PS_RING_MASK		0x000000C0
#define PS_RING(n)		(((n)&3)<<PS_RING_SHIFT)	/* n = 0..3 */
#define PS_UM_SHIFT		5
#define PS_UM_MASK		0x00000020
#define PS_UM			PS_UM_MASK
#define PS_EXCM_SHIFT		4
#define PS_EXCM_MASK		0x00000010
#define PS_EXCM			PS_EXCM_MASK
#define PS_INTLEVEL_SHIFT	0
#define PS_INTLEVEL_MASK	0x0000000F
#define PS_INTLEVEL(n)		((n)&PS_INTLEVEL_MASK)		/* n = 0..15 */
/*  ABI-derived field values:  */
#ifdef __XTENSA_CALL0_ABI__
#define PS_WOE_ABI		0
#define PS_WOECALL4_ABI		0
#else
#define PS_WOE_ABI		PS_WOE				/* 0x40000 */
#define PS_WOECALL4_ABI		(PS_WOE | PS_CALLINC(1))	/* 0x50000, per call4 */
#endif
/*  Backward compatibility (deprecated):  */
#define PS_PROGSTACK_SHIFT	PS_UM_SHIFT
#define PS_PROGSTACK_MASK	PS_UM_MASK
#define PS_PROG_SHIFT		PS_UM_SHIFT
#define PS_PROG_MASK		PS_UM_MASK
#define PS_PROG			PS_UM

/*  DBREAKCn register fields:  */
#define DBREAKC_MASK_SHIFT		0
#define DBREAKC_MASK_MASK		0x0000003F
#define DBREAKC_LOADBREAK_SHIFT		30
#define DBREAKC_LOADBREAK_MASK		0x40000000
#define DBREAKC_STOREBREAK_SHIFT	31
#define DBREAKC_STOREBREAK_MASK		0x80000000

/*  DEBUGCAUSE register fields:  */
#define DEBUGCAUSE_DEBUGINT_SHIFT	5
#define DEBUGCAUSE_DEBUGINT_MASK	0x20	/* debug interrupt */
#define DEBUGCAUSE_BREAKN_SHIFT		4
#define DEBUGCAUSE_BREAKN_MASK		0x10	/* BREAK.N instruction */
#define DEBUGCAUSE_BREAK_SHIFT		3
#define DEBUGCAUSE_BREAK_MASK		0x08	/* BREAK instruction */
#define DEBUGCAUSE_DBREAK_SHIFT		2
#define DEBUGCAUSE_DBREAK_MASK		0x04	/* DBREAK match */
#define DEBUGCAUSE_IBREAK_SHIFT		1
#define DEBUGCAUSE_IBREAK_MASK		0x02	/* IBREAK match */
#define DEBUGCAUSE_ICOUNT_SHIFT		0
#define DEBUGCAUSE_ICOUNT_MASK		0x01	/* ICOUNT would increment to zero */

/*  MESR register fields:  */
#define MESR_MEME		0x00000001	/* memory error */
#define MESR_MEME_SHIFT		0
#define MESR_DME		0x00000002	/* double memory error */
#define MESR_DME_SHIFT		1
#define MESR_RCE		0x00000010	/* recorded memory error */
#define MESR_RCE_SHIFT		4
#define MESR_LCE
#define MESR_LCE_SHIFT		?
#define MESR_LCE_L
#define MESR_ERRENAB		0x00000100
#define MESR_ERRENAB_SHIFT	8
#define MESR_ERRTEST		0x00000200
#define MESR_ERRTEST_SHIFT	9
#define MESR_DATEXC		0x00000400
#define MESR_DATEXC_SHIFT	10
#define MESR_INSEXC		0x00000800
#define MESR_INSEXC_SHIFT	11
#define MESR_WAYNUM_SHIFT	16
#define MESR_ACCTYPE_SHIFT	20
#define MESR_MEMTYPE_SHIFT	24
#define MESR_ERRTYPE_SHIFT	30

/*  MEMCTL register fields:  */
#define MEMCTL_SNOOP_EN_SHIFT	1
#define MEMCTL_SNOOP_EN		0x02		/* enable snoop responses (default 0) */
#define MEMCTL_L0IBUF_EN_SHIFT	0
#define MEMCTL_L0IBUF_EN	0x01		/* enable loop instr. buffer (default 1) */
#define MEMCTL_INV_EN_SHIFT	23
#define MEMCTL_INV_EN		0x00800000	/* invalidate cache ways being increased */
#define MEMCTL_DCWU_SHIFT	8
#define MEMCTL_DCWU_BITS	5
#define MEMCTL_DCWA_SHIFT	13
#define MEMCTL_DCWA_BITS	5
#define MEMCTL_ICWU_SHIFT	18
#define MEMCTL_ICWU_BITS	5
#define MEMCTL_DCWU_MASK	0x00001F00	/* Bits  8-12 dcache ways in use */
#define MEMCTL_DCWA_MASK	0x0003E000	/* Bits 13-17 dcache ways allocatable */
#define MEMCTL_ICWU_MASK	0x007C0000	/* Bits 18-22 icache ways in use */
#define MEMCTL_DCWU_CLR_MASK	~(MEMCTL_DCWU_MASK)
#define MEMCTL_DCWA_CLR_MASK	~(MEMCTL_DCWA_MASK)
#define MEMCTL_ICWU_CLR_MASK	~(MEMCTL_ICWU_MASK)
#define MEMCTL_DCW_CLR_MASK	(MEMCTL_DCWU_CLR_MASK | MEMCTL_DCWA_CLR_MASK)
#define MEMCTL_IDCW_CLR_MASK	(MEMCTL_DCW_CLR_MASK | MEMCTL_ICWU_CLR_MASK)

#endif /*XTENSA_COREBITS_H*/
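These field macros compose and decode register values with plain bit arithmetic. A small sketch, assuming the header is reachable as xtensa/corebits.h; main() and the printed text are illustrative:

/* Usage sketch: composing an initial PS value for a windowed-ABI thread
   and classifying an EXCCAUSE code, using only the fields defined above. */
#include <stdio.h>
#include <xtensa/corebits.h>

int main(void)
{
    /* Window overflows enabled, call4 increment, user mode, interrupts at level 0. */
    unsigned ps = PS_WOE | PS_CALLINC(1) | PS_UM | PS_INTLEVEL(0);
    printf("PS = 0x%08X\n", ps);    /* prints 0x00050020 */

    unsigned exccause = 9;          /* pretend value read from the EXCCAUSE register */
    if ((exccause & EXCCAUSE_EXCCAUSE_MASK) == EXCCAUSE_UNALIGNED) {
        printf("unaligned load or store\n");
    }
    return 0;
}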
/* xtensa/hal.h -- contains a definition of the Core HAL interface All definitions in this header file are independent of any specific Xtensa processor configuration. Thus software (eg. OS, application, etc) can include this header file and be compiled into configuration- independent objects that can be distributed and eventually linked to the HAL library (libhal.a) to create a configuration-specific final executable. Certain definitions, however, are release/version-specific -- such as the XTHAL_RELEASE_xxx macros (or additions made in later versions). $Id: //depot/rel/Foxhill/dot.9/Xtensa/OS/target-os-src/hal.h.tpp#1 $ Copyright (c) 1999-2015 Cadence Design Systems, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
119
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef XTENSA_HAL_H #define XTENSA_HAL_H / Definitions Useful for Any Code, USER or PRIVILEGED / /* Constant Definitions (shared with assembly) */ /* */ #define XTHAL_RELEASE_MAJOR 12000 #define XTHAL_RELEASE_MINOR 9 #define XTHAL_RELEASE_NAME "12.0.9" #define XTHAL_REL_12 1 #define XTHAL_REL_12_0 1 #define XTHAL_REL_12_0_9 1 /* HAL version numbers (these names are for backward compatibility): */ #define XTHAL_MAJOR_REV XTHAL_RELEASE_MAJOR #define XTHAL_MINOR_REV XTHAL_RELEASE_MINOR /* */ /* Version comparison operators (among major/minor pairs): */ #define XTHAL_REL_GE(maja,mina, majb,minb) ((maja) > (majb) || \ ((maja) == (majb) && (mina) >= (minb))) #define XTHAL_REL_GT(maja,mina, majb,minb) ((maja) > (majb) || \ ((maja) == (majb) && (mina) > (minb))) #define XTHAL_REL_LE(maja,mina, majb,minb) ((maja) #include /* HAL */ /* Constant to be checked in build = (XTHAL_MAJOR_REV>1)+2:9) #define XTHAL_PREFETCH_BLOCKS(n) (0x0000000F80000000ULL + \ (((unsigned long long)_XTHAL_PREFETCH_BLOCKS(n)) = bit is set */ /* foMGIWACE 2345789 */ /* For instruction fetch: */ #define XTHAL_FAM_EXCEPTION 0x001 /* .
119
.......E 2345789 exception */ /*efine XTHAL_FAM_ISOLATE*/ /*0x012*/ /* .---I.-C- ....... isolate */ #define XTHAL_FAM_BYPASS 0x000 /* ..--- 2345789 bypass */ /*efine XTHAL_FAM_NACACHED*/ /*0x002*/ /* ..-C- ....... cached no-allocate (frozen) */ #define XTHAL_FAM_CACHED 0x006 /* ..AC- 2345789 cached */ /* For data load: */ #define XTHAL_LAM_EXCEPTION 0x001 /* ........E 2345789 exception */ #define XTHAL_LAM_ISOLATE 0x012 /* .---I.-C- 2345789 isolate */ #define XTHAL_LAM_BYPASS 0x000 /* .O---.--- 2...... bypass speculative */ #define XTHAL_LAM_BYPASSG 0x020 /* .O-G-.--- .345789 bypass guarded */ #define XTHAL_LAM_CACHED_NOALLOC 0x002 /* .O---.-C- 2345789 cached no-allocate speculative */ #define XTHAL_LAM_NACACHED XTHAL_LAM_CACHED_NOALLOC #define XTHAL_LAM_NACACHEDG 0x022 /* .O-G-.-C- .?..... cached no-allocate guarded */ #define XTHAL_LAM_CACHED 0x006 /* ..AC- 2345789 cached speculative */ #define XTHAL_LAM_COHCACHED 0x046 /* .-M--.AC- ....*89 cached speculative MP-coherent */ /* For data store: */ #define XTHAL_SAM_EXCEPTION 0x001 /* .
119
.......E 2345789 exception */ #define XTHAL_SAM_ISOLATE 0x032 /* .--GI--C- 2345789 isolate */ #define XTHAL_SAM_BYPASS 0x028 /* -O-G-W--- 2345789 bypass */ #define XTHAL_SAM_WRITETHRU 0x02A /* -O-G-W-C- 2345789 writethrough */ /*efine XTHAL_SAM_WRITETHRU_ALLOC*/ /*0x02E*/ /* -O-G-WAC- ....... writethrough allocate */ #define XTHAL_SAM_WRITEBACK 0x026 /* F--G--AC- ...5789 writeback */ #define XTHAL_SAM_WRITEBACK_NOALLOC 0x022 /* ?--G---C- .....89 writeback no-allocate */ #define XTHAL_SAM_COHWRITEBACK 0x066 /* F-MG--AC- ....*89 writeback MP-coherent */ /* For PIF attributes: */ /* -PIwrWCBUUUU ...9 */ #define XTHAL_PAM_BYPASS 0x000 /* xxx00000xxxx ...9 bypass non-bufferable */ #define XTHAL_PAM_BYPASS_BUF 0x010 /* xxx0000bxxxx ...9 bypass */ #define XTHAL_PAM_CACHED_NOALLOC 0x030 /* xxx0001bxxxx ...9 cached no-allocate */ #define XTHAL_PAM_WRITETHRU 0x0B0 /* xxx0101bxxxx ...9 writethrough (WT) */ #define XTHAL_PAM_WRITEBACK_NOALLOC 0x0F0 /* xxx0111bxxxx ...9 writeback no-alloc (WBNA) */ #define XTHAL_PAM_WRITEBACK 0x1F0 /* xxx1111bxxxx .
119
..9 writeback (WB) */ /*efine XTHAL_PAM_NORMAL*/ /*0x050*/ /* xxx0010bxxxx .... (unimplemented) */ /*efine XTHAL_PAM_WRITETHRU_WA*/ /*0x130*/ /* xxx1001bxxxx .... (unimplemented, less likely) */ /*efine XTHAL_PAM_WRITETHRU_RWA*/ /*0x1B0*/ /* xxx1101bxxxx .... (unimplemented, less likely) */ /*efine XTHAL_PAM_WRITEBACK_WA*/ /*0x170*/ /* xxx1011bxxxx .... (unimplemented, less likely) */ #if 0 /* Cache attribute encoding for CACHEATTR (per ISA): (Note: if this differs from ISA Ref Manual, ISA has precedence) Inst-fetches Loads Stores 0x0 FCA_EXCEPTION LCA_NACACHED SCA_WRITETHRU cached no-allocate (previously misnamed "uncached") 0x1 FCA_CACHED LCA_CACHED SCA_WRITETHRU cached 0x2 FCA_BYPASS LCA_BYPASS_G* SCA_BYPASS bypass cache (what most people call uncached) 0x3 FCA_CACHED LCA_CACHED SCA_WRITEALLOCF write-allocate or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented) 0x4 FCA_CACHED LCA_CACHED SCA_WRITEBACK[M] write-back [MP-coherent] or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented) 0x5 FCA_CACHED LCA_CACHED SCA_WRITEBACK_NOALLOC write-back no-allocate or FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION (if unimplemented) 0x6.
119
.D FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION (reserved) 0xE FCA_EXCEPTION LCA_ISOLATE SCA_ISOLATE isolate 0xF FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION illegal */ #endif /*0*/ #if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__) #ifdef __cplusplus extern "C" { #endif /* Register Windows */ /* This spill any live register windows (other than the caller's): extern void xthal_window_spill( void ); /* Optional/Custom Processor State */ /* validate & invalidate the TIE register file */ extern void xthal_validate_cp(int); extern void xthal_invalidate_cp(int); /* read and write cpenable register */ extern void xthal_set_cpenable(unsigned); extern unsigned xthal_get_cpenable(void); /* Interrupts */ /* the number of interrupt levels */ extern const unsigned char Xthal_num_intlevels; /* the number of interrupts */ extern const unsigned char Xthal_num_interrupts; /* the highest level of interrupts masked by PS.EXCM */ extern const unsigned char Xthal_excm_level; /* mask for level of interrupts */ extern const unsigned int Xthal_intlevel_mask[XTHAL_MAX_INTLEVELS]; /* mask for level 0 to N interrupts */ extern const unsigned int Xthal_intlevel_andbelow_mask[XTHAL_MAX_INTLEVELS]; /* level of each interrupt */ extern const unsigned char Xthal_intlevel[XTHAL_MAX_INTERRUPTS]; /* type per interrupt */ extern const unsigned char Xthal_inttype[XTHAL_MAX_INTERRUPTS]; /* masks of each type of interrupt */ extern const unsigned int Xthal_inttype_mask[XTHAL_MAX_INTTYPES]; /* interrupt numbers assigned to each timer interrupt */ extern const int Xthal_timer_interrupt[XTHAL_MAX_TIMERS]; /* INTENABLE,INTERRUPT,INTSET,INTCLEAR register access functions: */ extern unsigned xthal_get_intenable( void ); extern void xthal_set_intenable( unsigned ); extern unsigned xthal_get_interrupt( void ); #define xthal_get_intread xthal_get_interrupt /* backward compatibility */ /* These two functions are deprecated.
119
Use the newer functions xthal_interrupt_trigger and xthal_interrupt_clear instead. */ extern void xthal_set_intset( unsigned ); extern void xthal_set_intclear( unsigned ); /* Debug */ /* Number of instruction and data break registers: */ extern const int Xthal_num_ibreak; extern const int Xthal_num_dbreak; /* Core Counter */ /* counter info */ extern const unsigned char Xthal_have_ccount; /* set if CCOUNT register present */ extern const unsigned char Xthal_num_ccompare; /* number of CCOMPAREn registers */ /* get CCOUNT register (if not present return 0) */ extern unsigned xthal_get_ccount(void); /* set and get CCOMPAREn registers (if not present, get returns 0) */ extern void xthal_set_ccompare(int, unsigned); extern unsigned xthal_get_ccompare(int); /* Miscellaneous */ extern const unsigned char Xthal_have_prid; extern const unsigned char Xthal_have_exceptions; extern const unsigned char Xthal_xea_version; extern const unsigned char Xthal_have_interrupts; extern const unsigned char Xthal_have_highlevel_interrupts; extern const unsigned char Xthal_have_nmi; extern unsigned xthal_get_prid( void ); /* Virtual interrupt prioritization (DEPRECATED) */ /* Convert between interrupt levels (as per PS.
119
INTLEVEL) and virtual interrupt priorities: */ extern unsigned xthal_vpri_to_intlevel(unsigned vpri); extern unsigned xthal_intlevel_to_vpri(unsigned intlevel); /* Enables/disables given set (mask) of interrupts; returns previous enabled-mask of all ints: */ /* These functions are deprecated. Use xthal_interrupt_enable and xthal_interrupt_disable instead. */ extern unsigned xthal_int_enable(unsigned); extern unsigned xthal_int_disable(unsigned); /* Set/get virtual priority of an interrupt: */ extern int xthal_set_int_vpri(int intnum, int vpri); extern int xthal_get_int_vpri(int intnum); /* Set/get interrupt lockout level for exclusive access to virtual priority data structures: */ extern void xthal_set_vpri_locklevel(unsigned intlevel); extern unsigned xthal_get_vpri_locklevel(void); /* Set/get current virtual interrupt priority: */ extern unsigned xthal_set_vpri(unsigned vpri); extern unsigned xthal_get_vpri(void); extern unsigned xthal_set_vpri_intlevel(unsigned intlevel); extern unsigned xthal_set_vpri_lock(void); /* Generic Interrupt Trampolining Support (DEPRECATED) */ typedef void (XtHalVoidFunc)(void); /* Bitmask of interrupts currently trampolining down: */ extern unsigned Xthal_tram_pending; /* */ extern unsigned Xthal_tram_enabled; /* Bitmask of interrupts configured for sync trampolining: */ extern unsigned Xthal_tram_sync; /* Trampoline support functions: */ extern unsigned xthal_tram_pending_to_service( void ); extern void xthal_tram_done( unsigned serviced_mask ); extern int xthal_tram_set_sync( int intnum, int sync ); extern XtHalVoidFunc* xthal_set_tram_trigger_func( XtHalVoidFunc *trigger_fn ); /* Internal Memories */ extern const unsigned char Xthal_num_instrom; extern const unsigned char Xthal_num_instram; extern const unsigned char Xthal_num_datarom; extern const unsigned char Xthal_num_dataram; extern const unsigned char Xthal_num_xlmi; /* Each of the following arrays contains at least one entry, extern const unsigned int Xthal_instrom_vaddr[]; extern const unsigned int Xthal_instrom_paddr[]; extern const unsigned int Xthal_instrom_size []; extern const unsigned int Xthal_instram_vaddr[]; extern const unsigned int Xthal_instram_paddr[]; extern const unsigned int Xthal_instram_size []; extern const unsigned int Xthal_datarom_vaddr[]; extern const unsigned int Xthal_datarom_paddr[]; extern const unsigned int Xthal_datarom_size []; extern const unsigned int Xthal_dataram_vaddr[]; extern const unsigned int Xthal_dataram_paddr[]; extern const unsigned int Xthal_dataram_size []; extern const unsigned int Xthal_xlmi_vaddr[]; extern const unsigned int Xthal_xlmi_paddr[]; extern const unsigned int Xthal_xlmi_size []; /* Cache */ /* number of cache sets in log2(lines per way) */ extern const unsigned char Xthal_icache_setwidth; extern const unsigned char Xthal_dcache_setwidth; /* cache set associativity (number of ways) */ extern const unsigned int Xthal_icache_ways; extern const unsigned int Xthal_dcache_ways; /* cache features */ extern const unsigned char Xthal_icache_line_lockable; extern const unsigned char Xthal_dcache_line_lockable; /* cache attribute register control (used by other HAL routines) */ extern unsigned xthal_get_cacheattr( void ); extern unsigned xthal_get_icacheattr( void ); extern unsigned xthal_get_dcacheattr( void ); extern void xthal_set_cacheattr( unsigned ); extern void xthal_set_icacheattr( unsigned ); extern void xthal_set_dcacheattr( unsigned ); /* set cache attribute (access modes) for a range of memory */ extern int xthal_set_region_attribute( 
void *addr, unsigned size, unsigned cattr, unsigned flags ); /* Bits of flags parameter to xthal_set_region_attribute(): */ #define XTHAL_CAFLAG_EXPAND 0x000100 /* only expand allowed access to range, don't reduce it */ #define XTHAL_CAFLAG_EXACT 0x000200 /* return error if can't apply change to exact range specified */ #define XTHAL_CAFLAG_NO_PARTIAL 0x000400 /* don't apply change to regions partially covered by range */ #define XTHAL_CAFLAG_NO_AUTO_WB 0x000800 /* don't writeback data after leaving writeback attribute */ #define XTHAL_CAFLAG_NO_AUTO_INV 0x001000 /* don't invalidate after disabling cache (entering bypass) */ /* enable caches */ extern void xthal_icache_enable( void ); /* DEPRECATED */ extern void xthal_dcache_enable( void ); /* DEPRECATED */ /* disable caches */ extern void xthal_icache_disable( void ); /* DEPRECATED */ extern void xthal_dcache_disable( void ); /* DEPRECATED */ /* whole cache operations (privileged) */ extern void xthal_icache_all_invalidate( void ); extern void xthal_dcache_all_invalidate( void ); extern void xthal_dcache_all_writeback( void ); extern void xthal_dcache_all_writeback_inv( void ); extern void xthal_icache_all_unlock( void ); extern void xthal_dcache_all_unlock( void ); /* address-range cache operations (privileged) */ /* prefetch and lock specified memory range into cache */ extern void xthal_icache_region_lock( void *addr, unsigned size ); extern void xthal_dcache_region_lock( void *addr, unsigned size ); /* unlock from cache */ extern void xthal_icache_region_unlock( void *addr, unsigned size ); extern void xthal_dcache_region_unlock( void *addr, unsigned size ); /* huge-range cache operations (privileged) (EXPERIMENTAL) */ extern void xthal_icache_hugerange_invalidate( void *addr, unsigned size ); extern void xthal_icache_hugerange_unlock( void *addr, unsigned size ); extern void xthal_dcache_hugerange_invalidate( void *addr, unsigned size ); extern void xthal_dcache_hugerange_unlock( void *addr, unsigned size ); extern void xthal_dcache_hugerange_writeback( void *addr, unsigned size ); extern void xthal_dcache_hugerange_writeback_inv( void *addr, unsigned size ); # ifndef XTHAL_USE_CACHE_MACROS /* cache line operations (privileged) */ extern void xthal_icache_line_lock(void *addr); extern void xthal_dcache_line_lock(void *addr); extern void xthal_icache_line_unlock(void *addr); extern void xthal_dcache_line_unlock(void *addr); # endif /* Local Memory ECC/Parity */ /* Inject memory errors; flags is bit combination of XTHAL_MEMEP_F_xxx: */ extern void xthal_memep_inject_error(void *addr, int size, int flags); /* Memory Management Unit */ extern const unsigned char Xthal_have_spanning_way; extern const unsigned char Xthal_have_identity_map; extern const unsigned char Xthal_have_mimic_cacheattr; extern const unsigned char Xthal_have_xlt_cacheattr; extern const unsigned char Xthal_have_cacheattr; extern const unsigned char Xthal_have_tlbs; extern const unsigned char Xthal_mmu_asid_bits; /* 0 .
. 8 */
extern const unsigned char Xthal_mmu_asid_kernel;
extern const unsigned char Xthal_mmu_rings; /* 1 .. 4 (perhaps 0 if no MMU and/or no protection?) */
extern const unsigned char Xthal_mmu_ring_bits;
extern const unsigned char Xthal_mmu_sr_bits;
extern const unsigned char Xthal_mmu_ca_bits;
extern const unsigned int Xthal_mmu_max_pte_page_size;
extern const unsigned int Xthal_mmu_min_pte_page_size;
extern const unsigned char Xthal_itlb_way_bits;
extern const unsigned char Xthal_itlb_ways;
extern const unsigned char Xthal_itlb_arf_ways;
extern const unsigned char Xthal_dtlb_way_bits;
extern const unsigned char Xthal_dtlb_ways;
extern const unsigned char Xthal_dtlb_arf_ways;

/* Return error codes for hal functions */

/* function successful, operation completed as expected */
#define XTHAL_SUCCESS 0
/* XTHAL_CAFLAGS_NO_PARTIAL was specified, and no full region is
   covered by the supplied range. */
#define XTHAL_NO_REGIONS_COVERED -1
/* The XTHAL_CAFLAGS_EXACT flag was given, but no exact mapping is possible.
*/
#define XTHAL_INEXACT -2
/* The supplied address doesn't correspond to the start of a region. */
#define XTHAL_INVALID_ADDRESS -3
/* This functionality is not available on this architecture. */
#define XTHAL_UNSUPPORTED -4
/* Translation failed because vaddr and paddr were not aligned. */
#define XTHAL_ADDRESS_MISALIGNED -5
/* There is no mapping for the supplied address. */
#define XTHAL_NO_MAPPING -6
/* The requested access rights are not supported */
#define XTHAL_BAD_ACCESS_RIGHTS -7
/* The requested memory type is not supported */
#define XTHAL_BAD_MEMORY_TYPE -8
/* The entries supplied are not properly aligned to the MPU's background map. */
#define XTHAL_MAP_NOT_ALIGNED -9
/* There are not enough MPU entries available to do the requested mapping. */
#define XTHAL_OUT_OF_ENTRIES -10
/* The entries supplied are not properly ordered for the MPU.
*/
#define XTHAL_OUT_OF_ORDER_MAP -11
/* an invalid argument such as a null pointer was supplied to the function */
#define XTHAL_INVALID -12
/* specified region is of zero size, therefore no mapping is done. */
#define XTHAL_ZERO_SIZED_REGION -13
/* specified range wraps around '0' */
#define XTHAL_INVALID_ADDRESS_RANGE -14

/* For backward compatibility we retain the following inconsistently named
   constants. Do not use them as they may be removed in a future release. */
#define XCHAL_SUCCESS XTHAL_SUCCESS
#define XCHAL_ADDRESS_MISALIGNED XTHAL_ADDRESS_MISALIGNED
#define XCHAL_INEXACT XTHAL_INEXACT
#define XCHAL_INVALID_ADDRESS XTHAL_INVALID_ADDRESS
#define XCHAL_UNSUPPORTED_ON_THIS_ARCH XTHAL_UNSUPPORTED
#define XCHAL_NO_PAGES_MAPPED XTHAL_NO_REGIONS_COVERED

/* Convert between virtual and physical addresses (through static maps only) */
extern int xthal_static_v2p( unsigned vaddr, unsigned *paddrp );
extern int xthal_static_p2v( unsigned paddr, unsigned *vaddrp, unsigned cached );
extern int xthal_set_region_translation(void* vaddr, void* paddr, unsigned size, unsigned cache_atr, unsigned flags);
extern int xthal_v2p(void*, void**, unsigned*, unsigned*);
extern int xthal_invalidate_region(void* addr);
extern int xthal_set_region_translation_raw(void *vaddr, void *paddr, unsigned cattr);

/* MPU (Memory Protection Unit) */

/* MPU access rights constants: Only the combinations listed below are supported by the MPU.
*/
#define XTHAL_AR_NONE    0  /* no access */
#define XTHAL_AR_R       4  /* Kernel read, User no access */
#define XTHAL_AR_RX      5  /* Kernel read/execute, User no access */
#define XTHAL_AR_RW      6  /* Kernel read/write, User no access */
#define XTHAL_AR_RWX     7  /* Kernel read/write/execute, User no access */
#define XTHAL_AR_Ww      8  /* Kernel write, User write */
#define XTHAL_AR_RWrwx   9  /* Kernel read/write, User read/write/execute */
#define XTHAL_AR_RWr    10  /* Kernel read/write, User read */
#define XTHAL_AR_RWXrx  11  /* Kernel read/write/execute, User read/execute */
#define XTHAL_AR_Rr     12  /* Kernel read, User read */
#define XTHAL_AR_RXrx   13  /* Kernel read/execute, User read/execute */
#define XTHAL_AR_RWrw   14  /* Kernel read/write, User read/write */
#define XTHAL_AR_RWXrwx 15  /* Kernel read/write/execute, User read/write/execute */

#define XTHAL_AR_WIDTH 4 /* # bits used to encode access rights */

/* If the bit XTHAL_MPU_USE_EXISTING_ACCESS_RIGHTS is set in the accessRights
   argument, the existing access rights of the region are kept. */
#define XTHAL_MPU_USE_EXISTING_ACCESS_RIGHTS 0x00002000
/* If the bit XTHAL_MPU_USE_EXISTING_MEMORY_TYPE is set in the memoryType
   argument, the existing memory type of the region is kept. */
#define XTHAL_MPU_USE_EXISTING_MEMORY_TYPE 0x00004000

/* The following groups of constants are bit-wise OR'd together to specify
   the memory type of an MPU entry. */

/* The following group of constants is used to specify the cache attributes of
   a region: */
#define XTHAL_MEM_DEVICE               0x00008000
#define XTHAL_MEM_NON_CACHEABLE        0x00090000
#define XTHAL_MEM_WRITETHRU_NOALLOC    0x00080000
#define XTHAL_MEM_WRITETHRU            0x00040000
#define XTHAL_MEM_WRITETHRU_WRITEALLOC 0x00060000
#define XTHAL_MEM_WRITEBACK_NOALLOC    0x00050000
#define XTHAL_MEM_WRITEBACK            0x00070000

/* Indicates a read is interruptible.
Only applicable to devices */
#define XTHAL_MEM_INTERRUPTIBLE 0x08000000

/* Indicates if writes to this memory are bufferable ... only applicable */
#define XTHAL_MEM_BUFFERABLE 0x01000000

/* The following group of constants indicates the scope of the sharing of
   the region: */
#define XTHAL_MEM_NON_SHAREABLE    0x00000000
#define XTHAL_MEM_INNER_SHAREABLE  0x02000000
#define XTHAL_MEM_OUTER_SHAREABLE  0x04000000
#define XTHAL_MEM_SYSTEM_SHAREABLE 0x06000000

/* Combine system and processor (local) cache attributes into one memory type: */
#define XTHAL_MEM_PROC_CACHE(system, processor) \
    (((system) & 0x000f0000) | (((processor) & 0x000f0000) << 4) | \
     (((system) & XTHAL_MEM_DEVICE) | ((processor) & XTHAL_MEM_DEVICE)))

/* Get/set the access rights field of an MPU entry: */
#define XTHAL_MPU_ENTRY_GET_ACCESS(x) (((x).at >> 8) & 0xf)

#define XTHAL_MPU_ENTRY_SET_ACCESS(x, accessRights) ((x).at = \
    ((x).at & 0xfffff0ff) | (((accessRights) & 0xf) << 8))

/* Get/set the memory type field of an MPU entry: */
#define XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(x) (((x).at >> 12) & 0x1ff)

#define XTHAL_MPU_ENTRY_SET_MEMORY_TYPE(x, memtype) ((x).at = \
    ((x).at & 0xffe00fff) | (((XTHAL_ENCODE_MEMORY_TYPE(memtype)) & 0x1ff) << 12))

/* Computes the value of the CACHEADRDIS register: e points to an array of
   n MPU entries (0 <= n <= XCHAL_MPU_ENTRIES); bit i of the result is
   1 if there are no cacheable areas in the corresponding 512MB region. */
extern uint32_t xthal_calc_cacheadrdis(const struct xthal_MPU_entry* e, uint32_t n);

/* Set the access rights and memory type of an address range using the MPU: */
extern int xthal_mpu_set_region_attribute(void* vaddr, size_t size,
    int32_t accessRights, int32_t memoryType, uint32_t flags);

/* The following are internal implementation macros. These should not
   be used outside the HAL.
These should not */ /* */ #define _XTHAL_SYSTEM_CACHE_BITS 0x000f0000 #define _XTHAL_LOCAL_CACHE_BITS 0x00f00000 #define _XTHAL_MEM_SYSTEM_RWC_MASK 0x00070000 #define _XTHAL_MEM_LOCAL_RWC_MASK 0x00700000 #define _XTHAL_SHIFT_RWC 16 #define _XTHAL_MEM_ANY_SHAREABLE(x) (((x) & XTHAL_MEM_SYSTEM_SHAREABLE) ? 1 : 0) #define _XTHAL_MEM_INNER_SHAREABLE(x) ((((x) & XTHAL_MEM_SYSTEM_SHAREABLE) \ == XTHAL_MEM_INNER_SHAREABLE) ? 1 : 0) #define _XTHAL_MEM_IS_BUFFERABLE(x) (((x) & XTHAL_MEM_BUFFERABLE) ? 1 : 0) #define _XTHAL_MEM_IS_DEVICE(x) (((x) & XTHAL_MEM_DEVICE) ? 1 : 0) #define _XTHAL_NON_CACHEABLE_DOMAIN(x) \ (_XTHAL_MEM_IS_DEVICE(x) || _XTHAL_MEM_ANY_SHAREABLE(x)? 0x3 : 0) #define _XTHAL_CACHEABLE_DOMAIN(x) (_XTHAL_MEM_ANY_SHAREABLE(x) ? \ 0x3 : 0x1) #define _XTHAL_MEM_CACHE_MASK(x) ((x) & _XTHAL_SYSTEM_CACHE_BITS) #define _XTHAL_IS_SYSTEM_NONCACHEABLE(x) \ (((_XTHAL_MEM_CACHE_MASK(x) & XTHAL_MEM_NON_CACHEABLE) == \ XTHAL_MEM_NON_CACHEABLE) ?
1 : 0)

#define _XTHAL_ENCODE_DEVICE(x) \
    (((((x) & XTHAL_MEM_INTERRUPTIBLE) ? 1 : 0) << 3) | \
     (_XTHAL_NON_CACHEABLE_DOMAIN(x) << 1) | _XTHAL_MEM_IS_BUFFERABLE(x))

#define _XTHAL_IS_LOCAL_NONCACHEABLE(x) \
    (((((x) >> 4) & XTHAL_MEM_NON_CACHEABLE) == \
      XTHAL_MEM_NON_CACHEABLE) ? 1 : 0)

#define _XTHAL_ENCODE_SYSTEM_CACHEABLE(x) \
    (_XTHAL_IS_LOCAL_NONCACHEABLE(x) ? \
     (_XTHAL_CACHEABLE_DOMAIN(x) << _XTHAL_SHIFT_RWC) : 0)

#define _XTHAL_ENCODE_SYSTEM_CACHEABLE_LOCAL_CACHEABLE(x) \
    ((_XTHAL_CACHEABLE_DOMAIN(x) << _XTHAL_SHIFT_RWC))

/* End of internal macros */

/* The functions and constants below here have been deprecated. */
#define XTHAL_MEM_NON_CACHED    XTHAL_MEM_NON_CACHEABLE
#define XTHAL_MEM_NON_SHARED    XTHAL_MEM_NON_SHAREABLE
#define XTHAL_MEM_INNER_SHARED  XTHAL_MEM_INNER_SHAREABLE
#define XTHAL_MEM_OUTER_SHARED  XTHAL_MEM_OUTER_SHAREABLE
#define XTHAL_MEM_SYSTEM_SHARED XTHAL_MEM_SYSTEM_SHAREABLE
#define XTHAL_MEM_SW_SHAREABLE 0

#define xthal_is_cached(memoryType) (xthal_is_cacheable((memoryType)))
extern int32_t xthal_read_background_map(struct xthal_MPU_entry* entries);
/* end deprecated functions and constants */

#ifdef __cplusplus
}
#endif
#endif /*!
_ASMLANGUAGE && !_NOCLANGUAGE && !__ASSEMBLER__ */ #endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */ / EXPERIMENTAL and DEPRECATED Definitions / #if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__) #ifdef __cplusplus extern "C" { #endif #ifdef INCLUDE_DEPRECATED_HAL_DEBUG_CODE #define XTHAL_24_BIT_BREAK 0x80000000 #define XTHAL_16_BIT_BREAK 0x40000000 extern const unsigned short Xthal_ill_inst_16[16]; #define XTHAL_DEST_REG 0xf0000000 /* Mask for destination register */ #define XTHAL_DEST_REG_INST 0x08000000 /* Branch address is in register */ #define XTHAL_DEST_REL_INST 0x04000000 /* Branch address is relative */ #define XTHAL_RFW_INST 0x00000800 #define XTHAL_RFUE_INST 0x00000400 #define XTHAL_RFI_INST 0x00000200 #define XTHAL_RFE_INST 0x00000100 #define XTHAL_RET_INST 0x00000080 #define XTHAL_BREAK_INST 0x00000040 #define XTHAL_SYSCALL_INST 0x00000020 #define XTHAL_LOOP_END 0x00000010 /* Not set by xthal_inst_type */ #define XTHAL_JUMP_INST 0x00000008 /* Call or jump instruction */ #define XTHAL_BRANCH_INST 0x00000004 /* Branch instruction */ #define XTHAL_24_BIT_INST 0x00000002 #define XTHAL_16_BIT_INST 0x00000001 typedef struct xthal_state { unsigned pc; unsigned ar[16]; unsigned lbeg; unsigned lend; unsigned lcount; unsigned extra_ptr; unsigned cpregs_ptr[XTHAL_MAX_CPS]; } XTHAL_STATE; extern unsigned int xthal_inst_type(void *addr); extern unsigned int xthal_branch_addr(void *addr); extern unsigned int xthal_get_npc(XTHAL_STATE *user_state); #endif /* INCLUDE_DEPRECATED_HAL_DEBUG_CODE */ #ifdef __cplusplus } #endif #endif /*!
_ASMLANGUAGE && !_NOCLANGUAGE && !__ASSEMBLER__ */ #endif /*XTENSA_HAL_H*/
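/* Usage sketch (not part of this header): how the error codes and
   XTHAL_CAFLAG_* bits above combine in practice. It assumes the usual
   XCHAL_CA_WRITEBACK attribute encoding from <xtensa/config/core.h>;
   the buffer and its size are hypothetical. */

#include <xtensa/hal.h>
#include <xtensa/config/core.h>

/* Request a write-back cacheable mapping for a buffer, refusing changes
   to regions only partially covered by the range: */
static int make_region_writeback(void *buf, unsigned size)
{
    int rc = xthal_set_region_attribute(buf, size, XCHAL_CA_WRITEBACK,
                                        XTHAL_CAFLAG_NO_PARTIAL);
    if (rc == XTHAL_SUCCESS)
        return 0;
    /* rc is one of the negative XTHAL_* codes above, e.g.
       XTHAL_NO_REGIONS_COVERED if the range spans no full region,
       or XTHAL_UNSUPPORTED if this config has no attribute control. */
    return rc;
}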
/* $Id: //depot/dev/Foxhill/Xtensa/OS/include/xtensa/mpuasm.h#5 $ */ /* */ #ifndef _IDMAASM_H_ #define _IDMAASM_H_ #if XCHAL_HAVE_IDMA #include #endif /* */ // IDMA_REG_SETTINGS, // IDMA_REG_TIMEOUT, // IDMA_REG_DESC_START, // IDMA_REG_CONTROL, // IDMA_REG_USERPRIV, .macro _idma_restore a_save, a_temp1, a_temp2, a_temp3 #if XCHAL_HAVE_IDMA l32i \a_temp1, \a_save, 0 movi \a_temp3, idmareg_base movi \a_temp2, IDMA_REG_SETTINGS add \a_temp2, \a_temp2, \a_temp3 wer \a_temp1, \a_temp2 l32i \a_temp1, \a_save, 4 movi \a_temp2, IDMA_REG_TIMEOUT add \a_temp2, \a_temp2, \a_temp3 wer \a_temp1, \a_temp2 l32i \a_temp1, \a_save, 8 movi \a_temp2, IDMA_REG_DESC_START add \a_temp2, \a_temp2, \a_temp3 wer \a_temp1, \a_temp2 l32i \a_temp1, \a_save, 12 movi \a_temp2, IDMA_REG_CONTROL add \a_temp2, \a_temp2, \a_temp3 wer \a_temp1, \a_temp2 l32i \a_temp1, \a_save, 16 movi \a_temp2, IDMA_REG_USERPRIV add \a_temp2, \a_temp2, \a_temp3 wer \a_temp1, \a_temp2 #endif .endm #endif //_IDMAASM_H_
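/* The _idma_restore macro above pulls five 32-bit words from fixed
   offsets in the a_save area and writes them out via WER. A C-side
   mirror of that layout (a sketch only; idma_save_area_t is not part
   of this header) keeps the offsets explicit: */

#include <stdint.h>

typedef struct {
    uint32_t settings;   /* offset 0:  restored to IDMA_REG_SETTINGS   */
    uint32_t timeout;    /* offset 4:  restored to IDMA_REG_TIMEOUT    */
    uint32_t desc_start; /* offset 8:  restored to IDMA_REG_DESC_START */
    uint32_t control;    /* offset 12: restored to IDMA_REG_CONTROL    */
    uint32_t userpriv;   /* offset 16: restored to IDMA_REG_USERPRIV   */
} idma_save_area_t;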
/* xer-constants.h -- various constants describing external registers accessed via wer and rer. TODO: find a better prefix. Also conditionalize certain constants based on number of cores and interrupts actually present. */ /* */ #include #define NUM_INTERRUPTS 27 #define NUM_CORES 4 /* Routing of NMI (BInterrupt2) and interrupts 0..n-1 (BInterrupt3+) RER reads WER writes */ #define XER_MIROUT 0x0000 #define XER_MIROUT_LAST (XER_MIROUT + NUM_INTERRUPTS) /* IPI to core M (all 16 causes). RER reads WER clears */ #define XER_MIPICAUSE 0x0100 #define XER_MIPICAUSE_FIELD_A_FIRST 0x0 #define XER_MIPICAUSE_FIELD_A_LAST 0x0 #define XER_MIPICAUSE_FIELD_B_FIRST 0x1 #define XER_MIPICAUSE_FIELD_B_LAST 0x3 #define XER_MIPICAUSE_FIELD_C_FIRST 0x4 #define XER_MIPICAUSE_FIELD_C_LAST 0x7 #define XER_MIPICAUSE_FIELD_D_FIRST 0x8 #define XER_MIPICAUSE_FIELD_D_LAST 0xF /* IPI from cause bit 0..15 RER invalid WER sets */ #define XER_MIPISET 0x0140 #define XER_MIPISET_LAST 0x014F /* Global enable RER read WER clear */ #define XER_MIENG 0x0180 /* Global enable RER invalid WER set */ #define XER_MIENG_SET 0x0184 /* Global assert RER read WER clear */ #define XER_MIASG 0x0188 /* Global enable RER invalid WER set */ #define XER_MIASG_SET 0x018C /* IPI partition register RER read WER write */ #define XER_PART 0x0190 #define XER_IPI0 0x0 #define XER_IPI1 0x1 #define XER_IPI2 0x2 #define XER_IPI3 0x3 #define XER_PART_ROUTE_IPI(NUM, FIELD) ((NUM) << ((FIELD) << 2)) #define XER_PART_ROUTE_IPI_CAUSE(TO_A, TO_B, TO_C, TO_D) \ (XER_PART_ROUTE_IPI(TO_A, XER_IPI0) | \ XER_PART_ROUTE_IPI(TO_B, XER_IPI1) | \ XER_PART_ROUTE_IPI(TO_C, XER_IPI2) | \ XER_PART_ROUTE_IPI(TO_D, XER_IPI3)) #define XER_IPI_WAKE_EXT_INTERRUPT XCHAL_EXTINT0_NUM #define XER_IPI_WAKE_CAUSE XER_MIPICAUSE_FIELD_C_FIRST #define XER_IPI_WAKE_ADDRESS (XER_MIPISET + XER_IPI_WAKE_CAUSE) #define XER_DEFAULT_IPI_ROUTING XER_PART_ROUTE_IPI_CAUSE(XER_IPI1, XER_IPI0, XER_IPI2, XER_IPI3) /* System configuration ID RER read WER invalid */ #define XER_SYSCFGID 0x01A0 /* RunStall to slave processors RER read WER write */ #define XER_MPSCORE 0x0200 /* Cache coherency ON RER read WER write */ #define XER_CCON 0x0220
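/* Worked example (not part of this header): XER_PART_ROUTE_IPI(NUM, FIELD)
   places a 4-bit IPI number at bit position FIELD*4, so
   XER_DEFAULT_IPI_ROUTING expands to (1<<0)|(0<<4)|(2<<8)|(3<<12) = 0x3201.
   The xer_write helper below is an assumed wrapper around the WER
   instruction, not something this header provides. */

#include <stdint.h>

static inline void xer_write(uint32_t value, uint32_t addr)
{
    __asm__ __volatile__("wer %0, %1" :: "a"(value), "a"(addr));
}

static inline void ipi_route_defaults(void)
{
    xer_write(XER_DEFAULT_IPI_ROUTING, XER_PART);
}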
/* */ /* */ #ifndef XTENSA_CACHEATTRASM_H #define XTENSA_CACHEATTRASM_H #include /* Determine whether cache attributes are controlled using eight 512MB entries: */ #define XCHAL_CA_8X512 (XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR \ || (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)) /* */ / GENERIC -- ALL CACHES / /* */ .macro _cacheattr_get tlb #if XCHAL_HAVE_CACHEATTR rsr.cacheattr a2 #elif XCHAL_CA_8X512 // We have a config that "mimics" CACHEATTR using a simplified // "MMU" composed of a single statically-mapped way. // DTLB and ITLB are independent, so there's no single // cache attribute that can describe both. So for now // just return the DTLB state. movi a5, 0xE0000000 movi a2, 0 movi a3, XCHAL_SPANNING_WAY 1: add a3, a3, a5 // next segment r&tlb&1 a4, a3 // get PPN+CA of segment at 0xE0000000, 0xC0000000, ..., 0 dsync // interlock??? slli a2, a2, 4 extui a4, a4, 0, 4 // extract CA or a2, a2, a4 bgeui a3, 16, 1b #else // This macro isn't applicable to arbitrary MMU configurations.
// Just return zero. movi a2, 0 #endif .endm .macro icacheattr_get _cacheattr_get itlb .endm .macro dcacheattr_get _cacheattr_get dtlb .endm /* Default (powerup/reset) value of CACHEATTR, all BYPASS mode (ie. disabled/bypassed caches): */ #if XCHAL_HAVE_PTP_MMU # define XCHAL_CACHEATTR_ALL_BYPASS 0x33333333 #else # define XCHAL_CACHEATTR_ALL_BYPASS 0x22222222 #endif #if XCHAL_CA_8X512 #if XCHAL_HAVE_PTP_MMU # define XCHAL_FCA_ENAMASK 0x0AA0 /* bitmap of fetch attributes that require enabled icache */ # define XCHAL_LCA_ENAMASK 0x0FF0 /* bitmap of load attributes that require enabled dcache */ # define XCHAL_SCA_ENAMASK 0x0CC0 /* bitmap of store attributes that require enabled dcache */ #else # define XCHAL_FCA_ENAMASK 0x003A /* bitmap of fetch attributes that require enabled icache */ # define XCHAL_LCA_ENAMASK 0x0033 /* bitmap of load attributes that require enabled dcache */ # define XCHAL_SCA_ENAMASK 0x0033 /* bitmap of store attributes that require enabled dcache */ #endif #define XCHAL_LSCA_ENAMASK (XCHAL_LCA_ENAMASK|XCHAL_SCA_ENAMASK) /* l/s attrs requiring enabled dcache */ #define XCHAL_ALLCA_ENAMASK (XCHAL_FCA_ENAMASK|XCHAL_LSCA_ENAMASK) /* all attrs requiring enabled caches */ /* */ .
macro _cacheattr_is_enabled label movi a4, 8 // loop 8 times .Lcaife\@: extui a5, a2, 0, 4 // get CA nibble ssr a5 // index into mask according to CA... srl a5, a3 // ...and get CA's mask bit in a5 bit 0 bbsi.l a5, 0, \label // if CA indicates cache enabled, jump to label srli a2, a2, 4 // next nibble addi a4, a4, -1 bnez a4, .Lcaife\@ // loop for each nibble .endm #else /* XCHAL_CA_8X512 */ .macro _cacheattr_is_enabled label j \label // macro not applicable, assume caches always enabled .endm #endif /* XCHAL_CA_8X512 */ /* */ .macro icacheattr_is_enabled label #if XCHAL_CA_8X512 icacheattr_get movi a3, XCHAL_FCA_ENAMASK #endif _cacheattr_is_enabled \label .endm /* */ .macro dcacheattr_is_enabled label #if XCHAL_CA_8X512 dcacheattr_get movi a3, XCHAL_LSCA_ENAMASK #endif _cacheattr_is_enabled \label .endm /* */ .macro cacheattr_is_enabled label #if XCHAL_HAVE_CACHEATTR rsr.cacheattr a2 movi a3, XCHAL_ALLCA_ENAMASK #elif XCHAL_CA_8X512 icacheattr_get movi a3, XCHAL_FCA_ENAMASK _cacheattr_is_enabled \label dcacheattr_get movi a3, XCHAL_LSCA_ENAMASK #endif _cacheattr_is_enabled \label .
endm /* */ #if XCHAL_CA_8X512 && !XCHAL_HAVE_CACHEATTR // We have a config that "mimics" CACHEATTR using a simplified // "MMU" composed of a single statically-mapped way. /* */ .macro icacheattr_set movi a5, 0xE0000000 // mask of upper 3 bits movi a6, 3f // PC where ITLB is set movi a3, XCHAL_SPANNING_WAY // start at region 0 (0 .. 7) mov a7, a2 // copy a2 so it doesn't get clobbered and a6, a6, a5 // upper 3 bits of local PC area j 3f // Use micro-architecture specific method. // The following 4-instruction sequence is aligned such that // it all fits within a single I-cache line. Sixteen byte // alignment is sufficient for this (using XCHAL_ICACHE_LINESIZE // actually causes problems because that can be greater than // the alignment of the reset vector, where this macro is often // invoked, which would cause the linker to align the reset // vector code away from the reset vector!!). .begin no-transform .align 16 /*XCHAL_ICACHE_LINESIZE*/ 1: witlb a4, a3 // write wired PTE (CA, no PPN) of 512MB segment to ITLB isync .
end no-transform nop nop sub a3, a3, a5 // next segment (add 0x20000000) bltui a3, 16, 4f // done? // Note that in the WITLB loop, we don't do any load/stores // (may not be an issue here, but it is important in the DTLB case). 2: srli a7, a7, 4 // next CA 3: # if XCHAL_HAVE_MIMIC_CACHEATTR extui a4, a7, 0, 4 // extract CA to set # else /* have translation, preserve it: */ ritlb1 a8, a3 // get current PPN+CA of segment //dsync // interlock??? extui a4, a7, 0, 4 // extract CA to set srli a8, a8, 4 // clear CA but keep PPN ... slli a8, a8, 4 // ... add a4, a4, a8 // combine new CA with PPN to preserve # endif beq a3, a6, 1b // current PC's region? if so, do it in a safe way witlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to ITLB sub a3, a3, a5 // next segment (add 0x20000000) bgeui a3, 16, 2b isync // make sure all ifetch changes take effect 4: .endm // icacheattr_set /* */ .macro dcacheattr_set movi a5, 0xE0000000 // mask of upper 3 bits movi a3, XCHAL_SPANNING_WAY // start at region 0 (0 .
. 7) mov a7, a2 // copy a2 so it doesn't get clobbered // Note that in the WDTLB loop, we don't do any load/stores 2: // (including implicit l32r via movi) because it isn't safe. # if XCHAL_HAVE_MIMIC_CACHEATTR extui a4, a7, 0, 4 // extract CA to set # else /* have translation, preserve it: */ rdtlb1 a8, a3 // get current PPN+CA of segment //dsync // interlock??? extui a4, a7, 0, 4 // extract CA to set srli a8, a8, 4 // clear CA but keep PPN ... slli a8, a8, 4 // ... add a4, a4, a8 // combine new CA with PPN to preserve # endif wdtlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to DTLB sub a3, a3, a5 // next segment (add 0x20000000) srli a7, a7, 4 // next CA bgeui a3, 16, 2b dsync // make sure all data path changes take effect .endm // dcacheattr_set #endif /* XCHAL_CA_8X512 && !XCHAL_HAVE_CACHEATTR */ /* */ .macro cacheattr_set #if XCHAL_HAVE_CACHEATTR # if XCHAL_ICACHE_LINESIZE < 4 // No i-cache, so can always safely write to CACHEATTR: wsr.cacheattr a2 # else // The Athens micro-architecture, when using the old // exception architecture option (ie.
with the CACHEATTR register) // allows changing the cache attributes of the running code // using the following exact sequence aligned to be within // an instruction cache line. (NOTE: using XCHAL_ICACHE_LINESIZE // alignment actually causes problems because that can be greater // than the alignment of the reset vector, where this macro is often // invoked, which would cause the linker to align the reset // vector code away from the reset vector!!). j 1f .begin no-transform .align 16 /*XCHAL_ICACHE_LINESIZE*/ // align to within an I-cache line 1: wsr.cacheattr a2 isync .end no-transform nop nop # endif #elif XCHAL_CA_8X512 // DTLB and ITLB are independent, but to keep semantics // of this macro we simply write to both. icacheattr_set dcacheattr_set #else // This macro isn't applicable to arbitrary MMU configurations. // Do nothing in this case. #endif .endm #endif /*XTENSA_CACHEATTRASM_H*/
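/* For reference, the CACHEATTR layout the macros above iterate over is
   one 4-bit attribute per 512MB region, region 0 in bits 3..0. A small
   C sketch (not part of this assembler header) that packs such a word: */

#include <stdint.h>

static inline uint32_t cacheattr_pack(const uint8_t ca[8])
{
    uint32_t word = 0;
    int i;
    for (i = 7; i >= 0; --i)          /* region 7 down to region 0 */
        word = (word << 4) | (ca[i] & 0xF);
    return word;                      /* region 0 ends up in bits 3..0 */
}

/* e.g. eight nibbles of 2 yield 0x22222222, the
   XCHAL_CACHEATTR_ALL_BYPASS reset value on non-PTP-MMU configs. */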
/* */ #ifndef __XT_PERF_CONSTS_H__ #define __XT_PERF_CONSTS_H__ #include /* */ #define XTPERF_CNT_COMMITTED_INSN 0x8002 /* Instructions committed */ #define XTPERF_CNT_BRANCH_PENALTY 0x8003 /* Branch penalty cycles */ #define XTPERF_CNT_PIPELINE_INTERLOCKS 0x8004 /* Pipeline interlocks cycles */ #define XTPERF_CNT_ICACHE_MISSES 0x8005 /* ICache misses penalty in cycles */ #define XTPERF_CNT_DCACHE_MISSES 0x8006 /* DCache misses penalty in cycles */ #define XTPERF_CNT_CYCLES 0 /* Count cycles */ #define XTPERF_CNT_OVERFLOW 1 /* Overflow of counter n-1 (assuming this is counter n) */ #define XTPERF_CNT_INSN 2 /* Successfully completed instructions */ #define XTPERF_CNT_D_STALL 3 /* Data-related GlobalStall cycles */ #define XTPERF_CNT_I_STALL 4 /* Instruction-related and other GlobalStall cycles */ #define XTPERF_CNT_EXR 5 /* Exceptions and pipeline replays */ #define XTPERF_CNT_BUBBLES 6 /* Hold and other bubble cycles */ #define XTPERF_CNT_I_TLB 7 /* Instruction TLB Accesses (per instruction retiring) */ #define XTPERF_CNT_I_MEM 8 /* Instruction memory accesses (per instruction retiring) */ #define XTPERF_CNT_D_TLB 9 /* Data TLB accesses */ #define XTPERF_CNT_D_LOAD_U1 10 /* Data memory load instruction (load-store unit 1) */ #define XTPERF_CNT_D_STORE_U1 11 /* Data memory store instruction (load-store unit 1) */ #define XTPERF_CNT_D_ACCESS_U1 12 /* Data memory accesses (load, store, S32C1I, etc; load-store unit 1) */ #define XTPERF_CNT_D_LOAD_U2 13 /* Data memory load instruction (load-store unit 2) */ #define XTPERF_CNT_D_STORE_U2 14 /* Data memory store instruction (load-store unit 2) */ #define XTPERF_CNT_D_ACCESS_U2 15 /* Data memory accesses (load, store, S32C1I, etc; load-store unit 2) */ #define XTPERF_CNT_D_LOAD_U3 16 /* Data memory load instruction (load-store unit 3) */ #define XTPERF_CNT_D_STORE_U3 17 /* Data memory store instruction (load-store unit 3) */ #define XTPERF_CNT_D_ACCESS_U3 18 /* Data memory accesses (load, store, S32C1I, etc; load-store unit 3) */ #define XTPERF_CNT_MULTIPLE_LS 22 /* Multiple Load/Store */ #define XTPERF_CNT_OUTBOUND_PIF 23 /* Outbound PIF transactions */ #define XTPERF_CNT_INBOUND_PIF 24 /* Inbound PIF transactions */ #define XTPERF_CNT_PREFETCH 26 /* Prefetch events */ #if XCHAL_HW_VERSION >= 270004 #define XTPERF_CNT_IDMA 27 /* iDMA counters */ #define XTPERF_CNT_INSN_LENGTH 28 /* Instruction length counters */ #endif /* HW version >= 270004 */ /* */ /* XTPERF_CNT_COMMITTED_INSN selector mask */ #define XTPERF_MASK_COMMITTED_INSN 0x0001 /* XTPERF_CNT_BRANCH_PENALTY selector mask */ #define XTPERF_MASK_BRANCH_PENALTY 0x0001 /* XTPERF_CNT_PIPELINE_INTERLOCKS selector mask */ #define XTPERF_MASK_PIPELINE_INTERLOCKS 0x0001 /* XTPERF_CNT_ICACHE_MISSES selector mask */ #define XTPERF_MASK_ICACHE_MISSES 0x0001 /* XTPERF_CNT_DCACHE_MISSES selector mask */ #define XTPERF_MASK_DCACHE_MISSES 0x0001 /* XTPERF_CNT_CYCLES selector mask */ #define XTPERF_MASK_CYCLES 0x0001 /* XTPERF_CNT_OVERFLOW selector mask */ #define XTPERF_MASK_OVERFLOW 0x0001 /* */ #define XTPERF_MASK_INSN_ALL 0x8DFF #define XTPERF_MASK_INSN_JX 0x0001 /* JX */ #define XTPERF_MASK_INSN_CALLX 0x0002 /* CALLXn */ #define XTPERF_MASK_INSN_RET 0x0004 /* call return i.
e. RET, RETW */ #define XTPERF_MASK_INSN_RF 0x0008 /* supervisor return i.e. RFDE, RFE, RFI, RFWO, RFWU */ #define XTPERF_MASK_INSN_BRANCH_TAKEN 0x0010 /* Conditional branch taken, or loopgtz/loopnez skips loop */ #define XTPERF_MASK_INSN_J 0x0020 /* J */ #define XTPERF_MASK_INSN_CALL 0x0040 /* CALLn */ #define XTPERF_MASK_INSN_BRANCH_NOT_TAKEN 0x0080 /* Conditional branch fall through (aka. not-taken branch) */ #define XTPERF_MASK_INSN_LOOP_TAKEN 0x0100 /* Loop instr falls into loop (aka. taken loop) */ #define XTPERF_MASK_INSN_LOOP_BEG 0x0400 /* Loopback taken to LBEG */ #define XTPERF_MASK_INSN_LOOP_END 0x0800 /* Loopback falls through to LEND */ #define XTPERF_MASK_INSN_NON_BRANCH 0x8000 /* Non-branch instruction (aka. non-CTI) */ /* */ #define XTPERF_MASK_D_STALL_ALL 0x01FE #define XTPERF_MASK_D_STALL_STORE_BUF_FULL 0x0002 /* Store buffer full stall */ #define XTPERF_MASK_D_STALL_STORE_BUF_CONFLICT 0x0004 /* Store buffer conflict stall */ #define XTPERF_MASK_D_STALL_CACHE_MISS 0x0008 /* DCache-miss stall */ #define XTPERF_MASK_D_STALL_BUSY 0x0010 /* Data RAM/ROM/XLMI busy stall */ #define XTPERF_MASK_D_STALL_IN_PIF 0x0020 /* Data inbound-PIF request stall (incl s32c1i) */ #define XTPERF_MASK_D_STALL_MHT_LOOKUP 0x0040 /* MHT lookup stall */ #define XTPERF_MASK_D_STALL_UNCACHED_LOAD 0x0080 /* Uncached load stall (included in MHT lookup stall) */ #define XTPERF_MASK_D_STALL_BANK_CONFLICT 0x0100 /* Bank-conflict stall */ /* */ #define XTPERF_MASK_I_STALL_ALL 0x01FF #define XTPERF_MASK_I_STALL_CACHE_MISS 0x0001 /* ICache-miss stall */ #define XTPERF_MASK_I_STALL_BUSY 0x0002 /* Instruction RAM/ROM busy stall */ #define XTPERF_MASK_I_STALL_IN_PIF 0x0004 /* Instruction RAM inbound-PIF request stall */ #define XTPERF_MASK_I_STALL_TIE_PORT 0x0008 /* TIE port stall */ #define XTPERF_MASK_I_STALL_EXTERNAL_SIGNAL 0x0010 /* External RunStall signal status */ #define XTPERF_MASK_I_STALL_UNCACHED_FETCH 0x0020 /* Uncached fetch stall */ #define XTPERF_MASK_I_STALL_FAST_L32R 0x0040 /* FastL32R stall */ #define XTPERF_MASK_I_STALL_ITERATIVE_MUL 0x0080 /* Iterative multiply stall */ #define XTPERF_MASK_I_STALL_ITERATIVE_DIV 0x0100 /* Iterative divide stall */ /* */ #define XTPERF_MASK_EXR_ALL 0x01FF #define XTPERF_MASK_EXR_REPLAYS 0x0001 /* Other Pipeline Replay (i.
e. excludes $ miss etc.) */ #define XTPERF_MASK_EXR_LEVEL1_INT 0x0002 /* Level-1 interrupt */ #define XTPERF_MASK_EXR_LEVELH_INT 0x0004 /* Greater-than-level-1 interrupt */ #define XTPERF_MASK_EXR_DEBUG 0x0008 /* Debug exception */ #define XTPERF_MASK_EXR_NMI 0x0010 /* NMI */ #define XTPERF_MASK_EXR_WINDOW 0x0020 /* Window exception */ #define XTPERF_MASK_EXR_ALLOCA 0x0040 /* Alloca exception */ #define XTPERF_MASK_EXR_OTHER 0x0080 /* Other exceptions */ #define XTPERF_MASK_EXR_MEM_ERR 0x0100 /* HW-corrected memory error */ /* */ #define XTPERF_MASK_BUBBLES_ALL 0x01FD #define XTPERF_MASK_BUBBLES_PSO 0x0001 /* Processor domain PSO bubble */ #define XTPERF_MASK_BUBBLES_R_HOLD_D_CACHE_MISS 0x0004 /* R hold caused by DCache miss */ #define XTPERF_MASK_BUBBLES_R_HOLD_STORE_RELEASE 0x0008 /* R hold caused by Store release */ #define XTPERF_MASK_BUBBLES_R_HOLD_REG_DEP 0x0010 /* R hold caused by register dependency */ #define XTPERF_MASK_BUBBLES_R_HOLD_WAIT 0x0020 /* R hold caused by MEMW, EXTW or EXCW */ #define XTPERF_MASK_BUBBLES_R_HOLD_HALT 0x0040 /* R hold caused by Halt instruction (TX only) */ #define XTPERF_MASK_BUBBLES_CTI 0x0080 /* CTI bubble (e.
g. branch delay slot) */ #define XTPERF_MASK_BUBBLES_WAITI 0x0100 /* WAITI bubble */ /* */ #define XTPERF_MASK_I_TLB_ALL 0x000F #define XTPERF_MASK_I_TLB_HITS 0x0001 /* Hit */ #define XTPERF_MASK_I_TLB_REPLAYS 0x0002 /* Replay of instruction due to ITLB miss */ #define XTPERF_MASK_I_TLB_REFILLS 0x0004 /* HW-assisted TLB Refill completes */ #define XTPERF_MASK_I_TLB_MISSES 0x0008 /* ITLB Miss Exception */ /* */ #define XTPERF_MASK_I_MEM_ALL 0x000F #define XTPERF_MASK_I_MEM_CACHE_HITS 0x0001 /* ICache Hit */ #define XTPERF_MASK_I_MEM_CACHE_MISSES 0x0002 /* ICache Miss (includes uncached) */ #define XTPERF_MASK_I_MEM_IRAM 0x0004 /* InstRAM or InstROM */ #define XTPERF_MASK_I_MEM_BYPASS 0x0008 /* Bypass (i.e. uncached) fetch */ /* */ #define XTPERF_MASK_D_TLB_ALL 0x000F #define XTPERF_MASK_D_TLB_HITS 0x0001 /* Hit */ #define XTPERF_MASK_D_TLB_REPLAYS 0x0002 /* Replay of instruction due to DTLB miss */ #define XTPERF_MASK_D_TLB_REFILLS 0x0004 /* HW-assisted TLB Refill completes */ #define XTPERF_MASK_D_TLB_MISSES 0x0008 /* DTLB Miss Exception */ /* */ #define XTPERF_MASK_D_LOAD_ALL 0x000F #define XTPERF_MASK_D_LOAD_CACHE_HITS 0x0001 /* Cache Hit */ #define XTPERF_MASK_D_LOAD_CACHE_MISSES 0x0002 /* Cache Miss */ #define XTPERF_MASK_D_LOAD_LOCAL_MEM 0x0004 /* Local memory hit */ #define XTPERF_MASK_D_LOAD_BYPASS 0x0008 /* Bypass (i.
e. uncached) load */ /* */ #define XTPERF_MASK_D_STORE_ALL 0x000F #define XTPERF_MASK_D_STORE_CACHE_HITS 0x0001 /* DCache Hit */ #define XTPERF_MASK_D_STORE_CACHE_MISSES 0x0002 /* DCache Miss */ #define XTPERF_MASK_D_STORE_LOCAL_MEM 0x0004 /* Local memory hit */ #define XTPERF_MASK_D_STORE_PIF 0x0008 /* PIF Store */ /* */ #define XTPERF_MASK_D_ACCESS_ALL 0x000F #define XTPERF_MASK_D_ACCESS_CACHE_MISSES 0x0001 /* DCache Miss */ #define XTPERF_MASK_D_ACCESS_HITS_SHARED 0x0002 /* Hit Shared */ #define XTPERF_MASK_D_ACCESS_HITS_EXCLUSIVE 0x0004 /* Hit Exclusive */ #define XTPERF_MASK_D_ACCESS_HITS_MODIFIED 0x0008 /* Hit Modified */ /* */ #define XTPERF_MASK_MULTIPLE_LS_ALL 0x003F #define XTPERF_MASK_MULTIPLE_LS_0S_0L 0x0001 /* 0 stores and 0 loads */ #define XTPERF_MASK_MULTIPLE_LS_0S_1L 0x0002 /* 0 stores and 1 loads */ #define XTPERF_MASK_MULTIPLE_LS_1S_0L 0x0004 /* 1 stores and 0 loads */ #define XTPERF_MASK_MULTIPLE_LS_1S_1L 0x0008 /* 1 stores and 1 loads */ #define XTPERF_MASK_MULTIPLE_LS_0S_2L 0x0010 /* 0 stores and 2 loads */ #define XTPERF_MASK_MULTIPLE_LS_2S_0L 0x0020 /* 2 stores and 0 loads */ /* */ #define XTPERF_MASK_OUTBOUND_PIF_ALL 0x0003 #define XTPERF_MASK_OUTBOUND_PIF_CASTOUT 0x0001 /* Castout */ #define XTPERF_MASK_OUTBOUND_PIF_PREFETCH 0x0002 /* Prefetch */ /* */ #define XTPERF_MASK_INBOUND_PIF_ALL 0x0003 #define XTPERF_MASK_INBOUND_PIF_I_DMA 0x0001 /* Instruction DMA */ #define XTPERF_MASK_INBOUND_PIF_D_DMA 0x0002 /* Data DMA */ /* */ #define XTPERF_MASK_PREFETCH_ALL 0x002F #define XTPERF_MASK_PREFETCH_I_HIT 0x0001 /* I prefetch-buffer-lookup hit */ #define XTPERF_MASK_PREFETCH_D_HIT 0x0002 /* D prefetch-buffer-lookup hit */ #define XTPERF_MASK_PREFETCH_I_MISS 0x0004 /* I prefetch-buffer-lookup miss */ #define XTPERF_MASK_PREFETCH_D_MISS 0x0008 /* D prefetch-buffer-lookup miss */ #define XTPERF_MASK_PREFETCH_D_L1_FILL 0x0020 /* Fill directly to DCache L1 */ #if XCHAL_HW_VERSION >= 270004 /* */ #define XTPERF_MASK_IDMA_ALL 0x0001 #define XTPERF_MASK_IDMA_ACTIVE_CYCLES 0x0001 /* Active Cycles */ /* */ #define XTPERF_MASK_INSN_LENGTH_ALL 0x7FFF #define XTPERF_MASK_INSN_LENGTH_16 0x0001 /* 16-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_24 0x0002 /* 24-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_32 0x0004 /* 32-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_40 0x0008 /* 40-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_48 0x0010 /* 48-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_56 0x0020 /* 56-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_64 0x0040 /* 64-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_72 0x0080 /* 72-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_80 0x0100 /* 80-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_88 0x0200 /* 88-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_96 0x0400 /* 96-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_104 0x0800 /* 104-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_112 0x1000 /* 112-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_120 0x2000 /* 120-bit instruction length */ #define XTPERF_MASK_INSN_LENGTH_128 0x4000 /* 128-bit instruction length */ #endif /* HW version >= 270004 */ #endif /* __XT_PERF_CONSTS_H__ */
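/* A counter is programmed by pairing one XTPERF_CNT_* selector with a
   bitmask of that event's XTPERF_MASK_* bits. Sketch below; the field
   positions (select at bit 8, mask at bit 16) follow the XDM
   PERF_PMCTRL_SELECT/PERF_PMCTRL_MASK definitions and are an assumption
   at this point in the file. */

#include <stdint.h>

#define PM_EVENT(sel, mask) ((((uint32_t)(sel)) << 8) | (((uint32_t)(mask)) << 16))

/* Count only taken branches and call returns among committed instructions: */
static const uint32_t pmctrl_branches =
    PM_EVENT(XTPERF_CNT_INSN,
             XTPERF_MASK_INSN_BRANCH_TAKEN | XTPERF_MASK_INSN_RET);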
/* */ #ifndef XTRUNTIME_H #define XTRUNTIME_H #include #include #include #ifndef XTSTR #define _XTSTR(x) # x #define XTSTR(x) _XTSTR(x) #endif /* _xtos_core_shutoff() flags parameter values: */ #define XTOS_KEEPON_MEM 0x00000100 /* ==PWRCTL_MEM_WAKEUP */ #define XTOS_KEEPON_MEM_SHIFT 8 #define XTOS_KEEPON_DEBUG 0x00001000 /* ==PWRCTL_DEBUG_WAKEUP */ #define XTOS_KEEPON_DEBUG_SHIFT 12 #define XTOS_IDMA_NO_WAIT 0x00010000 /* Do not wait for idma to finish. Disable if necessary */ #define XTOS_IDMA_WAIT_STANDBY 0x00020000 /* Also treat standby state as the end of wait */ #define XTOS_COREF_PSO 0x00000001 /* do power shutoff */ #define XTOS_COREF_PSO_SHIFT 0 #define _xtos_set_execption_handler _xtos_set_exception_handler /* backward compatibility */ #define _xtos_set_saved_intenable _xtos_ints_on /* backward compatibility */ #define _xtos_clear_saved_intenable _xtos_ints_off /* backward compatibility */ #if !defined(_ASMLANGUAGE) && !defined(__ASSEMBLER__) #ifdef __cplusplus extern "C" { #endif #if defined(XTOS_MISRA) typedef void (_xtos_handler_func)(void *); #elif defined(__cplusplus) typedef void (_xtos_handler_func)(.
..); #else typedef void (_xtos_handler_func)(void); #endif typedef _xtos_handler_func *_xtos_handler; /* */ #if !XCHAL_HAVE_INTERRUPTS # define XTOS_SET_INTLEVEL(intlevel) 0 # define XTOS_SET_MIN_INTLEVEL(intlevel) 0 # define XTOS_RESTORE_INTLEVEL(restoreval) # define XTOS_RESTORE_JUST_INTLEVEL(restoreval) #elif XCHAL_HAVE_XEA2 /* In XEA2, we can simply safely set PS.INTLEVEL directly: */ /* NOTE: these asm macros don't modify memory, but they are marked # define XTOS_SET_INTLEVEL(intlevel) __extension__({ unsigned __tmp; \ __asm__ __volatile__( "rsil %0, " XTSTR(intlevel) "\n" \ : "=a" (__tmp) : : "memory" ); \ __tmp;}) # define XTOS_SET_MIN_INTLEVEL(intlevel) ({ unsigned __tmp, __tmp2, __tmp3; \ __asm__ __volatile__( "rsr.ps %0\n" /* get old (current) PS.INTLEVEL */ \ "movi %2, " XTSTR(intlevel) "\n" \ "extui %1, %0, 0, 4\n" /* keep only INTLEVEL bits of parameter */ \ "blt %2, %1, 1f\n" \ "rsil %0, " XTSTR(intlevel) "\n" \ "1:\n" \ : "=a" (__tmp), "=&a" (__tmp2), "=&a" (__tmp3) : : "memory" ); \ __tmp;}) # define XTOS_RESTORE_INTLEVEL(restoreval) do{ unsigned __tmp = (restoreval); \ __asm__ __volatile__( "wsr.
ps %0 ; rsync\n" \ : : "a" (__tmp) : "memory" ); \ }while(0) # define XTOS_RESTORE_JUST_INTLEVEL(restoreval) _xtos_set_intlevel(restoreval) #else /* In XEA1, we have to rely on INTENABLE register virtualization: */ extern unsigned _xtos_set_vpri( unsigned vpri ); extern unsigned _xtos_vpri_enabled; /* current virtual priority */ # define XTOS_SET_INTLEVEL(intlevel) _xtos_set_vpri(~XCHAL_INTLEVEL_ANDBELOW_MASK(intlevel)) # define XTOS_SET_MIN_INTLEVEL(intlevel) _xtos_set_vpri(_xtos_vpri_enabled & ~XCHAL_INTLEVEL_ANDBELOW_MASK(intlevel)) # define XTOS_RESTORE_INTLEVEL(restoreval) _xtos_set_vpri(restoreval) # define XTOS_RESTORE_JUST_INTLEVEL(restoreval) _xtos_set_vpri(restoreval) #endif /* */ /* Enable all interrupts (those activated with _xtos_ints_on()): */ #define XTOS_ENABLE_INTERRUPTS XTOS_SET_INTLEVEL(0) /* Disable low priority level interrupts (they can interact with the OS): */ #define XTOS_DISABLE_LOWPRI_INTERRUPTS XTOS_SET_INTLEVEL(XCHAL_NUM_LOWPRI_LEVELS) #define XTOS_MASK_LOWPRI_INTERRUPTS XTOS_SET_MIN_INTLEVEL(XCHAL_NUM_LOWPRI_LEVELS) /* Disable interrupts that can interact with the OS: */ #define XTOS_DISABLE_EXCM_INTERRUPTS XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL) #define XTOS_MASK_EXCM_INTERRUPTS XTOS_SET_MIN_INTLEVEL(XCHAL_EXCM_LEVEL) #if 0 /* XTOS_LOCK_LEVEL is not exported to applications */ /* Disable interrupts that can interact with the OS, or manipulate virtual INTENABLE: */ #define XTOS_DISABLE_LOCK_INTERRUPTS XTOS_SET_INTLEVEL(XTOS_LOCK_LEVEL) #define XTOS_MASK_LOCK_INTERRUPTS XTOS_SET_MIN_INTLEVEL(XTOS_LOCK_LEVEL) #endif /* Disable ALL interrupts (not for common use, particularly if one's processor #define XTOS_DISABLE_ALL_INTERRUPTS XTOS_SET_INTLEVEL(15) /* These two are deprecated.
Use the newer functions below. */
extern unsigned int _xtos_ints_off( unsigned int mask );
extern unsigned int _xtos_ints_on( unsigned int mask );

/* Newer functions to enable/disable the specified interrupt. */
static inline void _xtos_interrupt_enable(unsigned int intnum)
{
    _xtos_ints_on(1U << intnum);
}

static inline void _xtos_interrupt_disable(unsigned int intnum)
{
    _xtos_ints_off(1U << intnum);
}

#if XCHAL_NUM_INTLEVELS >= 1
extern void _xtos_dispatch_level1_interrupts( void );
#endif
#if XCHAL_NUM_INTLEVELS >= 2
extern void _xtos_dispatch_level2_interrupts( void );
#endif
#if XCHAL_NUM_INTLEVELS >= 3
extern void _xtos_dispatch_level3_interrupts( void );
#endif
#if XCHAL_NUM_INTLEVELS >= 4
extern void _xtos_dispatch_level4_interrupts( void );
#endif
#if XCHAL_NUM_INTLEVELS >= 5
extern void _xtos_dispatch_level5_interrupts( void );
#endif
#if XCHAL_NUM_INTLEVELS >= 6
extern void _xtos_dispatch_level6_interrupts( void );
#endif

/* Deprecated (but kept because they were documented): */
extern unsigned int _xtos_read_ints( void );
extern void _xtos_clear_ints( unsigned int mask );

/* Power shut-off related routines.
*/ extern int _xtos_core_shutoff(unsigned flags); extern int _xtos_core_save(unsigned flags, XtosCoreState *savearea, void *code); extern void _xtos_core_restore(unsigned retvalue, XtosCoreState *savearea); #if XCHAL_NUM_CONTEXTS > 1 extern unsigned _xtos_init_context(int context_num, int stack_size, _xtos_handler_func *start_func, int arg1); #endif /* Deprecated: */ #if XCHAL_NUM_TIMERS > 0 extern void _xtos_timer_0_delta( int cycles ); #endif #if XCHAL_NUM_TIMERS > 1 extern void _xtos_timer_1_delta( int cycles ); #endif #if XCHAL_NUM_TIMERS > 2 extern void _xtos_timer_2_delta( int cycles ); #endif #if XCHAL_NUM_TIMERS > 3 extern void _xtos_timer_3_delta( int cycles ); #endif #ifdef __cplusplus } #endif #endif /* !_ASMLANGUAGE && !__ASSEMBLER__ */ #endif /* XTRUNTIME_H */
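/* The XTOS_SET_INTLEVEL/XTOS_RESTORE_INTLEVEL pair above is the usual
   critical-section idiom: the set macro returns the previous PS (or the
   previous virtual priority on XEA1), which must be handed back to the
   restore macro. Minimal sketch; shared_counter is illustrative only. */

#include <xtensa/xtruntime.h>

static volatile unsigned shared_counter;

static void increment_safely(void)
{
    unsigned saved = XTOS_DISABLE_EXCM_INTERRUPTS;
    shared_counter++;
    XTOS_RESTORE_INTLEVEL(saved);
}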
/* TRAX register definitions Copyright (c) 2006-2012 Tensilica Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _TRAX_REGISTERS_H_
#define _TRAX_REGISTERS_H_

#define SHOW 1
#define HIDE 0

#define RO 0
#define RW 1

/* TRAX Register Numbers (from possible range of 0..127) */
#if 0
#define TRAXREG_ID         0
#define TRAXREG_CONTROL    1
#define TRAXREG_STATUS     2
#define TRAXREG_DATA       3
#define TRAXREG_ADDRESS    4
#define TRAXREG_TRIGGER    5
#define TRAXREG_MATCH      6
#define TRAXREG_DELAY      7
#define TRAXREG_STARTADDR  8
#define TRAXREG_ENDADDR    9
/* Internal use only (unpublished): */
#define TRAXREG_P4CHANGE   16
#define TRAXREG_P4REV      17
#define TRAXREG_P4DATE     18
#define TRAXREG_P4TIME     19
#define TRAXREG_PDSTATUS   20
#define TRAXREG_PDDATA     21
#define TRAXREG_STOP_PC    22
#define TRAXREG_STOP_ICNT  23
#define TRAXREG_MSG_STATUS 24
#define TRAXREG_FSM_STATUS 25
#define TRAXREG_IB_STATUS  26
#define TRAXREG_MAX        27
#define TRAXREG_ITCTRL     96
#endif
/* The registers above match the NAR addresses. So, their values are used for NAR access */

/* TRAX Register Fields */

/* TRAX ID register fields: */
#define TRAX_ID_PRODNO 0xf0000000 /* product number (0=TRAX) */
#define TRAX_ID_PRODOPT 0x0f000000 /* product options */
#define TRAX_ID_MIW64 0x08000000 /* opt: instruction width */
#define TRAX_ID_AMTRAX 0x04000000 /* opt: collection of options, internal (VER_2_0 or later)*/
#define TRAX_ID_MAJVER(id) (((id) >> 20) & 0x0f)
#define TRAX_ID_MINVER(id) (((id) >> 17) & 0x07)
#define TRAX_ID_VER(id) ((TRAX_ID_MAJVER(id) << 3) | TRAX_ID_MINVER(id))
#define TRAX_MEM_SIZE(status) \
    (1L << (((status) & TRAX_STATUS_MEMSZ) >> TRAX_STATUS_MEMSZ_SHIFT))

#if 0
/* Describes a field within a register: */
typedef struct {
    const char* name;
    // unsigned width;
    // unsigned shift;
    char width;
    char shift;
    char visible;   /* 0 = internal use only, 1 = shown */
    char reserved;
} trax_regfield_t;
#endif

/* Describes a TRAX register: */
typedef struct {
    const char* name;
    unsigned id;
    char width;
    char visible;
    char writable;
    char reserved;
    //const trax_regfield_t * fieldset;
} trax_regdef_t;

extern const trax_regdef_t trax_reglist[];
extern const signed int trax_readable_regs[];
extern const signed int trax_unamed_header_regs[];

#ifdef __cplusplus
extern "C" {
#endif

/* Prototypes: */
extern int trax_find_reg(char * regname, char **errmsg);
extern const char * trax_regname(int regno);

#ifdef __cplusplus
}
#endif

#endif /* _TRAX_REGISTERS_H_ */
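/* Sketch: decoding a raw TRAX ID register value with the field macros
   above (the printf target is illustrative only). */

#include <stdint.h>

static void trax_print_version(uint32_t id)
{
    unsigned maj = TRAX_ID_MAJVER(id);  /* bits 23..20 */
    unsigned min = TRAX_ID_MINVER(id);  /* bits 19..17 */
    /* e.g. printf("TRAX v%u.%u\n", maj, min); */
    (void)maj;
    (void)min;
}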
/* xdm-regs.h - Common register and related definitions for the XDM (Xtensa Debug Module) */ /* Copyright (c) 2016 Cadence Design Systems Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ #ifndef _XDM_REGS_H_ #define _XDM_REGS_H_ /* NOTE: This header file is included by C, assembler, and other sources. So any C-specific or asm-specific content must be appropriately #ifdef'd. */ /* */ /* FIXME: maybe provide only MISC+CS registers here, and leave specific subsystem registers in separate headers? eg. for TRAX, PERF, OCD */ /* XDM_.... ERI addr [NAR addr] Description...... */ /* TRAX */ #define XDM_TRAX_ID 0x100000 /*[0x00] ID */ #define XDM_TRAX_CONTROL 0x100004 /*[0x01] Control */ #define XDM_TRAX_STATUS 0x100008 /*[0x02] Status */ #define XDM_TRAX_DATA 0x10000C /*[0x03] Data */ #define XDM_TRAX_ADDRESS 0x100010 /*[0x04] Address */ #define XDM_TRAX_TRIGGER 0x100014 /*[0x05] Stop PC */ #define XDM_TRAX_MATCH 0x100018 /*[0x06] Stop PC Range */ #define XDM_TRAX_DELAY 0x10001C /*[0x07] Post Stop Trigger Capture Size */ #define XDM_TRAX_STARTADDR 0x100020 /*[0x08] Trace Memory Start */ #define XDM_TRAX_ENDADDR 0x100024 /*[0x09] Trace Memory End */ #define XDM_TRAX_DEBUGPC 0x10003C /*[0x0F] Debug PC */ #define XDM_TRAX_P4CHANGE 0x100040 /*[0x10] X */ #define XDM_TRAX_TIME0 0x100040 /*[0x10] First Time Register */ #define XDM_TRAX_P4REV 0x100044 /*[0x11] X */ #define XDM_TRAX_TIME1 0x100044 /*[0x11] Second Time Register */ #define XDM_TRAX_P4DATE 0x100048 /*[0x12] X */ #define XDM_TRAX_INTTIME_MAX 0x100048 /*[0x12] maximal Value of Timestamp IntTime */ #define XDM_TRAX_P4TIME 0x10004C /*[0x13] X */ #define XDM_TRAX_PDSTATUS 0x100050 /*[0x14] Sample of PDebugStatus */ #define XDM_TRAX_PDDATA 0x100054 /*[0x15] Sample of PDebugData */ #define XDM_TRAX_STOP_PC 0x100058 /*[0x16] X */ #define XDM_TRAX_STOP_ICNT 0x10005C /*[0x16] X */ #define XDM_TRAX_MSG_STATUS 0x100060 /*[0x17] X */ #define XDM_TRAX_FSM_STATUS 0x100064 /*[0x18] X */ #define XDM_TRAX_IB_STATUS 0x100068 /*[0x19] X */ #define XDM_TRAX_STOPCNT 0x10006C /*[0x1A] X */ /* Performance Monitoring Counters */ #define XDM_PERF_PMG 0x101000 /*[0x20] perf.
mon. global control register */
#define XDM_PERF_INTPC 0x101010 /*[0x24] perf. mon. interrupt PC */
#define XDM_PERF_PM0 0x101080 /*[0x28] perf. mon. counter 0 value */
#define XDM_PERF_PM1 0x101084 /*[0x29] perf. mon. counter 1 value */
#define XDM_PERF_PM2 0x101088 /*[0x2A] perf. mon. counter 2 value */
#define XDM_PERF_PM3 0x10108C /*[0x2B] perf. mon. counter 3 value */
#define XDM_PERF_PM4 0x101090 /*[0x2C] perf. mon. counter 4 value */
#define XDM_PERF_PM5 0x101094 /*[0x2D] perf. mon. counter 5 value */
#define XDM_PERF_PM6 0x101098 /*[0x2E] perf. mon. counter 6 value */
#define XDM_PERF_PM7 0x10109C /*[0x2F] perf. mon. counter 7 value */
#define XDM_PERF_PM(n) (0x101080+((n)<<2)) /* perfmon counter n = 0..7 */

/* Convert ERI/APB address/offset to 7-bit NAR address: */
#define _XDM_ERI_TO_NAR(a) ( ((a)&0x3F80)==0x0000 ? (((a)>>2) & 0x1F) \
 : ((a)&0x3E00)==0x1000 ? (0x20 | (((a)>>2) & 7) | (((a)>>4) & 0x18)) \
 : ((a)&0x3FC0)==0x2000 ? (0x40 | (((a)>>2) & 0xF)) \
 : ((a)&0x3FE0)==0x3020 ? (0x50 | (((a)>>2) & 0xF)) \
 : ((a)&0x3FFC)==0x3F00 ? 0x60 \
 : ((a)&0x3F80)==0x3F80 ?
(0x60 | (((a)>>2) & 0x1F)) \
 : -1 )
#define XDM_ERI_TO_NAR(a) _XDM_ERI_TO_NAR(a & 0xFFFF)

/* Convert 7-bit NAR address back to ERI/APB address/offset: */
#define _XDM_NAR_TO_APB(a) ((a) <= 0x1F ? (((a) & 0x1F) << 2) \
 : (a) >= 0x20 && (a) <= 0x3F ? (0x1000 | (((a) & 7) << 2) | (((a) & 0x18) << 4)) \
 : (a) >= 0x40 && (a) <= 0x4F ? (0x2000 | (((a) & 0xF) << 2)) \
 : (a) >= 0x58 && (a) <= 0x5F ? (0x3000 | (((a) & 0xF) << 2)) \
 : (a) == 0x60 ? 0x3F00 \
 : (a) >= 0x68 && (a) <= 0x7F ? (0x3F80 | (((a) & 0x1F) << 2)) \
 : -1)
#define XDM_NAR_TO_APB(a) _XDM_NAR_TO_APB((a & 0xFFFF))
#define XDM_NAR_TO_ERI(a) _XDM_NAR_TO_APB((a & 0xFFFF)) | 0x100000

/* Convert APB to ERI address */
#define XDM_APB_TO_ERI(a) ((a) | (0x100000))
#define XDM_ERI_TO_APB(a) ((a) & (0x0FFFFF))

/* Bit definitions within some of the above registers */

#define OCD_ID_LSDDRP 0x01000000
#define OCD_ID_LSDDRP_SHIFT 24
#define OCD_ID_ENDIANESS 0x00000001
#define OCD_ID_ENDIANESS_SHIFT 0
#define OCD_ID_PSO 0x0000000C
#define OCD_ID_PSO_SHIFT 2
#define OCD_ID_TRACEPORT 0x00000080
#define OCD_ID_TRACEPORT_SHIFT 7
#define OCD_ID_LSDDRP_XEA3 0x00000400

/* Power Status register. NOTE: different bit positions in JTAG vs. ERI/APB !
! */ /* ERI/APB: */ #define PWRSTAT_CORE_DOMAIN_ON 0x00000001 /* set if core is powered on */ #define PWRSTAT_CORE_DOMAIN_ON_SHIFT 0 #define PWRSTAT_WAKEUP_RESET 0x00000002 /* [ERI only] 0=cold start, 1=PSO wakeup */ #define PWRSTAT_WAKEUP_RESET_SHIFT 1 #define PWRSTAT_CACHES_LOST_POWER 0x00000004 /* [ERI only] set if caches (/localmems?) lost power */ /* FIXME: does this include local memories? */ #define PWRSTAT_CACHES_LOST_POWER_SHIFT 2 #define PWRSTAT_CORE_STILL_NEEDED 0x00000010 /* set if others keeping core awake */ #define PWRSTAT_CORE_STILL_NEEDED_SHIFT 4 #define PWRSTAT_MEM_DOMAIN_ON 0x00000100 /* set if memory domain is powered on */ #define PWRSTAT_MEM_DOMAIN_ON_SHIFT 8 #define PWRSTAT_DEBUG_DOMAIN_ON 0x00001000 /* set if debug domain is powered on */ #define PWRSTAT_DEBUG_DOMAIN_ON_SHIFT 12 #define PWRSTAT_ALL_ON (PWRSTAT_CORE_DOMAIN_ON | PWRSTAT_MEM_DOMAIN_ON | PWRSTAT_DEBUG_DOMAIN_ON) #define PWRSTAT_CORE_WAS_RESET 0x00010000 /* [APB only] set if core got reset */ #define PWRSTAT_CORE_WAS_RESET_SHIFT 16 #define PWRSTAT_DEBUG_WAS_RESET 0x10000000 /* set if debug module got reset */ #define PWRSTAT_DEBUG_WAS_RESET_SHIFT 28 /* JTAG: */ #define J_PWRSTAT_CORE_DOMAIN_ON 0x01 /* set if core is powered on */ #define J_PWRSTAT_MEM_DOMAIN_ON 0x02 /* set if memory domain is powered on */ #define J_PWRSTAT_DEBUG_DOMAIN_ON 0x04 /* set if debug domain is powered on */ #define J_PWRSTAT_ALL_ON (J_PWRSTAT_CORE_DOMAIN_ON | J_PWRSTAT_MEM_DOMAIN_ON | J_PWRSTAT_DEBUG_DOMAIN_ON) #define J_PWRSTAT_CORE_STILL_NEEDED 0x08 /* set if others keeping core awake */ #define J_PWRSTAT_CORE_WAS_RESET 0x10 /* set if core got reset */ #define J_PWRSTAT_DEBUG_WAS_RESET 0x40 /* set if debug module got reset */ /* Power Control register.
NOTE: different bit positions in JTAG vs. ERI/APB !! */ /* ERI/APB: */ #define PWRCTL_CORE_SHUTOFF 0x00000001 /* [ERI only] core wants to shut off on WAITI */ #define PWRCTL_CORE_SHUTOFF_SHIFT 0 #define PWRCTL_CORE_WAKEUP 0x00000001 /* [APB only] set to force core to stay powered on */ #define PWRCTL_CORE_WAKEUP_SHIFT 0 #define PWRCTL_MEM_WAKEUP 0x00000100 /* set to force memory domain to stay powered on */ #define PWRCTL_MEM_WAKEUP_SHIFT 8 #define PWRCTL_DEBUG_WAKEUP 0x00001000 /* set to force debug domain to stay powered on */ #define PWRCTL_DEBUG_WAKEUP_SHIFT 12 #define PWRCTL_ALL_ON (PWRCTL_CORE_WAKEUP | PWRCTL_MEM_WAKEUP | PWRCTL_DEBUG_WAKEUP) #define PWRCTL_CORE_RESET 0x00010000 /* [APB only] set to assert core reset */ #define PWRCTL_CORE_RESET_SHIFT 16 #define PWRCTL_DEBUG_RESET 0x10000000 /* set to assert debug module reset */ #define PWRCTL_DEBUG_RESET_SHIFT 28 /* JTAG: */ #define J_PWRCTL_CORE_WAKEUP 0x01 /* set to force core to stay powered on */ #define J_PWRCTL_MEM_WAKEUP 0x02 /* set to force memory domain to stay powered on */ #define J_PWRCTL_DEBUG_WAKEUP 0x04 /* set to force debug domain to stay powered on */ #define J_DEBUG_USE 0x80 /* */ #define J_PWRCTL_ALL_ON (J_DEBUG_USE | J_PWRCTL_CORE_WAKEUP | J_PWRCTL_MEM_WAKEUP | J_PWRCTL_DEBUG_WAKEUP) #define J_PWRCTL_DEBUG_ON J_DEBUG_USE | J_PWRCTL_DEBUG_WAKEUP #define J_PWRCTL_CORE_RESET 0x10 /* set to assert core reset */ #define J_PWRCTL_DEBUG_RESET 0x40 /* set to assert debug module reset */ #define J_PWRCTL_WRITE_MASK 0xFF #define J_PWRSTAT_WRITE_MASK 0xFF #define PWRCTL_WRITE_MASK ~0 #define PWRSTAT_WRITE_MASK ~0 / The following are only relevant for JTAG, so perhaps belong in OCD only / /* XDM 5-bit JTAG Instruction Register (IR) values: */ #define XDM_IR_PWRCTL 0x08 /* select 8-bit Power/Reset Control (PRC) */ #define XDM_IR_PWRSTAT 0x09 /* select 8-bit Power/Reset Status (PRS) */ #define XDM_IR_NAR_SEL 0x1c /* select altern.
8-bit NAR / 32-bit NDR (Nexus-style) */ #define XDM_IR_NDR_SEL 0x1d /* select altern. 32-bit NDR / 8-bit NAR (FIXME - functionality not yet in HW) */ #define XDM_IR_IDCODE 0x1e /* select 32-bit JTAG IDCODE */ #define XDM_IR_BYPASS 0x1f /* select 1-bit bypass */ #define XDM_IR_WIDTH 5 /* width of IR for Xtensa TAP */ /* NAR register bits: */ #define XDM_NAR_WRITE 0x01 #define XDM_NAR_ADDR_MASK 0xFE #define XDM_NAR_ADDR_SHIFT 1 #define XDM_NAR_BUSY 0x02 #define XDM_NAR_ERROR 0x01 #define NEXUS_DIR_READ 0x00 #define NEXUS_DIR_WRITE 0x01 / Define DCR register bits / #define DCR_ENABLEOCD 0x0000001 #define DCR_ENABLEOCD_SHIFT 0 #define DCR_DEBUG_INT 0x0000002 #define DCR_DEBUG_INT_SHIFT 1 #define DCR_DEBUG_OVERRIDE 0x0000004 //ER or later #define DCR_DEBUG_OVERRIDE_SHIFT 2 #define DCR_DEBUG_SS_REQ 0x0000008 #define DCR_DEBUG_SS_REQ_SHIFT 3 #define DCR_DEBUG_OVERRIDE_CW 0x0000010 //RD and earlier #define DCR_DEBUG_OVERRIDE_CW_SHIFT 4 #define DCR_MASK_NMI 0x0000020 #define DCR_MASK_NMI_SHIFT 5 #define DCR_STEP_ENABLE 0x0000040 #define DCR_STEP_ENABLE_SHIFT 6 #define DCR_BREAK_IN_EN 0x0010000 #define DCR_BREAK_IN_EN_SHIFT 16 #define DCR_BREAK_OUT_EN 0x0020000 #define DCR_BREAK_OUT_EN_SHIFT 17 #define DCR_DEBUG_INT_EN 0x0040000 #define DCR_DEBUG_INT_EN_SHIFT 18 #define DCR_DBG_SW_ACTIVE 0x0100000 #define DCR_DBG_SW_ACTIVE_SHIFT 20 #define DCR_STALL_IN_EN 0x0200000 #define DCR_STALL_IN_EN_SHIFT 21 #define DCR_DEBUG_OUT_EN 0x0400000 #define DCR_DEBUG_OUT_EN_SHIFT 22 #define DCR_BREAK_OUT_ITO 0x1000000 #define DCR_STALL_OUT_ITO 0x2000000 #define DCR_STALL_OUT_ITO_SHIFT 25 / Define DSR register bits / #define DOSR_STOP_CAUSE_SHIFT 5 #define DOSR_STOP_CAUSE_MASK 0xF #define DOSR_EXECDONE_SHIFT 0 #define DOSR_EXECDONE_ER 0x01 #define DOSR_EXECDONE_SHIFT 0 #define DOSR_EXCEPTION_ER 0x02 #define DOSR_EXCEPTION_SHIFT 1 #define DOSR_BUSY 0x04 #define DOSR_BUSY_SHIFT 2 #define DOSR_OVERRUN 0x08 #define DOSR_OVERRUN_SHIFT 3 #define DOSR_INOCDMODE_ER 0x10 #define DOSR_INOCDMODE_SHIFT 4 #define DOSR_CORE_WROTE_DDR_ER 0x400 #define DOSR_CORE_WROTE_DDR_SHIFT 10 #define DOSR_CORE_READ_DDR_ER 0x800 #define DOSR_CORE_READ_DDR_SHIFT 11 #define DOSR_HOST_WROTE_DDR_ER 0x4000 #define DOSR_HOST_WROTE_DDR_SHIFT 14 #define DOSR_HOST_READ_DDR_ER 0x8000 #define DOSR_HOST_READ_DDR_SHIFT 15 #define DOSR_DEBUG_PEND_BIN 0x10000 #define DOSR_DEBUG_PEND_HOST 0x20000 #define DOSR_DEBUG_PEND_TRAX 0x40000 #define DOSR_DEBUG_BIN 0x100000 #define DOSR_DEBUG_HOST 0x200000 #define DOSR_DEBUG_TRAX 0x400000 #define DOSR_DEBUG_PEND_BIN_SHIFT 16 #define DOSR_DEBUG_PEND_HOST_SHIFT 17 #define DOSR_DEBUG_PEND_TRAX_SHIFT 18 #define DOSR_DEBUG_BREAKIN 0x0100000 #define DOSR_DEBUG_BREAKIN_SHIFT 20 #define DOSR_DEBUG_HOST_SHIFT 21 #define DOSR_DEBUG_TRAX_SHIFT 22 #define DOSR_DEBUG_STALL 0x1000000 #define DOSR_DEBUG_STALL_SHIFT 24 #define DOSR_CORE_ON 0x40000000 #define DOSR_CORE_ON_SHIFT 30 #define DOSR_DEBUG_ON 0x80000000 #define DOSR_DEBUG_ON_SHIFT 31 / Performance monitor registers bits / #define PERF_PMG_ENABLE 0x00000001 /* global enable bit */ #define PERF_PMG_ENABLE_SHIFT 0 #define PERF_PMCTRL_INT_ENABLE 0x00000001 /* assert interrupt on overflow */ #define PERF_PMCTRL_INT_ENABLE_SHIFT 0 #define PERF_PMCTRL_KRNLCNT 0x00000008 /* ignore TRACELEVEL */ #define PERF_PMCTRL_KRNLCNT_SHIFT 3 #define PERF_PMCTRL_TRACELEVEL 0x000000F0 /* count when CINTLEVEL <= TRACELEVEL */ #define PERF_PMCTRL_TRACELEVEL_SHIFT 4 #define PERF_PMCTRL_SELECT 0x00001F00 /* events group selector */ #define PERF_PMCTRL_SELECT_SHIFT 8 #define PERF_PMCTRL_MASK 0xFFFF0000 /* 
events mask */ #define PERF_PMCTRL_MASK_SHIFT 16 #define PERF_PMSTAT_OVERFLOW 0x00000001 /* counter overflowed */ #define PERF_PMSTAT_OVERFLOW_SHIFT 0 #define PERF_PMSTAT_INT 0x00000010 /* interrupt asserted */ #define PERF_PMSTAT_INT_SHIFT 4 #if defined (USE_XDM_REGNAME) || defined (USE_DAP_REGNAME) /* Describes XDM register: */ typedef struct { int reg; char* name; } regdef_t; /* */ static char* regname(regdef_t* list, int reg) { int i = 0; while (list[i].
reg != -1) { if (list[i].reg == reg) break; i++; } return list[i].name; } #if defined (USE_XDM_REGNAME) static regdef_t xdm_reglist[] = { {XDM_OCD_DSR ,"DOSR" }, {XDM_OCD_DDR ,"DDR" }, {XDM_OCD_DDREXEC ,"DDREXEC" }, {XDM_OCD_DIR0EXEC ,"DIR0EXEC"}, {XDM_OCD_DCR_CLR ,"DCR_CLR" }, {XDM_OCD_DCR_SET ,"DCR_SET" }, {XDM_TRAX_CONTROL ,"CONTROL" }, {XDM_TRAX_STATUS ,"STATUS" }, {XDM_TRAX_DATA ,"DATA" }, {XDM_TRAX_ADDRESS ,"ADDRESS" }, {XDM_TRAX_ID ,"TRAX_ID" }, {XDM_TRAX_TRIGGER ,"TRIGGER PC" }, {XDM_TRAX_MATCH ,"PC MATCH" }, {XDM_TRAX_DELAY ,"DELAY CNT." }, {XDM_TRAX_STARTADDR ,"START ADDRESS"}, {XDM_TRAX_ENDADDR ,"END ADDRESS" }, {XDM_TRAX_DEBUGPC ,"DEBUG PC" }, {XDM_TRAX_P4CHANGE ,"P4 CHANGE" }, {XDM_TRAX_P4REV ,"P4 REV." }, {XDM_TRAX_P4DATE ,"P4 DATE" }, {XDM_TRAX_P4TIME ,"P4 TIME" }, {XDM_TRAX_PDSTATUS ,"PD STATUS" }, {XDM_TRAX_PDDATA ,"PD DATA" }, {XDM_TRAX_STOP_PC ,"STOP PC" }, {XDM_TRAX_STOP_ICNT ,"STOP ICNT" }, {XDM_TRAX_MSG_STATUS,"MSG STAT.
" }, {XDM_TRAX_FSM_STATUS,"FSM STAT." }, {XDM_TRAX_IB_STATUS ,"IB STAT." }, {XDM_OCD_ID ,"OCD_ID" }, {XDM_OCD_DIR0 ,"DIR0" }, {XDM_OCD_DIR1 ,"DIR1" }, {XDM_OCD_DIR2 ,"DIR2" }, {XDM_OCD_DIR3 ,"DIR3" }, {XDM_OCD_DIR4 ,"DIR4" }, {XDM_OCD_DIR5 ,"DIR5" }, {XDM_OCD_DIR6 ,"DIR6" }, {XDM_OCD_DIR7 ,"DIR7" }, {XDM_PERF_PMG ,"PMG" }, {XDM_PERF_INTPC ,"INTPC" }, {XDM_PERF_PM0 ,"PM0 " }, {XDM_PERF_PM1 ,"PM1 " }, {XDM_PERF_PM2 ,"PM2 " }, {XDM_PERF_PM3 ,"PM3 " }, {XDM_PERF_PM4 ,"PM4 " }, {XDM_PERF_PM5 ,"PM5 " }, {XDM_PERF_PM6 ,"PM6 " }, {XDM_PERF_PM7 ,"PM7 " }, {XDM_PERF_PMCTRL0 ,"PMCTRL0"}, {XDM_PERF_PMCTRL1 ,"PMCTRL1"}, {XDM_PERF_PMCTRL2 ,"PMCTRL2"}, {XDM_PERF_PMCTRL3 ,"PMCTRL3"}, {XDM_PERF_PMCTRL4 ,"PMCTRL4"}, {XDM_PERF_PMCTRL5 ,"PMCTRL5"}, {XDM_PERF_PMCTRL6 ,"PMCTRL6"}, {XDM_PERF_PMCTRL7 ,"PMCTRL7"}, {XDM_PERF_PMSTAT0 ,"PMSTAT0"}, {XDM_PERF_PMSTAT1 ,"PMSTAT1"}, {XDM_PERF_PMSTAT2 ,"PMSTAT2"}, {XDM_PERF_PMSTAT3 ,"PMSTAT3"}, {XDM_PERF_PMSTAT4 ,"PMSTAT4"}, {XDM_PERF_PMSTAT5 ,"PMSTAT5"}, {XDM_PERF_PMSTAT6 ,"PMSTAT6"}, {XDM_PERF_PMSTAT7 ,"PMSTAT7"}, {XDM_MISC_PWRCTL ,"PWRCTL" }, {XDM_MISC_PWRSTAT ,"PWRSTAT" }, {XDM_MISC_ERISTAT ,"ERISTAT" }, {XDM_MISC_DATETIME ,"DATETIME"}, {XDM_MISC_UBID ,"UBID" }, {XDM_MISC_CID ,"CID" }, {XDM_CS_ITCTRL ,"ITCTRL" }, {XDM_CS_CLAIMSET ,"CLAIMSET" }, {XDM_CS_CLAIMCLR ,"CLAIMCLR" }, {XDM_CS_LOCK_ACCESS ,"LOCK_ACCESS"}, {XDM_CS_LOCK_STATUS ,"LOCK_STATUS"}, {XDM_CS_AUTH_STATUS ,"AUTH_STATUS"}, {XDM_CS_DEV_ID ,"DEV_ID" }, {XDM_CS_DEV_TYPE ,"DEV_TYPE" }, {XDM_CS_PER_ID4 ,"PER_ID4" }, {XDM_CS_PER_ID5 ,"PER_ID5" }, {XDM_CS_PER_ID6 ,"PER_ID6" }, {XDM_CS_PER_ID7 ,"PER_ID7" }, {XDM_CS_PER_ID0 ,"PER_ID0" }, {XDM_CS_PER_ID1 ,"PER_ID1" }, {XDM_CS_PER_ID2 ,"PER_ID2" }, {XDM_CS_PER_ID3 ,"PER_ID3" }, {XDM_CS_COMP_ID0 ,"COMP_ID0" }, {XDM_CS_COMP_ID1 ,"COMP_ID1" }, {XDM_CS_COMP_ID2 ,"COMP_ID2" }, {XDM_CS_COMP_ID3 ,"COMP_ID3" }, {-1 ,"?
??" }, }; #endif #endif #endif /* _XDM_REGS_H_ */
/* */
/* */

#ifndef XTENSA_CACHE_H
#define XTENSA_CACHE_H

#include <xtensa/config/core.h>		/* restored include (assumed): provides the XCHAL_* configuration macros used below */

/*  Only define things for C code.  */
#if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__)

/***************************   CACHE   ***************************/

/* All the macros are in the lower case now and some of them share names with
   the existing functions from hal.h.  Including this header defines
   XTHAL_USE_CACHE_MACROS, which tells hal.h that the macro versions are in
   use. */

/***  INSTRUCTION CACHE  ***/

#define XTHAL_USE_CACHE_MACROS

#if XCHAL_ICACHE_SIZE > 0
# define xthal_icache_line_invalidate(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("ihi %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
#else
# define xthal_icache_line_invalidate(addr)	do {/*nothing*/} while(0)
#endif

#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
# define xthal_icache_line_lock(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("ipfl %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
# define xthal_icache_line_unlock(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("ihu %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
#else
# define xthal_icache_line_lock(addr)	do {/*nothing*/} while(0)
# define xthal_icache_line_unlock(addr)	do {/*nothing*/} while(0)
#endif

/* */
#define xthal_icache_sync() \
		__asm__ __volatile__("isync":::"memory")

/***  DATA CACHE  ***/

#if XCHAL_DCACHE_SIZE > 0

# include <xtensa/tie/xt_datacache.h>	/* restored include (assumed): provides the XT_DPFR/XT_DPFW prefetch intrinsics */

# define xthal_dcache_line_invalidate(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("dhi %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
# define xthal_dcache_line_writeback(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("dhwb %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
# define xthal_dcache_line_writeback_inv(addr)	do { void *__a = (void*)(addr); \
		__asm__ __volatile__("dhwbi %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
# define xthal_dcache_sync() \
		__asm__ __volatile__("" /*"dsync"?*/:::"memory")
# define xthal_dcache_line_prefetch_for_read(addr) do { \
		XT_DPFR((const int*)addr, 0); \
		} while(0)
#else
# define xthal_dcache_line_invalidate(addr)		do {/*nothing*/} while(0)
# define xthal_dcache_line_writeback(addr)		do {/*nothing*/} while(0)
# define xthal_dcache_line_writeback_inv(addr)		do {/*nothing*/} while(0)
# define xthal_dcache_sync()				__asm__ __volatile__("":::"memory")
# define xthal_dcache_line_prefetch_for_read(addr)	do {/*nothing*/} while(0)
#endif
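/* Illustrative sketch (not part of the original header): a typical use of the
   line macros above to make a buffer coherent around an inbound DMA transfer.
   The function name is hypothetical; XCHAL_DCACHE_LINESIZE comes from the
   configuration headers. */
static inline void example_dcache_writeback_inv_region(void *buf, unsigned len)
{
    unsigned addr = (unsigned)buf & ~(XCHAL_DCACHE_LINESIZE - 1);
    unsigned end  = (unsigned)buf + len;

    /* Walk the region a cache line at a time: push dirty data out to memory
       and drop the line, so the next read observes what the DMA wrote. */
    for (; addr < end; addr += XCHAL_DCACHE_LINESIZE)
        xthal_dcache_line_writeback_inv((void *)addr);
    xthal_dcache_sync();
}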
*/:::"memory") # define xthal_dcache_line_prefetch_for_read(addr) do { \ XT_DPFR((const int*)addr, 0); \ } while(0) #else # define xthal_dcache_line_invalidate(addr) do {/*nothing*/} while(0) # define xthal_dcache_line_writeback(addr) do {/*nothing*/} while(0) # define xthal_dcache_line_writeback_inv(addr) do {/*nothing*/} while(0) # define xthal_dcache_sync() __asm__ __volatile__("":::"memory") # define xthal_dcache_line_prefetch_for_read(addr) do {/*nothing*/} while(0) #endif #if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE # define xthal_dcache_line_lock(addr) do { void *__a = (void*)(addr); \ __asm__ __volatile__("dpfl %0, 0" :: "a"(__a) : "memory"); \ } while(0) # define xthal_dcache_line_unlock(addr) do { void *__a = (void*)(addr); \ __asm__ __volatile__("dhu %0, 0" :: "a"(__a) : "memory"); \ } while(0) #else # define xthal_dcache_line_lock(addr) do {/*nothing*/} while(0) # define xthal_dcache_line_unlock(addr) do {/*nothing*/} while(0) #endif #if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK # define xthal_dcache_line_prefetch_for_write(addr) do { \ XT_DPFW((const int*)addr, 0); \ } while(0) #else # define xthal_dcache_line_prefetch_for_write(addr) do {/*nothing*/} while(0) #endif / Block Operations / #if XCHAL_DCACHE_SIZE > 0 && XCHAL_HAVE_CACHE_BLOCKOPS /* upgrades */ # define _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, type) \ { \ type((const int*)addr, size); \ } /*downgrades */ # define _XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, type) \ unsigned _s = size; \ unsigned _a = (unsigned) addr; \ do { \ unsigned __s = (_s > XCHAL_DCACHE_SIZE) ?
127
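/* Illustrative sketch (not part of the original header): priming a region the
   CPU is about to overwrite.  On configurations with block prefetch this
   issues one block hint; elsewhere it compiles away.  The function name is
   hypothetical. */
static inline void example_prime_region_for_write(void *buf, unsigned len)
{
    /* Tell the cache the whole region will be written soon, so lines are
       allocated before the stores arrive. */
    xthal_dcache_block_prefetch_for_write(buf, len);
}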
/***************************   INTERRUPTS   ***************************/

/* */

#if XCHAL_HAVE_INTERRUPTS

# define XTHAL_GET_INTENABLE()	({ int __intenable; \
		__asm__("rsr.intenable %0" : "=a"(__intenable)); \
		__intenable; })
# define XTHAL_SET_INTENABLE(v)	do { int __intenable = (int)(v); \
		__asm__ __volatile__("wsr.intenable %0" :: "a"(__intenable):"memory"); \
		} while(0)
# define XTHAL_GET_INTERRUPT()	({ int __interrupt; \
		__asm__ __volatile__("rsr.interrupt %0" : "=a"(__interrupt)); \
		__interrupt; })
#ifdef __clang__
// TODO: LLVM-195. Currently clang does not support INTSET alias for INTERRUPT special reg
# define XTHAL_SET_INTSET(v)	do { int __interrupt = (int)(v); \
		__asm__ __volatile__("wsr.interrupt %0" :: "a"(__interrupt):"memory"); \
		} while(0)
#else
# define XTHAL_SET_INTSET(v)	do { int __interrupt = (int)(v); \
		__asm__ __volatile__("wsr.intset %0" :: "a"(__interrupt):"memory"); \
		} while(0)
#endif
# define XTHAL_SET_INTCLEAR(v)	do { int __interrupt = (int)(v); \
		__asm__ __volatile__("wsr.intclear %0" :: "a"(__interrupt):"memory"); \
		} while(0)

# define XTHAL_GET_CCOUNT()	({ int __ccount; \
		__asm__ __volatile__("rsr.ccount %0" : "=a"(__ccount)); \
		__ccount; })
# define XTHAL_SET_CCOUNT(v)	do { int __ccount = (int)(v); \
		__asm__ __volatile__("wsr.ccount %0" :: "a"(__ccount):"memory"); \
		} while(0)
# define _XTHAL_GET_CCOMPARE(n)	({ int __ccompare; \
		__asm__("rsr.ccompare" #n " %0" : "=a"(__ccompare)); \
		__ccompare; })
# define XTHAL_GET_CCOMPARE(n)	_XTHAL_GET_CCOMPARE(n)
# define _XTHAL_SET_CCOMPARE(n,v)	do { int __ccompare = (int)(v); \
		__asm__ __volatile__("wsr.ccompare" #n " %0 ; esync" :: "a"(__ccompare):"memory"); \
		} while(0)
# define XTHAL_SET_CCOMPARE(n,v)	_XTHAL_SET_CCOMPARE(n,v)

#else

# define XTHAL_GET_INTENABLE()		0
# define XTHAL_SET_INTENABLE(v)		do {/*nothing*/} while(0)
# define XTHAL_GET_INTERRUPT()		0
# define XTHAL_SET_INTSET(v)		do {/*nothing*/} while(0)
# define XTHAL_SET_INTCLEAR(v)		do {/*nothing*/} while(0)
# define XTHAL_GET_CCOUNT()		0
# define XTHAL_SET_CCOUNT(v)		do {/*nothing*/} while(0)
# define XTHAL_GET_CCOMPARE(n)		0
# define XTHAL_SET_CCOMPARE(n,v)	do {/*nothing*/} while(0)

#endif
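/* Illustrative sketch (not part of the original header): arming CCOMPARE0 for
   a periodic tick with the macros above.  The period and function name are
   hypothetical, and the interrupt wired to CCOMPARE0 must be enabled
   separately. */
#define EXAMPLE_TICK_CYCLES 10000	/* assumed period, in core clock cycles */

static inline void example_arm_next_tick(void)
{
    /* Schedule the next match relative to the current cycle count; writing
       CCOMPARE0 also clears a pending CCOMPARE0 timer interrupt. */
    XTHAL_SET_CCOMPARE(0, XTHAL_GET_CCOUNT() + EXAMPLE_TICK_CYCLES);
}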
intenable %0" : "=a"(__intenable)); \ __intenable; }) # define XTHAL_SET_INTENABLE(v) do { int __intenable = (int)(v); \ __asm__ __volatile__("wsr.intenable %0" :: "a"(__intenable):"memory"); \ } while(0) # define XTHAL_GET_INTERRUPT() ({ int __interrupt; \ __asm__ __volatile__("rsr.interrupt %0" : "=a"(__interrupt)); \ __interrupt; }) #ifdef __clang__ // TODO: LLVM-195. Currently clang does not support INTSET alias for INTERRUPT special reg # define XTHAL_SET_INTSET(v) do { int __interrupt = (int)(v); \ __asm__ __volatile__("wsr.interrupt %0" :: "a"(__interrupt):"memory"); \ } while(0) #else # define XTHAL_SET_INTSET(v) do { int __interrupt = (int)(v); \ __asm__ __volatile__("wsr.intset %0" :: "a"(__interrupt):"memory"); \ } while(0) #endif # define XTHAL_SET_INTCLEAR(v) do { int __interrupt = (int)(v); \ __asm__ __volatile__("wsr.intclear %0" :: "a"(__interrupt):"memory"); \ } while(0) # define XTHAL_GET_CCOUNT() ({ int __ccount; \ __asm__ __volatile__("rsr.
127
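/* Illustrative sketch (not part of the original header): briefly masking a
   set of interrupt sources via INTENABLE.  Names are hypothetical.  Note the
   read-modify-write below is not atomic; real code must prevent preemption
   between the read and the write (e.g. by raising PS.INTLEVEL first). */
static inline unsigned example_mask_ints(unsigned mask)
{
    unsigned old = (unsigned)XTHAL_GET_INTENABLE();
    XTHAL_SET_INTENABLE(old & ~mask);	/* disable the selected sources */
    return old;				/* caller passes this to the restore step */
}

static inline void example_restore_ints(unsigned old)
{
    XTHAL_SET_INTENABLE(old);
}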
ccount %0" : "=a"(__ccount)); \ __ccount; }) # define XTHAL_SET_CCOUNT(v) do { int __ccount = (int)(v); \ __asm__ __volatile__("wsr.ccount %0" :: "a"(__ccount):"memory"); \ } while(0) # define _XTHAL_GET_CCOMPARE(n) ({ int __ccompare; \ __asm__("rsr.ccompare" #n " %0" : "=a"(__ccompare)); \ __ccompare; }) # define XTHAL_GET_CCOMPARE(n) _XTHAL_GET_CCOMPARE(n) # define _XTHAL_SET_CCOMPARE(n,v) do { int __ccompare = (int)(v); \ __asm__ __volatile__("wsr.ccompare" #n " %0 ; esync" :: "a"(__ccompare):"memory"); \ } while(0) # define XTHAL_SET_CCOMPARE(n,v) _XTHAL_SET_CCOMPARE(n,v) #else # define XTHAL_GET_INTENABLE() 0 # define XTHAL_SET_INTENABLE(v) do {/*nothing*/} while(0) # define XTHAL_GET_INTERRUPT() 0 # define XTHAL_SET_INTSET(v) do {/*nothing*/} while(0) # define XTHAL_SET_INTCLEAR(v) do {/*nothing*/} while(0) # define XTHAL_GET_CCOUNT() 0 # define XTHAL_SET_CCOUNT(v) do {/*nothing*/} while(0) # define XTHAL_GET_CCOMPARE(n) 0 # define XTHAL_SET_CCOMPARE(n,v) do {/*nothing*/} while(0) #endif /* New functions added to accomodate XEA3 and allow deprecation of older functions.
127
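/* Illustrative sketch (not part of the original header): a minimal spinlock on
   top of XTHAL_COMPARE_AND_SET.  The convention (0 = free, 1 = held) and the
   function names are hypothetical. */
static inline void example_spin_lock(int *lock)
{
    /* XTHAL_COMPARE_AND_SET returns the value it read, so a nonzero result
       means the lock was already held and we keep spinning. */
    while (XTHAL_COMPARE_AND_SET(lock, 0, 1) != 0)
        ;
}

static inline void example_spin_unlock(int *lock)
{
    *lock = 0;	/* a plain store suffices to release on a single core */
}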
#if XCHAL_HAVE_EXTERN_REGS

static inline unsigned XTHAL_RER (unsigned int reg)
{
    unsigned result;

    __asm__ __volatile__ (
    "   rer     %0, %1"
	: "=a" (result)
	: "a" (reg)
	: "memory");

    return result;
}

static inline void XTHAL_WER (unsigned reg, unsigned value)
{
    __asm__ __volatile__ (
    "   wer     %0, %1"
	:
	: "a" (value), "a" (reg)
	: "memory");
}

#endif /* XCHAL_HAVE_EXTERN_REGS */
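/* Illustrative sketch (not part of the original header): RER/WER address an
   implementation-defined external register file, so register numbers are
   entirely system specific.  EXAMPLE_ERI_REG below is a placeholder, not a
   real address. */
#if XCHAL_HAVE_EXTERN_REGS
#define EXAMPLE_ERI_REG 0x100040	/* hypothetical external register number */

static inline void example_toggle_extern_reg(void)
{
    unsigned v = XTHAL_RER(EXAMPLE_ERI_REG);
    XTHAL_WER(EXAMPLE_ERI_REG, v ^ 1u);	/* flip bit 0 and write it back */
}
#endif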
/* */
static inline void xthal_mpu_set_entry (xthal_MPU_entry entry)
{
#if XCHAL_HAVE_MPU
    __asm__ __volatile__("j 1f\n\t.align 8\n\t1: memw\n\twptlb %0, %1\n\t"
	: : "a" (entry.at), "a"(entry.as));
#endif
}

/* Same as xthal_mpu_set_entry, except that this function must not be used to
   change the MPU entry that covers the currently executing code: it omits
   the alignment jump and memw barrier that make such an update safe. */
static inline void xthal_mpu_set_entry_ (xthal_MPU_entry entry)
{
#if XCHAL_HAVE_MPU
    __asm__ __volatile__("wptlb %0, %1\n\t" : : "a" (entry.at), "a"(entry.as));
#endif
}

#endif /* C code */

#endif /*XTENSA_CACHE_H*/
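/* Illustrative sketch (not part of the original headers): programming one MPU
   entry with the function above.  The encodings of the 'as' and 'at' words
   are configuration specific, so the parameters here are placeholders to be
   built with the HAL's MPU helpers. */
#if XCHAL_HAVE_MPU
static inline void example_program_mpu_entry(unsigned as_word, unsigned at_word)
{
    xthal_MPU_entry e;
    e.as = as_word;		/* placeholder: start address / valid bit word */
    e.at = at_word;		/* placeholder: access rights / memory type word */
    xthal_mpu_set_entry(e);	/* safe variant: aligns and issues memw */
}
#endif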
/* xtensa-versions.h -- definitions of Xtensa version and release numbers

   This file defines most Xtensa-related product versions and releases that
   exist so far.  It also provides a bit of information about which ones are
   current.  This file changes every release, as versions/releases get
   added.  */

// $Id: //depot/rel/Foxhill/dot.9/Xtensa/Software/misc/xtensa-versions.h.tpp#1 $

/* Copyright (c) 2006-2018 Tensilica Inc.

   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to
   deal in the Software without restriction, including without limitation the
   rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
   sell copies of the Software, and to permit persons to whom the Software is
   furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.
   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
   THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   DEALINGS IN THE SOFTWARE.  */

#ifndef XTENSA_VERSIONS_H
#define XTENSA_VERSIONS_H

/* */

/*  Hardware (Xtensa/Diamond processor) versions:  */

#define XTENSA_HWVERSION_T1020_0      102000	/* versions T1020.0 */
#define XTENSA_HWCIDSCHEME_T1020_0    10
#define XTENSA_HWCIDVERS_T1020_0      2
#define XTENSA_HWVERSION_T1020_1      102001	/* versions T1020.1 */
#define XTENSA_HWCIDSCHEME_T1020_1    10
#define XTENSA_HWCIDVERS_T1020_1      3
#define XTENSA_HWVERSION_T1020_2      102002	/* versions T1020.2 */
#define XTENSA_HWCIDSCHEME_T1020_2    10
#define XTENSA_HWCIDVERS_T1020_2      4
#define XTENSA_HWVERSION_T1020_2B     102002	/* versions T1020.2b */
#define XTENSA_HWCIDSCHEME_T1020_2B   10
#define XTENSA_HWCIDVERS_T1020_2B     5
#define XTENSA_HWVERSION_T1020_3      102003	/* versions T1020.3 */
#define XTENSA_HWCIDSCHEME_T1020_3    10
#define XTENSA_HWCIDVERS_T1020_3      6
#define XTENSA_HWVERSION_T1020_4      102004	/* versions T1020.4 */
#define XTENSA_HWCIDSCHEME_T1020_4    10
#define XTENSA_HWCIDVERS_T1020_4      7
#define XTENSA_HWVERSION_T1030_0      103000	/* versions T1030.0 */
#define XTENSA_HWCIDSCHEME_T1030_0    10
#define XTENSA_HWCIDVERS_T1030_0      9
#define XTENSA_HWVERSION_T1030_1      103001	/* versions T1030.1 */
#define XTENSA_HWCIDSCHEME_T1030_1    10
#define XTENSA_HWCIDVERS_T1030_1      10
#define XTENSA_HWVERSION_T1030_2      103002	/* versions T1030.2 */
#define XTENSA_HWCIDSCHEME_T1030_2    10
#define XTENSA_HWCIDVERS_T1030_2      11
#define XTENSA_HWVERSION_T1030_3      103003	/* versions T1030.3 */
#define XTENSA_HWCIDSCHEME_T1030_3    10
#define XTENSA_HWCIDVERS_T1030_3      12
#define XTENSA_HWVERSION_T1040_0      104000	/* versions T1040.0 */
#define XTENSA_HWCIDSCHEME_T1040_0    10
#define XTENSA_HWCIDVERS_T1040_0      15
#define XTENSA_HWVERSION_T1040_1      104001	/* versions T1040.