
/cpukit/score/cpu/arm/rtems/score/cpu.h

https://bitbucket.org/cdcs/leon-rtems
/**
 * @file
 *
 * @ingroup ScoreCPU
 *
 * @brief ARM architecture support API.
 */

/*
 * $Id$
 *
 * This include file contains information pertaining to the ARM
 * processor.
 *
 * Copyright (c) 2009-2011 embedded brains GmbH.
 *
 * Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
 *
 * Copyright (c) 2006 OAR Corporation
 *
 * Copyright (c) 2002 Advent Networks, Inc.
 *                    Jay Monkman <jmonkman@adventnetworks.com>
 *
 * COPYRIGHT (c) 2000 Canon Research Centre France SA.
 * Emmanuel Raguet, mailto:raguet@crf.canon.fr
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.com/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/types.h>
#include <rtems/score/arm.h>

#if defined(ARM_MULTILIB_ARCH_V4)

/**
 * @defgroup ScoreCPUARM ARM Specific Support
 *
 * @ingroup ScoreCPU
 *
 * @brief ARM specific support.
 *
 * @{
 */

#ifdef __thumb__
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
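
/*
 * The switch macros above bracket inline assembly that must execute in ARM
 * state even when the surrounding code is compiled for Thumb: `bx pc` enters
 * ARM state, and the trailing `add`/`bx` pair returns to Thumb state.  A
 * minimal usage sketch (the read_cpsr() helper is hypothetical, not part of
 * this API; the pattern matches arm_interrupt_enable() below):
 *
 * @code
 * static inline uint32_t read_cpsr( void )
 * {
 *   uint32_t cpsr;
 *   ARM_SWITCH_REGISTERS;
 *
 *   __asm__ volatile (
 *     ARM_SWITCH_TO_ARM
 *     "mrs %[cpsr], cpsr\n"
 *     ARM_SWITCH_BACK
 *     : [cpsr] "=&r" (cpsr) ARM_SWITCH_ADDITIONAL_OUTPUT
 *   );
 *
 *   return cpsr;
 * }
 * @endcode
 */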

/**
 * @name Program Status Register
 *
 * @{
 */

#define ARM_PSR_N (1 << 31)
#define ARM_PSR_Z (1 << 30)
#define ARM_PSR_C (1 << 29)
#define ARM_PSR_V (1 << 28)
#define ARM_PSR_Q (1 << 27)
#define ARM_PSR_J (1 << 24)
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
#define ARM_PSR_E (1 << 9)
#define ARM_PSR_A (1 << 8)
#define ARM_PSR_I (1 << 7)
#define ARM_PSR_F (1 << 6)
#define ARM_PSR_T (1 << 5)
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f

/** @} */
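
/*
 * The mode field of the CPSR can be decoded with the masks above.  A small
 * sketch (the helper name is illustrative, not part of this API):
 *
 * @code
 * static inline uint32_t arm_psr_get_mode( uint32_t psr )
 * {
 *   return ( psr & ARM_PSR_M_MASK ) >> ARM_PSR_M_SHIFT;
 * }
 *
 * // e.g. arm_psr_get_mode( psr ) == ARM_PSR_M_SVC in supervisor mode
 * @endcode
 */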

/** @} */

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

/**
 * @addtogroup ScoreCPU
 *
 * @{
 */

/* If someone uses THUMB we assume she wants minimal code size */
#ifdef __thumb__
  #define CPU_INLINE_ENABLE_DISPATCH FALSE
#else
  #define CPU_INLINE_ENABLE_DISPATCH TRUE
#endif

#if defined(__ARMEL__)
  #define CPU_BIG_ENDIAN FALSE
  #define CPU_LITTLE_ENDIAN TRUE
#elif defined(__ARMEB__)
  #define CPU_BIG_ENDIAN TRUE
  #define CPU_LITTLE_ENDIAN FALSE
#else
  #error "unknown endianness"
#endif

#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER 0

#if ( ARM_HAS_FPU == 1 )
  #define CPU_HARDWARE_FP TRUE
#else
  #define CPU_HARDWARE_FP FALSE
#endif

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE

#define CPU_STACK_GROWS_UP FALSE

/* XXX Why 32? */
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 * The interrupt mask disables only normal interrupts (IRQ).
 *
 * In order to support fast interrupts (FIQ) such that they can do something
 * useful, we have to disable the operating system support for FIQs.  Having
 * operating system support for them would require that FIQs are disabled
 * during critical sections of the operating system and application.  At this
 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
 * the non critical sections of IRQs, so here they would have a small
 * advantage.  Without operating system support, the FIQs can execute at any
 * time (of course not during the service of another FIQ).  If someone needs
 * operating system support for a FIQ, she can trigger a software interrupt and
 * service the request in a two-step process.
 */
#define CPU_MODES_INTERRUPT_MASK 0x80
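
/*
 * Note that CPU_MODES_INTERRUPT_MASK has the value of the IRQ disable bit of
 * the PSR (on ARMv4 builds it equals ARM_PSR_I).  A minimal sketch of how a
 * saved status word can be tested (the helper is illustrative only):
 *
 * @code
 * static inline bool arm_irqs_are_masked( uint32_t psr )
 * {
 *   return ( psr & CPU_MODES_INTERRUPT_MASK ) != 0;
 * }
 * @endcode
 */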

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_INTERRUPT_NUMBER_OF_VECTORS 8

#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here.  Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there is a CLZ instruction which could be
 * used to implement this much more quickly than the default macro.
 */
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
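
/*
 * A hedged sketch of what a CLZ based find-first-set could look like on
 * ARM V5 and later, using the GCC __builtin_clz() intrinsic (this is an
 * illustration, not the macro actually used by this port):
 *
 * @code
 * static inline unsigned int arm_find_first_set( uint32_t value )
 * {
 *   // Returns 0 for the most significant bit, 31 for the least
 *   // significant bit; value must be non-zero for __builtin_clz().
 *   return (unsigned int) __builtin_clz( value );
 * }
 * @endcode
 */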

/** @} */

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreCPU
 *
 * @{
 */

typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
} Context_Control;

typedef struct {
  /* Not supported */
} Context_Control_fp;

SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context;

extern uint32_t arm_cpu_mode;

static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#else
  level = 0;
#endif

  return level;
}

static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}

static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie ) \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )
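
/*
 * A minimal usage sketch of the ISR disable/flash/enable macros (the
 * variable name is illustrative):
 *
 * @code
 * uint32_t level;
 *
 * _CPU_ISR_Disable( level );
 *   // first part of the critical section
 * _CPU_ISR_Flash( level );   // momentarily restore the previous level
 *   // second part of the critical section
 * _CPU_ISR_Enable( level );
 * @endcode
 */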

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp
);

#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#define _CPU_Context_Restart_self( _the_context ) \
  _CPU_Context_restore( (_the_context) )

#define _CPU_Context_Fp_start( _base, _offset ) \
  ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

#define _CPU_Fatal_halt( _err ) \
  do { \
    uint32_t _level; \
    uint32_t _error = _err; \
    _CPU_ISR_Disable( _level ); \
    __asm__ volatile ("mov r0, %0\n" \
      : "=r" (_error) \
      : "0" (_error) \
      : "r0" ); \
    while (1); \
  } while (0)

void _CPU_Initialize( void );

#define _CPU_Initialize_vectors()

void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

#if defined(ARM_MULTILIB_ARCH_V7M)
  void _ARMV7M_Start_multitasking( Context_Control *bsp, Context_Control *heir );
  void _ARMV7M_Stop_multitasking( Context_Control *bsp )
    RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
  #define _CPU_Stop_multitasking _ARMV7M_Stop_multitasking
#endif

void _CPU_Context_save_fp( Context_Control_fp **fp_context_ptr );

void _CPU_Context_restore_fp( Context_Control_fp **fp_context_ptr );

static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
    "BIC %1, %1, #0xff0000\n"
    "MOV %0, %0, ROR #8\n"
    "EOR %0, %0, %1, LSR #8\n"
    : "=r" (value), "=r" (tmp)
    : "0" (value), "1" (tmp));
  return value;
#endif
}

static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
#endif
}
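
/*
 * A minimal usage sketch for the byte swap helpers (the values are
 * illustrative):
 *
 * @code
 * uint32_t swapped32 = CPU_swap_u32( 0x12345678U );  // yields 0x78563412
 * uint16_t swapped16 = CPU_swap_u16( 0x1234U );      // yields 0x3412
 * @endcode
 */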

/** @} */

#if defined(ARM_MULTILIB_ARCH_V4)

/**
 * @addtogroup ScoreCPUARM
 *
 * @{
 */

typedef struct {
  uint32_t r0;
  uint32_t r1;
  uint32_t r2;
  uint32_t r3;
  uint32_t r4;
  uint32_t r5;
  uint32_t r6;
  uint32_t r7;
  uint32_t r8;
  uint32_t r9;
  uint32_t r10;
  uint32_t r11;
  uint32_t r12;
  uint32_t sp;
  uint32_t lr;
  uint32_t pc;
  uint32_t cpsr;
} arm_cpu_context;

typedef void arm_exc_abort_handler( arm_cpu_context *context );

typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8
} Arm_symbolic_exception_name;

static inline uint32_t arm_status_irq_enable( void )
{
  uint32_t arm_switch_reg;
  uint32_t psr;

  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[psr], cpsr\n"
    "bic %[arm_switch_reg], %[psr], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [psr] "=&r" (psr)
  );

  return psr;
}

static inline void arm_status_restore( uint32_t psr )
{
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[psr]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [psr] "r" (psr)
  );

  RTEMS_COMPILER_MEMORY_BARRIER();
}
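
/*
 * A minimal usage sketch for the status helpers above: temporarily enable
 * IRQs, then restore the previous state:
 *
 * @code
 * uint32_t psr = arm_status_irq_enable();
 *
 * // code that may be interrupted by IRQs
 *
 * arm_status_restore( psr );
 * @endcode
 */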

void arm_exc_data_abort_set_handler( arm_exc_abort_handler handler );

void arm_exc_data_abort( void );

void arm_exc_prefetch_abort_set_handler( arm_exc_abort_handler handler );

void arm_exc_prefetch_abort( void );

void bsp_interrupt_dispatch( void );

void arm_exc_interrupt( void );

void arm_exc_undefined( void );
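
/*
 * A hedged sketch of installing a custom data abort handler (the handler
 * body and function names are illustrative; printk() is assumed to be
 * available via <rtems/bspIo.h>):
 *
 * @code
 * static void my_data_abort_handler( arm_cpu_context *context )
 * {
 *   printk( "data abort at pc 0x%08x\n", (unsigned) context->pc );
 * }
 *
 * static void install_abort_handlers( void )
 * {
 *   // e.g. called once during BSP startup
 *   arm_exc_data_abort_set_handler( my_data_abort_handler );
 * }
 * @endcode
 */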

/** @} */

/* XXX This is out of date */
typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_ip;
  uint32_t register_lr;
} CPU_Exception_frame;

typedef CPU_Exception_frame CPU_Interrupt_frame;

#else /* !defined(ARM_MULTILIB_ARCH_V4) */

typedef void CPU_Interrupt_frame;

#endif /* !defined(ARM_MULTILIB_ARCH_V4) */

#ifdef __cplusplus
}
#endif

#endif /* ASM */

#endif /* _RTEMS_SCORE_CPU_H */