
/cpukit/score/cpu/arm/rtems/score/cpu.h

https://bitbucket.org/cdcs/rtems
C Header | 579 lines | 374 code | 112 blank | 93 comment | 5 complexity
Possible License(s): GPL-2.0, BSD-3-Clause
/**
 * @file
 *
 * @ingroup ScoreCPU
 *
 * @brief ARM architecture support API.
 */

/*
 * This include file contains information pertaining to the ARM
 * processor.
 *
 * Copyright (c) 2009-2011 embedded brains GmbH.
 *
 * Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
 *
 * Copyright (c) 2006 OAR Corporation
 *
 * Copyright (c) 2002 Advent Networks, Inc.
 *   Jay Monkman <jmonkman@adventnetworks.com>
 *
 * COPYRIGHT (c) 2000 Canon Research Centre France SA.
 *   Emmanuel Raguet, mailto:raguet@crf.canon.fr
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.com/license/LICENSE.
 */
#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#include <rtems/score/types.h>
#include <rtems/score/arm.h>
#if defined(ARM_MULTILIB_ARCH_V4)

/**
 * @defgroup ScoreCPUARM ARM Specific Support
 *
 * @ingroup ScoreCPU
 *
 * @brief ARM specific support.
 *
 * @{
 */
#ifdef __thumb__
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  #define ARM_SWITCH_BACK \
    "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
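
/*
 * A minimal usage sketch, not part of the original header: the switch
 * macros bracket ARM-only instructions so the same inline assembly
 * also builds in a Thumb configuration.  The guard macro
 * CPU_H_USAGE_SKETCHES and the function name are hypothetical; the
 * pattern mirrors the interrupt helpers further below.
 */
#ifdef CPU_H_USAGE_SKETCHES
static inline uint32_t arm_example_read_cpsr( void )
{
  uint32_t cpsr;
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[cpsr], cpsr\n"
    ARM_SWITCH_BACK
    : [cpsr] "=&r" (cpsr) ARM_SWITCH_ADDITIONAL_OUTPUT
  );

  return cpsr;
}
#endif /* CPU_H_USAGE_SKETCHES */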
/**
 * @name Program Status Register
 *
 * @{
 */

/* The unsigned suffix keeps the shift into bit 31 well defined */
#define ARM_PSR_N (1U << 31)
#define ARM_PSR_Z (1U << 30)
#define ARM_PSR_C (1U << 29)
#define ARM_PSR_V (1U << 28)
#define ARM_PSR_Q (1U << 27)
#define ARM_PSR_J (1U << 24)
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
#define ARM_PSR_E (1U << 9)
#define ARM_PSR_A (1U << 8)
#define ARM_PSR_I (1U << 7)
#define ARM_PSR_F (1U << 6)
#define ARM_PSR_T (1U << 5)
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f

/** @} */

/** @} */
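
/*
 * A small sketch, not part of the original header (the guard macro and
 * function name are hypothetical): decoding the processor mode from a
 * saved status register value with the definitions above.
 */
#ifdef CPU_H_USAGE_SKETCHES
static inline uint32_t arm_example_psr_mode( uint32_t psr )
{
  /* Yields e.g. ARM_PSR_M_SVC (0x13) when the CPU was in Supervisor mode */
  return ( psr & ARM_PSR_M_MASK ) >> ARM_PSR_M_SHIFT;
}
#endif /* CPU_H_USAGE_SKETCHES */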
#endif /* defined(ARM_MULTILIB_ARCH_V4) */

/**
 * @addtogroup ScoreCPU
 *
 * @{
 */
/* If someone uses THUMB we assume she wants minimal code size */
#ifdef __thumb__
  #define CPU_INLINE_ENABLE_DISPATCH FALSE
#else
  #define CPU_INLINE_ENABLE_DISPATCH TRUE
#endif

#if defined(__ARMEL__)
  #define CPU_BIG_ENDIAN FALSE
  #define CPU_LITTLE_ENDIAN TRUE
#elif defined(__ARMEB__)
  #define CPU_BIG_ENDIAN TRUE
  #define CPU_LITTLE_ENDIAN FALSE
#else
  #error "unknown endianness"
#endif
#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER 0

#if ( ARM_HAS_FPU == 1 )
  #define CPU_HARDWARE_FP TRUE
#else
  #define CPU_HARDWARE_FP FALSE
#endif

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE

#define CPU_STACK_GROWS_UP FALSE

/* XXX Why 32? */
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE
/*
 * The interrupt mask disables only normal interrupts (IRQ).
 *
 * In order to support fast interrupts (FIQ) such that they can do something
 * useful, we have to disable the operating system support for FIQs.  Having
 * operating system support for them would require that FIQs are disabled
 * during critical sections of the operating system and application.  At this
 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
 * the non critical sections of IRQs, so here they would have a small
 * advantage.  Without operating system support, the FIQs can execute at any
 * time (of course not during the service of another FIQ).  If someone needs
 * operating system support for a FIQ, she can trigger a software interrupt and
 * service the request in a two-step process.
 */
#define CPU_MODES_INTERRUPT_MASK 0x80
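
/*
 * A short sketch, not part of the original header: the mask value 0x80
 * is exactly the CPSR I bit, so raising the RTEMS interrupt level sets
 * only the IRQ disable flag and leaves FIQs enabled, as described in
 * the comment above.  Guard macro and function name are hypothetical.
 */
#ifdef CPU_H_USAGE_SKETCHES
static inline uint32_t arm_example_masked_psr( uint32_t psr )
{
  /* What a nonzero interrupt level does to the saved PSR: IRQs off */
  return psr | CPU_MODES_INTERRUPT_MASK;
}
#endif /* CPU_H_USAGE_SKETCHES */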
#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_INTERRUPT_NUMBER_OF_VECTORS 8

#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8
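
/*
 * A compile-time sketch, not part of the original header (guard macro
 * and typedef name are hypothetical): AAPCS demands 8-byte stack
 * alignment at public interfaces, which the array trick below checks
 * without relying on C11 static assertions.
 */
#ifdef CPU_H_USAGE_SKETCHES
typedef char arm_example_stack_alignment_check
  [ CPU_STACK_ALIGNMENT == 8 ? 1 : -1 ];
#endif /* CPU_H_USAGE_SKETCHES */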
/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here.  Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there is a CLZ instruction which could be
 * used to implement this much more quickly than the default macro.
 */
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
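
/*
 * A sketch of the CLZ alternative mentioned above, not part of the
 * original header (guard macro and function name are hypothetical):
 * on ARMv5 and later, GCC's __builtin_clz() compiles to a single CLZ
 * instruction, which would outperform the generic table-driven scan.
 */
#ifdef CPU_H_USAGE_SKETCHES
static inline unsigned int arm_example_find_first_bit( uint32_t value )
{
  /* Bit number of the most significant set bit; value must be nonzero */
  return 31U - (unsigned int) __builtin_clz( value );
}
#endif /* CPU_H_USAGE_SKETCHES */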
/** @} */

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreCPU
 *
 * @{
 */
typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
} Context_Control;

typedef struct {
  /* Not supported */
} Context_Control_fp;
SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context;

extern uint32_t arm_cpu_mode;

static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#else
  level = 0;
#endif

  return level;
}
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie ) \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )
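
/*
 * A usage sketch, not part of the original header (guard macro and
 * function name are hypothetical): the canonical critical section
 * built from the macros above.  The cookie records the previous
 * interrupt state, and _CPU_ISR_Flash() briefly opens a window for
 * pending interrupts inside a long critical section.
 */
#ifdef CPU_H_USAGE_SKETCHES
static inline void arm_example_critical_section( void )
{
  uint32_t level;

  _CPU_ISR_Disable( level );
  /* ... code that must not be interrupted ... */
  _CPU_ISR_Flash( level );  /* let pending interrupts run briefly */
  /* ... more protected code ... */
  _CPU_ISR_Enable( level );
}
#endif /* CPU_H_USAGE_SKETCHES */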
void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp
);

#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#define _CPU_Context_Restart_self( _the_context ) \
  _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Fp_start( _base, _offset ) \
  ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)
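
/*
 * A sketch, not part of the original header (guard macro, stack buffer,
 * and function names are hypothetical): how the helpers above fit
 * together when a new thread context is prepared.
 */
#ifdef CPU_H_USAGE_SKETCHES
static char arm_example_stack[ CPU_STACK_MINIMUM_SIZE ];

static void arm_example_entry( void )
{
  /* thread body */
}

static inline void arm_example_new_context( Context_Control *context )
{
  _CPU_Context_Initialize(
    context,
    arm_example_stack,
    sizeof( arm_example_stack ),
    0,                 /* new interrupt level: all interrupts enabled */
    arm_example_entry,
    false              /* not a floating point task */
  );
  /* _CPU_Context_Get_SP( context ) now points near the stack top */
}
#endif /* CPU_H_USAGE_SKETCHES */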
#define _CPU_Fatal_halt( _err ) \
  do { \
    uint32_t _level; \
    uint32_t _error = _err; \
    _CPU_ISR_Disable( _level ); \
    __asm__ volatile ("mov r0, %0\n" \
      : "=r" (_error) \
      : "0" (_error) \
      : "r0" ); \
    while (1); \
  } while (0)
void _CPU_Initialize( void );

#define _CPU_Initialize_vectors()

void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

#if defined(ARM_MULTILIB_ARCH_V7M)
  void _ARMV7M_Start_multitasking( Context_Control *bsp, Context_Control *heir );

  void _ARMV7M_Stop_multitasking( Context_Control *bsp )
    RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
  #define _CPU_Stop_multitasking _ARMV7M_Stop_multitasking
#endif

void _CPU_Context_save_fp( Context_Control_fp **fp_context_ptr );

void _CPU_Context_restore_fp( Context_Control_fp **fp_context_ptr );
static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */

  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
    "BIC %1, %1, #0xff0000\n"
    "MOV %0, %0, ROR #8\n"
    "EOR %0, %0, %1, LSR #8\n"
    : "=r" (value), "=r" (tmp)
    : "0" (value), "1" (tmp)
  );
  return value;
#endif
}
static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
#endif
}
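
/*
 * A usage sketch, not part of the original header (guard macro and
 * function name are hypothetical): converting a big-endian wire value
 * to host order with the swap helpers above.
 */
#ifdef CPU_H_USAGE_SKETCHES
static inline uint32_t arm_example_be32_to_host( uint32_t wire_value )
{
#if defined(__ARMEL__)
  return CPU_swap_u32( wire_value );  /* e.g. 0x11223344 -> 0x44332211 */
#else
  return wire_value;                  /* already big endian */
#endif
}
#endif /* CPU_H_USAGE_SKETCHES */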
/** @} */

#if defined(ARM_MULTILIB_ARCH_V4)

/**
 * @addtogroup ScoreCPUARM
 *
 * @{
 */

typedef struct {
  uint32_t r0;
  uint32_t r1;
  uint32_t r2;
  uint32_t r3;
  uint32_t r4;
  uint32_t r5;
  uint32_t r6;
  uint32_t r7;
  uint32_t r8;
  uint32_t r9;
  uint32_t r10;
  uint32_t r11;
  uint32_t r12;
  uint32_t sp;
  uint32_t lr;
  uint32_t pc;
  uint32_t cpsr;
} arm_cpu_context;

typedef void arm_exc_abort_handler( arm_cpu_context *context );

typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8
} Arm_symbolic_exception_name;
static inline uint32_t arm_status_irq_enable( void )
{
  uint32_t arm_switch_reg;
  uint32_t psr;

  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[psr], cpsr\n"
    "bic %[arm_switch_reg], %[psr], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [psr] "=&r" (psr)
  );

  return psr;
}

static inline void arm_status_restore( uint32_t psr )
{
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[psr]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [psr] "r" (psr)
  );

  RTEMS_COMPILER_MEMORY_BARRIER();
}
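
/*
 * A usage sketch, not part of the original header (guard macro and
 * function name are hypothetical): a window in which IRQs are
 * explicitly enabled, with the caller's previous state restored
 * afterwards.
 */
#ifdef CPU_H_USAGE_SKETCHES
static inline void arm_example_irq_window( void )
{
  uint32_t psr = arm_status_irq_enable();

  /* IRQs are accepted here regardless of the previous state */

  arm_status_restore( psr );
}
#endif /* CPU_H_USAGE_SKETCHES */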
void arm_exc_data_abort_set_handler( arm_exc_abort_handler handler );

void arm_exc_data_abort( void );

void arm_exc_prefetch_abort_set_handler( arm_exc_abort_handler handler );

void arm_exc_prefetch_abort( void );

void bsp_interrupt_dispatch( void );

void arm_exc_interrupt( void );

void arm_exc_undefined( void );

/** @} */
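
/*
 * A sketch, not part of the original header (guard macro, handler body,
 * and function names are hypothetical): hooking a data abort handler
 * through the interface above to capture the faulting context.
 */
#ifdef CPU_H_USAGE_SKETCHES
static void arm_example_data_abort_handler( arm_cpu_context *context )
{
  /* e.g. record context->pc and context->cpsr, then halt */
  (void) context;
  while (1) {
    /* spin */
  }
}

static inline void arm_example_install_abort_handler( void )
{
  arm_exc_data_abort_set_handler( arm_example_data_abort_handler );
}
#endif /* CPU_H_USAGE_SKETCHES */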
/* XXX This is out of date */
typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_ip;
  uint32_t register_lr;
} CPU_Exception_frame;

typedef CPU_Exception_frame CPU_Interrupt_frame;

#else /* !defined(ARM_MULTILIB_ARCH_V4) */

typedef void CPU_Interrupt_frame;

#endif /* !defined(ARM_MULTILIB_ARCH_V4) */

#ifdef __cplusplus
}
#endif

#endif /* ASM */

#endif /* _RTEMS_SCORE_CPU_H */