PageRenderTime 104ms CodeModel.GetById 13ms app.highlight 29ms RepoModel.GetById 2ms app.codeStats 0ms

/cpukit/score/cpu/arm/rtems/score/cpu.h

https://bitbucket.org/cdcs/rtems
C Header | 579 lines | 374 code | 112 blank | 93 comment | 5 complexity | d40bc211adfa20d708d739f94edb8a7c MD5 | raw file
Possible License(s): GPL-2.0, BSD-3-Clause
  1/**
  2 * @file
  3 *
  4 * @ingroup ScoreCPU
  5 *
  6 * @brief ARM architecture support API.
  7 */
  8
  9/*
 10 *  This include file contains information pertaining to the ARM
 11 *  processor.
 12 *
 13 *  Copyright (c) 2009-2011 embedded brains GmbH.
 14 *
 15 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
 16 *
 17 *  Copyright (c) 2006 OAR Corporation
 18 *
 19 *  Copyright (c) 2002 Advent Networks, Inc.
 20 *        Jay Monkman <jmonkman@adventnetworks.com>
 21 *
 22 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
 23 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
 24 *
 25 *  The license and distribution terms for this file may be
 26 *  found in the file LICENSE in this distribution or at
 27 *  http://www.rtems.com/license/LICENSE.
 28 *
 29 */
 30
 31#ifndef _RTEMS_SCORE_CPU_H
 32#define _RTEMS_SCORE_CPU_H
 33
 34#include <rtems/score/types.h>
 35#include <rtems/score/arm.h>
 36
 37#if defined(ARM_MULTILIB_ARCH_V4)
 38
 39/**
 40 * @defgroup ScoreCPUARM ARM Specific Support
 41 *
 42 * @ingroup ScoreCPU
 43 *
 44 * @brief ARM specific support.
 45 *
 46 * @{
 47 */
 48
/*
 * Thumb interworking helpers for the inline assembly below.
 *
 * Thumb-1 code cannot access the CPSR with mrs/msr, so inline assembly
 * compiled as Thumb must first switch to ARM state (ARM_SWITCH_TO_ARM) and
 * switch back afterwards (ARM_SWITCH_BACK).  The "add ..., pc, #1" / "bx"
 * pair produces a branch target with bit 0 set, which re-enters Thumb state.
 * ARM_SWITCH_REGISTERS declares the scratch register these sequences need,
 * and ARM_SWITCH_OUTPUT / ARM_SWITCH_ADDITIONAL_OUTPUT name it in the asm
 * output operand lists.  When compiling for ARM state all helpers expand to
 * nothing.
 */
#ifdef __thumb__
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
 62
 63/**
 64 * @name Program Status Register
 65 *
 66 * @{
 67 */
 68
 69#define ARM_PSR_N (1 << 31)
 70#define ARM_PSR_Z (1 << 30)
 71#define ARM_PSR_C (1 << 29)
 72#define ARM_PSR_V (1 << 28)
 73#define ARM_PSR_Q (1 << 27)
 74#define ARM_PSR_J (1 << 24)
 75#define ARM_PSR_GE_SHIFT 16
 76#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
 77#define ARM_PSR_E (1 << 9)
 78#define ARM_PSR_A (1 << 8)
 79#define ARM_PSR_I (1 << 7)
 80#define ARM_PSR_F (1 << 6)
 81#define ARM_PSR_T (1 << 5)
 82#define ARM_PSR_M_SHIFT 0
 83#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
 84#define ARM_PSR_M_USR 0x10
 85#define ARM_PSR_M_FIQ 0x11
 86#define ARM_PSR_M_IRQ 0x12
 87#define ARM_PSR_M_SVC 0x13
 88#define ARM_PSR_M_ABT 0x17
 89#define ARM_PSR_M_UND 0x1b
 90#define ARM_PSR_M_SYS 0x1f
 91
 92/** @} */
 93
 94/** @} */
 95
 96#endif /* defined(ARM_MULTILIB_ARCH_V4) */
 97
 98/**
 99 * @addtogroup ScoreCPU
100 *
101 * @{
102 */
103
/* If someone uses THUMB we assume she wants minimal code size */
#ifdef __thumb__
  #define CPU_INLINE_ENABLE_DISPATCH FALSE
#else
  #define CPU_INLINE_ENABLE_DISPATCH TRUE
#endif

/* Endianness is taken from the compiler's target configuration. */
#if defined(__ARMEL__)
  #define CPU_BIG_ENDIAN FALSE
  #define CPU_LITTLE_ENDIAN TRUE
#elif defined(__ARMEB__)
  #define CPU_BIG_ENDIAN TRUE
  #define CPU_LITTLE_ENDIAN FALSE
#else
  #error "unknown endianness"
#endif

#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE

/* This port provides no dedicated interrupt stack of either kind. */
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

/* ISR handlers are not passed a pointer to the interrupt frame. */
#define CPU_ISR_PASSES_FRAME_POINTER 0
130
/* Hardware floating point availability is reported by the multilib (arm.h). */
#if ( ARM_HAS_FPU == 1 )
  #define CPU_HARDWARE_FP TRUE
#else
  #define CPU_HARDWARE_FP FALSE
#endif

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

/* The generic score idle thread body is used. */
#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE

/* ARM stacks grow from high addresses towards low addresses. */
#define CPU_STACK_GROWS_UP FALSE

/* XXX Why 32?  Presumably a maximum data cache line size -- TODO confirm. */
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE
153
154/*
155 * The interrupt mask disables only normal interrupts (IRQ).
156 *
157 * In order to support fast interrupts (FIQ) such that they can do something
158 * useful, we have to disable the operating system support for FIQs.  Having
159 * operating system support for them would require that FIQs are disabled
160 * during critical sections of the operating system and application.  At this
161 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
162 * the non critical sections of IRQs, so here they would have a small
163 * advantage.  Without operating system support, the FIQs can execute at any
164 * time (of course not during the service of another FIQ). If someone needs
165 * operating system support for a FIQ, she can trigger a software interrupt and
166 * service the request in a two-step process.
167 */
168#define CPU_MODES_INTERRUPT_MASK 0x80
169
170#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
171
172#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
173
174#define CPU_INTERRUPT_NUMBER_OF_VECTORS 8
175
176#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)
177
178#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
179
180#define CPU_STACK_MINIMUM_SIZE (1024 * 4)
181
182/* AAPCS, section 4.1, Fundamental Data Types */
183#define CPU_ALIGNMENT 8
184
185#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
186
187/* AAPCS, section 4.3.1, Aggregates */
188#define CPU_PARTITION_ALIGNMENT 4
189
190/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
191#define CPU_STACK_ALIGNMENT 8
192
193/*
194 * Bitfield handler macros.
195 *
196 * If we had a particularly fast function for finding the first
197 * bit set in a word, it would go here. Since we don't (*), we'll
198 * just use the universal macros.
199 *
200 * (*) On ARM V5 and later, there's a CLZ function which could be
201 *     used to implement much quicker than the default macro.
202 */
203
204#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
205
206#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
207
208/** @} */
209
210#ifndef ASM
211
212#ifdef __cplusplus
213extern "C" {
214#endif
215
216/**
217 * @addtogroup ScoreCPU
218 *
219 * @{
220 */
221
/**
 * @brief Thread register context saved and restored across a context switch.
 *
 * NOTE(review): the member order presumably matches the offsets used by the
 * assembly implementations of _CPU_Context_switch() and
 * _CPU_Context_restore() -- confirm before reordering any member.
 */
typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  /* Callee-saved registers r4-r10 per the AAPCS. */
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  /* Callee-saved registers r4-r11 for the ARMv7-M variant. */
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  /* Per-thread interrupt nesting counter used by the ARMv7-M port. */
  uint32_t isr_nest_level;
#else
  /* Minimal fallback context: stack pointer only. */
  void *register_sp;
#endif
} Context_Control;
251
/**
 * @brief Floating point context -- this port does not support an FP context.
 */
typedef struct {
  /* Not supported */
} Context_Control_fp;

/* Null FP context; see _CPU_Context_Initialize_fp() below. */
SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context;

/* Presumably the CPU mode used for new task contexts -- TODO confirm. */
extern uint32_t arm_cpu_mode;
259
/**
 * @brief Disables normal interrupts (IRQ) and returns the previous level.
 *
 * ARMv4: reads the CPSR, sets the I bit (0x80) and writes it back; the old
 * CPSR is returned.  ARMv7-M: raises BASEPRI to 0x80 via the basepri_max
 * register alias and returns the old BASEPRI value.  On other variants the
 * function is a no-op returning 0.
 *
 * @return An opaque level cookie to be passed to arm_interrupt_enable().
 */
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#else
  level = 0;
#endif

  return level;
}
290
/**
 * @brief Restores the interrupt level saved by arm_interrupt_disable().
 *
 * ARMv4: writes @a level back to the CPSR.  ARMv7-M: writes @a level back to
 * BASEPRI.  On other variants this is a no-op.
 */
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}
311
/**
 * @brief Briefly restores interrupt level @a level, then disables again.
 *
 * Saves the current CPSR (or BASEPRI on ARMv7-M), writes @a level, and then
 * writes the saved value back.  This gives pending interrupts a window to be
 * serviced inside a long critical section.
 */
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
338
/* Saves the current interrupt level in _isr_cookie and disables IRQs. */
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

/* Restores the interrupt level previously saved in _isr_cookie. */
#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

/* Briefly restores the saved level so pending IRQs can be serviced. */
#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

/*
 * Initializes the_context so a switch to it begins executing entry_point on
 * the given stack at interrupt level new_level.  is_fp is presumably ignored
 * since this port has no FP context -- confirm in the implementation.
 */
void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp
);
362
/* Returns the saved stack pointer of the given context. */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

/*
 * Restarts the calling thread by restoring its (re-initialized) context.
 * The macro deliberately has no trailing semicolon -- the caller supplies
 * it, which keeps the macro safe inside if/else branches.
 */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )

/* Address of the FP context area located _offset bytes into _base. */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/* Copies the null FP context into the area referenced by *_destination. */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)
376
/*
 * Halts the system after a fatal error: disables IRQs, moves the error code
 * into r0 (so it is visible in a debugger), then spins forever.
 *
 * The do/while wrapper has no trailing semicolon -- the previous trailing
 * semicolon defeated the do { } while (0) idiom and broke expansion inside
 * if/else branches; the caller's semicolon now terminates the statement.
 */
#define _CPU_Fatal_halt( _err )             \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     __asm__ volatile ("mov r0, %0\n"           \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0)
388
/* Performs port-specific initialization. */
void _CPU_Initialize( void );

/* Nothing to do here: vector setup is handled elsewhere on this port. */
#define _CPU_Initialize_vectors()

/*
 * Installs new_handler for the given vector and returns the previous
 * handler through old_handler.
 */
void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

/* Saves the context of run and restores the context of heir. */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

/* Restores new_context without saving the current context; never returns. */
void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

#if defined(ARM_MULTILIB_ARCH_V7M)
  /* ARMv7-M specific multitasking start/stop entry points. */
  void _ARMV7M_Start_multitasking( Context_Control *bsp, Context_Control *heir );
  void _ARMV7M_Stop_multitasking( Context_Control *bsp )
    RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
  #define _CPU_Stop_multitasking _ARMV7M_Stop_multitasking
#endif

/*
 * FP context save/restore hooks; presumably no-ops here since
 * Context_Control_fp is empty -- confirm in the implementation.
 */
void _CPU_Context_save_fp( Context_Control_fp **fp_context_ptr );

void _CPU_Context_restore_fp( Context_Control_fp **fp_context_ptr );
415
/**
 * @brief Returns @a value with its four bytes reversed (endianness swap).
 *
 * Thumb-2: single "rev" instruction.  Thumb-1: portable C byte shuffle (the
 * ARM-state asm sequence is not available there).  ARM: classic
 * EOR/BIC/ROR byte-reversal sequence using one scratch register.
 */
static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  /* Reassemble with the byte order reversed. */
  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}
446
/**
 * @brief Returns @a value with its two bytes exchanged (endianness swap).
 *
 * Thumb-2 provides the "rev16" instruction; everywhere else a portable C
 * shift-and-mask expression is used.
 */
static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  uint16_t low_byte = (uint16_t) (value & 0xffU);
  uint16_t high_byte = (uint16_t) ((value >> 8) & 0xffU);

  return (uint16_t) ((low_byte << 8) | high_byte);
#endif
}
460
461/** @} */
462
463#if defined(ARM_MULTILIB_ARCH_V4)
464
465/**
466 * @addtogroup ScoreCPUARM
467 *
468 * @{
469 */
470
/**
 * @brief Full register set of an interrupted context, as seen by abort
 * handlers.
 *
 * NOTE(review): the member order presumably matches the save sequence of the
 * exception entry assembly -- confirm before reordering any member.
 */
typedef struct {
  uint32_t r0;
  uint32_t r1;
  uint32_t r2;
  uint32_t r3;
  uint32_t r4;
  uint32_t r5;
  uint32_t r6;
  uint32_t r7;
  uint32_t r8;
  uint32_t r9;
  uint32_t r10;
  uint32_t r11;
  uint32_t r12;
  uint32_t sp;
  uint32_t lr;
  uint32_t pc;
  uint32_t cpsr;
} arm_cpu_context;
490
/* Signature of the hooks installed via arm_exc_*_abort_set_handler(). */
typedef void arm_exc_abort_handler( arm_cpu_context *context );

/* Symbolic names for the eight ARM exception vector slots. */
typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8
} Arm_symbolic_exception_name;
504
/**
 * @brief Enables normal interrupts (IRQ) and returns the previous CPSR.
 *
 * Clears the I bit (0x80) in the CPSR.  The compiler memory barrier prevents
 * memory accesses from being reordered out of the protected region.
 *
 * @return The CPSR value before the I bit was cleared, suitable for
 * arm_status_restore().
 */
static inline uint32_t arm_status_irq_enable( void )
{
  uint32_t arm_switch_reg;
  uint32_t psr;

  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[psr], cpsr\n"
    "bic %[arm_switch_reg], %[psr], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [psr] "=&r" (psr)
  );

  return psr;
}
523
/**
 * @brief Restores the CPSR saved by arm_status_irq_enable().
 *
 * The memory barrier after the write keeps memory accesses from being
 * reordered into the region that ran with interrupts enabled.
 */
static inline void arm_status_restore( uint32_t psr )
{
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[psr]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [psr] "r" (psr)
  );

  RTEMS_COMPILER_MEMORY_BARRIER();
}
538
/* Installs the hook invoked on a data abort exception. */
void arm_exc_data_abort_set_handler( arm_exc_abort_handler handler );

/* Data abort exception entry point. */
void arm_exc_data_abort( void );

/* Installs the hook invoked on a prefetch abort exception. */
void arm_exc_prefetch_abort_set_handler( arm_exc_abort_handler handler );

/* Prefetch abort exception entry point. */
void arm_exc_prefetch_abort( void );

/* Provided by the BSP interrupt support to dispatch the current interrupt. */
void bsp_interrupt_dispatch( void );

/* IRQ exception entry point. */
void arm_exc_interrupt( void );

/* Undefined instruction exception entry point. */
void arm_exc_undefined( void );
552
553/** @} */
554
/* XXX This is out of date */
/*
 * NOTE(review): this frame lists only r0-r3, ip and lr (the AAPCS
 * caller-saved registers) and does not match arm_cpu_context above.
 */
typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_ip;
  uint32_t register_lr;
} CPU_Exception_frame;

typedef CPU_Exception_frame CPU_Interrupt_frame;

#else /* !defined(ARM_MULTILIB_ARCH_V4) */

/* No C-visible interrupt frame layout outside the ARMv4 variant. */
typedef void CPU_Interrupt_frame;

#endif /* !defined(ARM_MULTILIB_ARCH_V4) */
572
573#ifdef __cplusplus
574}
575#endif
576
577#endif /* ASM */
578
579#endif /* _RTEMS_SCORE_CPU_H */