PageRenderTime 38ms CodeModel.GetById 22ms app.highlight 12ms RepoModel.GetById 1ms app.codeStats 0ms

/cpukit/score/cpu/arm/rtems/score/cpu.h

https://bitbucket.org/cdcs/leon-rtems
C Header | 581 lines | 374 code | 112 blank | 95 comment | 5 complexity | 91bd2a35117e6b4c46f1b9f7f8010ae0 MD5 | raw file
Possible License(s): GPL-2.0, BSD-3-Clause
  1/**
  2 * @file
  3 *
  4 * @ingroup ScoreCPU
  5 *
  6 * @brief ARM architecture support API.
  7 */
  8
  9/*
 10 * $Id$
 11 *
 12 *  This include file contains information pertaining to the ARM
 13 *  processor.
 14 *
 15 *  Copyright (c) 2009-2011 embedded brains GmbH.
 16 *
 17 *  Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
 18 *
 19 *  Copyright (c) 2006 OAR Corporation
 20 *
 21 *  Copyright (c) 2002 Advent Networks, Inc.
 22 *        Jay Monkman <jmonkman@adventnetworks.com>
 23 *
 24 *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
 25 *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
 26 *
 27 *  The license and distribution terms for this file may be
 28 *  found in the file LICENSE in this distribution or at
 29 *  http://www.rtems.com/license/LICENSE.
 30 *
 31 */
 32
 33#ifndef _RTEMS_SCORE_CPU_H
 34#define _RTEMS_SCORE_CPU_H
 35
 36#include <rtems/score/types.h>
 37#include <rtems/score/arm.h>
 38
 39#if defined(ARM_MULTILIB_ARCH_V4)
 40
 41/**
 42 * @defgroup ScoreCPUARM ARM Specific Support
 43 *
 44 * @ingroup ScoreCPU
 45 *
 46 * @brief ARM specific support.
 47 *
 48 * @{
 49 */
 50
/*
 * Thumb <-> ARM state switch helpers for inline assembly.
 *
 * The CPSR access instructions (mrs/msr) used below are ARM-state
 * instructions, so code compiled for Thumb must temporarily switch to ARM
 * state around such sequences and then switch back.
 */
#ifdef __thumb__
  /* Scratch register declaration used by ARM_SWITCH_BACK */
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  /* "bx pc" from Thumb state enters ARM state; .align 2 ensures pc is word aligned */
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  /* Branch to pc + 1: bit 0 set in the target address re-enters Thumb state */
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  /* Output operand binding for the scratch register */
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  /* Same as above, with a leading comma for appending to an output list */
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  /* Plain ARM compilation: no state switching is necessary */
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
 64
 65/**
 66 * @name Program Status Register
 67 *
 68 * @{
 69 */
 70
 71#define ARM_PSR_N (1 << 31)
 72#define ARM_PSR_Z (1 << 30)
 73#define ARM_PSR_C (1 << 29)
 74#define ARM_PSR_V (1 << 28)
 75#define ARM_PSR_Q (1 << 27)
 76#define ARM_PSR_J (1 << 24)
 77#define ARM_PSR_GE_SHIFT 16
 78#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
 79#define ARM_PSR_E (1 << 9)
 80#define ARM_PSR_A (1 << 8)
 81#define ARM_PSR_I (1 << 7)
 82#define ARM_PSR_F (1 << 6)
 83#define ARM_PSR_T (1 << 5)
 84#define ARM_PSR_M_SHIFT 0
 85#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
 86#define ARM_PSR_M_USR 0x10
 87#define ARM_PSR_M_FIQ 0x11
 88#define ARM_PSR_M_IRQ 0x12
 89#define ARM_PSR_M_SVC 0x13
 90#define ARM_PSR_M_ABT 0x17
 91#define ARM_PSR_M_UND 0x1b
 92#define ARM_PSR_M_SYS 0x1f
 93
 94/** @} */
 95
 96/** @} */
 97
 98#endif /* defined(ARM_MULTILIB_ARCH_V4) */
 99
100/**
101 * @addtogroup ScoreCPU
102 *
103 * @{
104 */
105
/* If someone uses THUMB we assume she wants minimal code size */
#ifdef __thumb__
  #define CPU_INLINE_ENABLE_DISPATCH FALSE
#else
  #define CPU_INLINE_ENABLE_DISPATCH TRUE
#endif

/* Byte order follows the compiler's target configuration */
#if defined(__ARMEL__)
  #define CPU_BIG_ENDIAN FALSE
  #define CPU_LITTLE_ENDIAN TRUE
#elif defined(__ARMEB__)
  #define CPU_BIG_ENDIAN TRUE
  #define CPU_LITTLE_ENDIAN FALSE
#else
  #error "unknown endianness"
#endif

#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE

/* This port provides no dedicated interrupt stack support */
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

#define CPU_ISR_PASSES_FRAME_POINTER 0

/* Hardware floating point support depends on the selected multilib */
#if ( ARM_HAS_FPU == 1 )
  #define CPU_HARDWARE_FP TRUE
#else
  #define CPU_HARDWARE_FP FALSE
#endif

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

/* The generic score idle thread body is used */
#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE

/* ARM stacks grow from high addresses toward low addresses */
#define CPU_STACK_GROWS_UP FALSE

/* XXX Why 32? */
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))

#define CPU_TIMESTAMP_USE_INT64_INLINE TRUE

/*
 * The interrupt mask disables only normal interrupts (IRQ).
 *
 * In order to support fast interrupts (FIQ) such that they can do something
 * useful, we have to disable the operating system support for FIQs.  Having
 * operating system support for them would require that FIQs are disabled
 * during critical sections of the operating system and application.  At this
 * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
 * the non critical sections of IRQs, so here they would have a small
 * advantage.  Without operating system support, the FIQs can execute at any
 * time (of course not during the service of another FIQ). If someone needs
 * operating system support for a FIQ, she can trigger a software interrupt and
 * service the request in a two-step process.
 */
#define CPU_MODES_INTERRUPT_MASK 0x80

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_INTERRUPT_NUMBER_OF_VECTORS 8

#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/* Minimum task stack size: 4 KiB */
#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 4.3.1, Aggregates */
#define CPU_PARTITION_ALIGNMENT 4

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here. Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there's a CLZ function which could be
 *     used to implement much quicker than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
209
210/** @} */
211
212#ifndef ASM
213
214#ifdef __cplusplus
215extern "C" {
216#endif
217
218/**
219 * @addtogroup ScoreCPU
220 *
221 * @{
222 */
223
/*
 * Per-thread register context saved and restored by _CPU_Context_switch().
 *
 * NOTE(review): the layout must match the offsets used by the context switch
 * assembly code elsewhere in this port — do not reorder fields.
 */
typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  /* Saved program status register */
  uint32_t register_cpsr;
  /* r4-r10 — presumably the callee-saved registers per AAPCS; verify
   * against cpu_asm */
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  /* Frame pointer (r11), stack pointer and link register */
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  /* Interrupt nesting counter maintained for the V7-M port */
  uint32_t isr_nest_level;
#else
  /* Fallback variant: only the stack pointer is tracked */
  void *register_sp;
#endif
} Context_Control;
253
/* Floating point context: this port saves no FP state */
typedef struct {
  /* Not supported */
} Context_Control_fp;

/* Template used by _CPU_Context_Initialize_fp() below */
SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context;

/* Processor mode selected by the BSP; defined in the port's cpu.c */
extern uint32_t arm_cpu_mode;
261
/**
 * @brief Disables maskable interrupts and returns the previous level.
 *
 * ARMv4: sets the I bit (0x80, cf. ARM_PSR_I) in the CPSR and returns the
 * previous CPSR value.  ARMv7-M: writes 0x80 to BASEPRI_MAX (which only
 * raises the priority mask) and returns the previous BASEPRI value.
 * FIQs are not masked (see the CPU_MODES_INTERRUPT_MASK discussion above).
 *
 * @return Opaque interrupt level cookie for arm_interrupt_enable().
 */
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#else
  /* No interrupt support in this configuration */
  level = 0;
#endif

  return level;
}
292
/**
 * @brief Restores the interrupt level previously returned by
 * arm_interrupt_disable().
 *
 * ARMv4: writes @a level back to the CPSR.  ARMv7-M: writes @a level back
 * to BASEPRI.
 */
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}
313
/**
 * @brief Briefly restores the interrupt level @a level, then disables
 * interrupts again.
 *
 * Used inside long critical sections to give pending interrupts a window
 * to be serviced.  The current CPSR (or BASEPRI on ARMv7-M) is saved,
 * @a level is written, and the saved value is written back immediately.
 */
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
340
/* Score ISR critical section API, mapped onto the ARM helpers above */
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie )  \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )

/* Sets the interrupt level of the executing thread; implemented in cpu.c */
void _CPU_ISR_Set_level( uint32_t level );

/* Returns the interrupt level of the executing thread */
uint32_t _CPU_ISR_Get_level( void );
355
/*
 * Initializes @a the_context so that the thread starts executing at
 * @a entry_point using the given stack area and interrupt level.
 * Implemented in the port's cpu.c.
 */
void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp
);

/* Returns the stack pointer stored in a context */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

/* Restarts the calling thread by reloading its initial context */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/* Address of the FP context area inside a stack area */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/* Copies the null FP context template; _destination is a Context_Control_fp** */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)
378
/**
 * @brief Halts the system after a fatal error.
 *
 * Disables interrupts, moves the error code into r0 (so it is visible to a
 * debugger), and spins forever.
 *
 * Note: the do/while(0) wrapper must not carry a trailing semicolon —
 * otherwise the macro expands to two statements and cannot be used as the
 * body of an if/else branch.  The caller supplies the semicolon.
 */
#define _CPU_Fatal_halt( _err )             \
   do {                                     \
     uint32_t _level;                       \
     uint32_t _error = _err;                \
     _CPU_ISR_Disable( _level );            \
     __asm__ volatile ("mov r0, %0\n"           \
                   : "=r" (_error)          \
                   : "0" (_error)           \
                   : "r0" );                \
     while (1);                             \
   } while (0)
390
/* One-time CPU/port initialization; implemented in cpu.c */
void _CPU_Initialize( void );

/* This port needs no vector table setup at this point */
#define _CPU_Initialize_vectors()

/*
 * Installs @a new_handler for @a vector and returns the previous handler
 * through @a old_handler.
 */
void _CPU_ISR_install_vector(
  uint32_t vector,
  proc_ptr new_handler,
  proc_ptr *old_handler
);

/* Saves the context of @a run and restores the context of @a heir */
void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

/* Restores @a new_context without saving the current one; does not return */
void _CPU_Context_restore( Context_Control *new_context )
  RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;

#if defined(ARM_MULTILIB_ARCH_V7M)
  /* ARMv7-M specific multitasking start/stop entry points */
  void _ARMV7M_Start_multitasking( Context_Control *bsp, Context_Control *heir );
  void _ARMV7M_Stop_multitasking( Context_Control *bsp )
    RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
  #define _CPU_Stop_multitasking _ARMV7M_Stop_multitasking
#endif

/* FP context save/restore: no-ops in practice since FP is not supported */
void _CPU_Context_save_fp( Context_Control_fp **fp_context_ptr );

void _CPU_Context_restore_fp( Context_Control_fp **fp_context_ptr );
417
/**
 * @brief Returns @a value with its four bytes reversed (endianness swap).
 *
 * Three variants: the Thumb-2 "rev" instruction, a portable C fallback for
 * classic Thumb, and an ARM-mode EOR/BIC/ROR sequence that swaps the word
 * without a scratch constant pool entry.
 */
static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  /* Portable byte-by-byte swap for classic Thumb */
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                "BIC %1, %1, #0xff0000\n"
                "MOV %0, %0, ROR #8\n"
                "EOR %0, %0, %1, LSR #8\n"
                : "=r" (value), "=r" (tmp)
                : "0" (value), "1" (tmp));
  return value;
#endif
}
448
/**
 * @brief Returns @a value with its two bytes exchanged (endianness swap).
 *
 * Uses the Thumb-2 "rev16" instruction when available, otherwise a plain C
 * shift-and-mask implementation.
 */
static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  uint16_t low_byte  = (uint16_t) (value & 0xffU);
  uint16_t high_byte = (uint16_t) ((value >> 8) & 0xffU);

  return (uint16_t) ((low_byte << 8) | high_byte);
#endif
}
462
463/** @} */
464
465#if defined(ARM_MULTILIB_ARCH_V4)
466
467/**
468 * @addtogroup ScoreCPUARM
469 *
470 * @{
471 */
472
/*
 * Full register snapshot handed to exception (abort) handlers.
 *
 * NOTE(review): the field order must match the exception entry assembly
 * that builds this structure — do not reorder.
 */
typedef struct {
  uint32_t r0;
  uint32_t r1;
  uint32_t r2;
  uint32_t r3;
  uint32_t r4;
  uint32_t r5;
  uint32_t r6;
  uint32_t r7;
  uint32_t r8;
  uint32_t r9;
  uint32_t r10;
  uint32_t r11;
  uint32_t r12;
  uint32_t sp;
  uint32_t lr;
  uint32_t pc;
  uint32_t cpsr;
} arm_cpu_context;
492
/* Handler type invoked for prefetch/data aborts with the faulting context */
typedef void arm_exc_abort_handler( arm_cpu_context *context );

/*
 * Symbolic names for the eight classic ARM exception vectors.  The values
 * correspond to the vector order and must not be changed.
 */
typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8
} Arm_symbolic_exception_name;
506
/**
 * @brief Enables IRQs by clearing the I bit (0x80) in the CPSR.
 *
 * The compiler memory barrier before the asm prevents memory accesses from
 * being reordered into the open-interrupt window.
 *
 * @return The previous CPSR value, for arm_status_restore().
 */
static inline uint32_t arm_status_irq_enable( void )
{
  uint32_t arm_switch_reg;
  uint32_t psr;

  RTEMS_COMPILER_MEMORY_BARRIER();

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[psr], cpsr\n"
    "bic %[arm_switch_reg], %[psr], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [psr] "=&r" (psr)
  );

  return psr;
}
525
/**
 * @brief Restores a CPSR value previously obtained from
 * arm_status_irq_enable().
 *
 * The compiler memory barrier after the asm keeps subsequent memory
 * accesses from being reordered before the restore.
 */
static inline void arm_status_restore( uint32_t psr )
{
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[psr]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [psr] "r" (psr)
  );

  RTEMS_COMPILER_MEMORY_BARRIER();
}
540
/* Registers the application handler invoked on data aborts */
void arm_exc_data_abort_set_handler( arm_exc_abort_handler handler );

/* Low-level data abort exception entry */
void arm_exc_data_abort( void );

/* Registers the application handler invoked on prefetch aborts */
void arm_exc_prefetch_abort_set_handler( arm_exc_abort_handler handler );

/* Low-level prefetch abort exception entry */
void arm_exc_prefetch_abort( void );

/* BSP-provided interrupt dispatcher called from the IRQ exception entry */
void bsp_interrupt_dispatch( void );

/* Low-level IRQ exception entry */
void arm_exc_interrupt( void );

/* Low-level undefined instruction exception entry */
void arm_exc_undefined( void );
554
555/** @} */
556
/* XXX This is out of date */
/* Caller-saved registers captured on exception entry (see XXX note above) */
typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_ip;
  uint32_t register_lr;
} CPU_Exception_frame;

/* Interrupts use the same frame layout as exceptions on ARMv4 */
typedef CPU_Exception_frame CPU_Interrupt_frame;
568
#else /* !defined(ARM_MULTILIB_ARCH_V4) */

/* Non-V4 ports have no defined interrupt frame layout at this level */
typedef void CPU_Interrupt_frame;

#endif /* !defined(ARM_MULTILIB_ARCH_V4) */
574
575#ifdef __cplusplus
576}
577#endif
578
579#endif /* ASM */
580
581#endif /* _RTEMS_SCORE_CPU_H */