PageRenderTime 23ms CodeModel.GetById 11ms RepoModel.GetById 0ms app.codeStats 0ms

/arch/tile/include/asm/processor.h

https://github.com/tklauser/linux-nios2
C Header | 375 lines | 218 code | 58 blank | 99 comment | 10 complexity | 948e459cf991832a1a53d4874ee331a9 MD5 | raw file
  1. /*
  2. * Copyright 2010 Tilera Corporation. All Rights Reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation, version 2.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  11. * NON INFRINGEMENT. See the GNU General Public License for
  12. * more details.
  13. */
  14. #ifndef _ASM_TILE_PROCESSOR_H
  15. #define _ASM_TILE_PROCESSOR_H
  16. #include <arch/chip.h>
  17. #ifndef __ASSEMBLY__
  18. /*
  19. * NOTE: we don't include <linux/ptrace.h> or <linux/percpu.h> as one
  20. * normally would, due to #include dependencies.
  21. */
  22. #include <linux/types.h>
  23. #include <asm/ptrace.h>
  24. #include <asm/percpu.h>
  25. #include <arch/spr_def.h>
struct task_struct;
struct thread_struct;

/*
 * Address-limit type used by the generic uaccess machinery.
 * NOTE(review): "seg" appears to be an opaque cookie here; confirm how
 * set_fs()/get_fs() consume it elsewhere in this port.
 */
typedef struct {
	unsigned long seg;
} mm_segment_t;

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
void *current_text_addr(void);
#if CHIP_HAS_TILE_DMA()

/*
 * Capture the state of a suspended DMA.
 * The fields mirror the DMA engine state saved for a thread; "byte == 0"
 * is used as the "no DMA state" sentinel (see the comment on the
 * tile_dma_state member of struct thread_struct below).
 */
struct tile_dma_state {
	int enabled;
	unsigned long src;
	unsigned long dest;
	unsigned long strides;
	unsigned long chunk_size;
	unsigned long src_chunk;
	unsigned long dest_chunk;
	unsigned long byte;
	unsigned long status;
};

/*
 * A mask of the DMA status register for selecting only the 'running'
 * and 'done' bits.
 */
#define DMA_STATUS_MASK \
	(SPR_DMA_STATUS__RUNNING_MASK | SPR_DMA_STATUS__DONE_MASK)

#endif
/*
 * Track asynchronous TLB events (faults and access violations)
 * that occur while we are in kernel mode from DMA or the SN processor.
 */
struct async_tlb {
	short fault_num;	/* original fault number; 0 if none */
	char is_fault;		/* was it a fault (vs an access violation) */
	char is_write;		/* for fault: was it caused by a write? */
	unsigned long address;	/* what address faulted? */
};
#ifdef CONFIG_HARDWALL
struct hardwall_info;

/* Per-task state for one hardwall resource type. */
struct hardwall_task {
	/* Which hardwall is this task tied to? (or NULL if none) */
	struct hardwall_info *info;
	/* Chains this task into the list at info->task_head. */
	struct list_head list;
};

/* Number of distinct hardwall resource types on this chip. */
#ifdef __tilepro__
#define HARDWALL_TYPES 1	/* udn */
#else
#define HARDWALL_TYPES 3	/* udn, idn, and ipi */
#endif
#endif
/* Architecture-specific per-thread processor state. */
struct thread_struct {
	/* kernel stack pointer */
	unsigned long ksp;
	/* kernel PC */
	unsigned long pc;
	/* starting user stack pointer (for page migration) */
	unsigned long usp0;
	/* pid of process that created this one */
	pid_t creator_pid;
#if CHIP_HAS_TILE_DMA()
	/* DMA info for suspended threads (byte == 0 means no DMA state) */
	struct tile_dma_state tile_dma_state;
#endif
	/* User EX_CONTEXT registers */
	unsigned long ex_context[2];
	/* User SYSTEM_SAVE registers */
	unsigned long system_save[4];
	/* User interrupt mask */
	unsigned long long interrupt_mask;
	/* User interrupt-control 0 state */
	unsigned long intctrl_0;
	/* Any other miscellaneous processor state bits */
	unsigned long proc_status;
#if !CHIP_HAS_FIXED_INTVEC_BASE()
	/* Interrupt base for PL0 interrupts */
	unsigned long interrupt_vector_base;
#endif
	/* Tile cache retry fifo high-water mark */
	unsigned long tile_rtf_hwm;
#if CHIP_HAS_DSTREAM_PF()
	/* Data stream prefetch control */
	unsigned long dstream_pf;
#endif
#ifdef CONFIG_HARDWALL
	/* Hardwall information for various resources. */
	struct hardwall_task hardwall[HARDWALL_TYPES];
#endif
#if CHIP_HAS_TILE_DMA()
	/* Async DMA TLB fault information */
	struct async_tlb dma_async_tlb;
#endif
};
  122. #endif /* !__ASSEMBLY__ */
/*
 * Start with "sp" this many bytes below the top of the kernel stack.
 * This allows us to be cache-aware when handling the initial save
 * of the pt_regs value to the stack.
 */
#define STACK_TOP_DELTA 64

/*
 * When entering the kernel via a fault, start with the top of the
 * pt_regs structure this many bytes below the top of the page.
 * This aligns the pt_regs structure optimally for cache-line access.
 */
#ifdef __tilegx__
#define KSTK_PTREGS_GAP 48
#else
#define KSTK_PTREGS_GAP 56
#endif
#ifndef __ASSEMBLY__

/* Largest user virtual-address span supported by the hardware/layout. */
#ifdef __tilegx__
#define TASK_SIZE_MAX (_AC(1, UL) << (MAX_VA_WIDTH - 1))
#else
#define TASK_SIZE_MAX PAGE_OFFSET
#endif

/* TASK_SIZE and related variables are always checked in "current" context. */
#ifdef CONFIG_COMPAT
#define COMPAT_TASK_SIZE (1UL << 31)
#define TASK_SIZE ((current_thread_info()->status & TS_COMPAT) ?\
	COMPAT_TASK_SIZE : TASK_SIZE_MAX)
#else
#define TASK_SIZE TASK_SIZE_MAX
#endif

/* Per-mm vDSO mapping base, and addresses of symbols within it. */
#define VDSO_BASE ((unsigned long)current->active_mm->context.vdso_base)
#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))

#define STACK_TOP TASK_SIZE

/* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */
#define STACK_TOP_MAX TASK_SIZE_MAX

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's, if it is using bottom-up mapping.
 */
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))

#define HAVE_ARCH_PICK_MMAP_LAYOUT

/* Initial thread state: sp at the canonical stack top, interrupts masked. */
#define INIT_THREAD { \
	.ksp = (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA, \
	.interrupt_mask = -1ULL \
}

/* Kernel stack top for the task that first boots on this cpu. */
DECLARE_PER_CPU(unsigned long, boot_sp);

/* PC to boot from on this cpu. */
DECLARE_PER_CPU(unsigned long, boot_pc);
  172. /* Do necessary setup to start up a newly executed thread. */
  173. static inline void start_thread(struct pt_regs *regs,
  174. unsigned long pc, unsigned long usp)
  175. {
  176. regs->pc = pc;
  177. regs->sp = usp;
  178. single_step_execve();
  179. }
  180. /* Free all resources held by a thread. */
  181. static inline void release_thread(struct task_struct *dead_task)
  182. {
  183. /* Nothing for now */
  184. }
extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);

/*
 * Return saved (kernel) PC of a blocked thread.
 * Only used in a printk() in kernel/sched/core.c, so don't work too hard.
 */
#define thread_saved_pc(t) ((t)->thread.pc)

unsigned long get_wchan(struct task_struct *p);

/* Return initial ksp value for given task. */
#define task_ksp0(task) \
	((unsigned long)(task)->stack + THREAD_SIZE - STACK_TOP_DELTA)

/* Return some info about the user process TASK. */
#define task_pt_regs(task) \
	((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
/*
 * Same result for the current task, but derived from the live stack
 * pointer: OR with (THREAD_SIZE - 1) rounds up to the top of the
 * THREAD_SIZE-aligned kernel stack, then back off by the same deltas
 * task_pt_regs() uses.
 */
#define current_pt_regs() \
	((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \
	STACK_TOP_DELTA - (KSTK_PTREGS_GAP - 1)) - 1)
#define task_sp(task) (task_pt_regs(task)->sp)
#define task_pc(task) (task_pt_regs(task)->pc)
/* Aliases for pc and sp (used in fs/proc/array.c) */
#define KSTK_EIP(task) task_pc(task)
#define KSTK_ESP(task) task_sp(task)

/* Fine-grained unaligned JIT support */
#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
/* Standard format for printing registers and other word-size data. */
#ifdef __tilegx__
# define REGFMT "0x%016lx"
#else
# define REGFMT "0x%08lx"
#endif

/*
 * Do some slow action (e.g. read a slow SPR).
 * Note that this must also have compiler-barrier semantics since
 * it may be used in a busy loop reading memory.
 */
static inline void cpu_relax(void)
{
	__insn_mfspr(SPR_PASS);	/* deliberately slow SPR read (see above) */
	barrier();	/* force memory to be re-read around spin loops */
}
/* Info on this processor (see fs/proc/cpuinfo.c) */
struct seq_operations;
extern const struct seq_operations cpuinfo_op;

/* Provide information about the chip model. */
extern char chip_model[64];

/* Data on which physical memory controller corresponds to which NUMA node. */
extern int node_controller[];

/* Does the heap allocator return hash-for-home pages by default? */
extern int hash_default;

/* Should kernel stack pages be hash-for-home? */
extern int kstack_hash;

/* Does MAP_ANONYMOUS return hash-for-home pages by default? */
#define uheap_hash hash_default

/* Are we using huge pages in the TLB for kernel data? */
extern int kdata_huge;

/* Support standard Linux prefetching. */
#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch(x)
#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE()

/* Bring a value into the L1D, faulting the TLB if necessary. */
#ifdef __tilegx__
#define prefetch_L1(x) __insn_prefetch_l1_fault((void *)(x))
#else
#define prefetch_L1(x) __insn_prefetch_L1((void *)(x))
#endif
#else /* __ASSEMBLY__ */

/* Do some slow action (e.g. read a slow SPR); assembly twin of cpu_relax(). */
#define CPU_RELAX mfspr zero, SPR_PASS

#endif /* !__ASSEMBLY__ */

/* Assembly code assumes that the PL is in the low bits. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != 0
# error Fix assembly assumptions about PL
#endif

/* We sometimes use these macros for EX_CONTEXT_0_1 as well. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != SPR_EX_CONTEXT_0_1__PL_SHIFT || \
	SPR_EX_CONTEXT_1_1__PL_RMASK != SPR_EX_CONTEXT_0_1__PL_RMASK || \
	SPR_EX_CONTEXT_1_1__ICS_SHIFT != SPR_EX_CONTEXT_0_1__ICS_SHIFT || \
	SPR_EX_CONTEXT_1_1__ICS_RMASK != SPR_EX_CONTEXT_0_1__ICS_RMASK
# error Fix assumptions that EX1 macros work for both PL0 and PL1
#endif
/*
 * Allow pulling apart and recombining the PL and ICS bits in EX_CONTEXT.
 * (Per the SPR field names, "PL" is the privilege level and "ICS" is the
 * interrupt-critical-section bit.)
 */
#define EX1_PL(ex1) \
	(((ex1) >> SPR_EX_CONTEXT_1_1__PL_SHIFT) & SPR_EX_CONTEXT_1_1__PL_RMASK)
#define EX1_ICS(ex1) \
	(((ex1) >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) & SPR_EX_CONTEXT_1_1__ICS_RMASK)
#define PL_ICS_EX1(pl, ics) \
	(((pl) << SPR_EX_CONTEXT_1_1__PL_SHIFT) | \
	((ics) << SPR_EX_CONTEXT_1_1__ICS_SHIFT))

/*
 * Provide symbolic constants for PLs.
 */
#define USER_PL 0
#if CONFIG_KERNEL_PL == 2
#define GUEST_PL 1
#endif
#define KERNEL_PL CONFIG_KERNEL_PL
/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
#ifdef __tilegx__
/* On tilegx the cpu id lives in the top (64 - CPU_SHIFT) bits. */
#define CPU_SHIFT 48
#if CHIP_VA_WIDTH() > CPU_SHIFT
# error Too many VA bits!
#endif
#define MAX_CPU_ID ((1 << (64 - CPU_SHIFT)) - 1)
#define raw_smp_processor_id() \
	((int)(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) >> CPU_SHIFT))
/*
 * Strip the cpu id: the shift-left/arithmetic-shift-right pair
 * sign-extends the low CPU_SHIFT bits back to a full kernel VA.
 */
#define get_current_ksp0() \
	((unsigned long)(((long)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) << \
	(64 - CPU_SHIFT)) >> (64 - CPU_SHIFT)))
/* Recombine this task's ksp0 with the current cpu id for the SPR. */
#define next_current_ksp0(task) ({ \
	unsigned long __ksp0 = task_ksp0(task) & ((1UL << CPU_SHIFT) - 1); \
	unsigned long __cpu = (long)raw_smp_processor_id() << CPU_SHIFT; \
	__ksp0 | __cpu; \
})
#else
/* On tilepro the cpu id lives in the low LOG2_NR_CPU_IDS bits. */
#define LOG2_NR_CPU_IDS 6
#define MAX_CPU_ID ((1 << LOG2_NR_CPU_IDS) - 1)
#define raw_smp_processor_id() \
	((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & MAX_CPU_ID)
#define get_current_ksp0() \
	(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~MAX_CPU_ID)
/* ksp0 must have its low bits clear so the cpu id can simply be ORed in. */
#define next_current_ksp0(task) ({ \
	unsigned long __ksp0 = task_ksp0(task); \
	int __cpu = raw_smp_processor_id(); \
	BUG_ON(__ksp0 & MAX_CPU_ID); \
	__ksp0 | __cpu; \
})
#endif
#if CONFIG_NR_CPUS > (MAX_CPU_ID + 1)
# error Too many cpus!
#endif
  317. #endif /* _ASM_TILE_PROCESSOR_H */