/kern_2.6.32/arch/mips/cavium-octeon/smp.c

http://omnia2droid.googlecode.com/ · C · 444 lines · 305 code · 74 blank · 65 comment · 27 complexity · 25a000d2ad93cec3934dd09480f3d8ea MD5 · raw file

  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2004-2008 Cavium Networks
  7. */
  8. #include <linux/cpu.h>
  9. #include <linux/init.h>
  10. #include <linux/delay.h>
  11. #include <linux/smp.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/kernel_stat.h>
  14. #include <linux/sched.h>
  15. #include <linux/module.h>
  16. #include <asm/mmu_context.h>
  17. #include <asm/system.h>
  18. #include <asm/time.h>
  19. #include <asm/octeon/octeon.h>
  20. #include "octeon_boot.h"
/*
 * Boot handshake variables shared with the secondary-core startup code.
 * octeon_boot_secondary() publishes the idle thread's SP/GP and the
 * core id to release, then spins until octeon_processor_sp is cleared
 * (presumably by the secondary core's startup path, which is not in
 * this file -- TODO confirm).  "volatile" keeps the compiler from
 * caching the polled values.
 */
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;

#ifdef CONFIG_HOTPLUG_CPU
/* Bootloader-provided address used when re-starting a hot-plugged core;
   filled in by octeon_smp_hotplug_setup(). */
static unsigned int InitTLBStart_addr;
#endif
  27. static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
  28. {
  29. const int coreid = cvmx_get_core_num();
  30. uint64_t action;
  31. /* Load the mailbox register to figure out what we're supposed to do */
  32. action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid));
  33. /* Clear the mailbox to clear the interrupt */
  34. cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
  35. if (action & SMP_CALL_FUNCTION)
  36. smp_call_function_interrupt();
  37. /* Check if we've been told to flush the icache */
  38. if (action & SMP_ICACHE_FLUSH)
  39. asm volatile ("synci 0($0)\n");
  40. return IRQ_HANDLED;
  41. }
  42. /**
  43. * Cause the function described by call_data to be executed on the passed
  44. * cpu. When the function has finished, increment the finished field of
  45. * call_data.
  46. */
  47. void octeon_send_ipi_single(int cpu, unsigned int action)
  48. {
  49. int coreid = cpu_logical_map(cpu);
  50. /*
  51. pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
  52. coreid, action);
  53. */
  54. cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
  55. }
  56. static inline void octeon_send_ipi_mask(const struct cpumask *mask,
  57. unsigned int action)
  58. {
  59. unsigned int i;
  60. for_each_cpu_mask(i, *mask)
  61. octeon_send_ipi_single(i, action);
  62. }
  63. /**
  64. * Detect available CPUs, populate cpu_possible_map
  65. */
  66. static void octeon_smp_hotplug_setup(void)
  67. {
  68. #ifdef CONFIG_HOTPLUG_CPU
  69. uint32_t labi_signature;
  70. labi_signature =
  71. cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
  72. LABI_ADDR_IN_BOOTLOADER +
  73. offsetof(struct linux_app_boot_info,
  74. labi_signature)));
  75. if (labi_signature != LABI_SIGNATURE)
  76. pr_err("The bootloader version on this board is incorrect\n");
  77. InitTLBStart_addr =
  78. cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
  79. LABI_ADDR_IN_BOOTLOADER +
  80. offsetof(struct linux_app_boot_info,
  81. InitTLBStart_addr)));
  82. #endif
  83. }
  84. static void octeon_smp_setup(void)
  85. {
  86. const int coreid = cvmx_get_core_num();
  87. int cpus;
  88. int id;
  89. int core_mask = octeon_get_boot_coremask();
  90. cpus_clear(cpu_possible_map);
  91. __cpu_number_map[coreid] = 0;
  92. __cpu_logical_map[0] = coreid;
  93. cpu_set(0, cpu_possible_map);
  94. cpus = 1;
  95. for (id = 0; id < 16; id++) {
  96. if ((id != coreid) && (core_mask & (1 << id))) {
  97. cpu_set(cpus, cpu_possible_map);
  98. __cpu_number_map[id] = cpus;
  99. __cpu_logical_map[cpus] = id;
  100. cpus++;
  101. }
  102. }
  103. cpu_present_map = cpu_possible_map;
  104. octeon_smp_hotplug_setup();
  105. }
  106. /**
  107. * Firmware CPU startup hook
  108. *
  109. */
  110. static void octeon_boot_secondary(int cpu, struct task_struct *idle)
  111. {
  112. int count;
  113. pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
  114. cpu_logical_map(cpu));
  115. octeon_processor_sp = __KSTK_TOS(idle);
  116. octeon_processor_gp = (unsigned long)(task_thread_info(idle));
  117. octeon_processor_boot = cpu_logical_map(cpu);
  118. mb();
  119. count = 10000;
  120. while (octeon_processor_sp && count) {
  121. /* Waiting for processor to get the SP and GP */
  122. udelay(1);
  123. count--;
  124. }
  125. if (count == 0)
  126. pr_err("Secondary boot timeout\n");
  127. }
/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
static void octeon_init_secondary(void)
{
	const int coreid = cvmx_get_core_num();
	union cvmx_ciu_intx_sum0 interrupt_enable;

#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cur_exception_base;

	/* Fetch the exception base the bootloader assigned this core
	   from the LABI structure. */
	cur_exception_base = cvmx_read64_uint32(
		CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
			     LABI_ADDR_IN_BOOTLOADER +
			     offsetof(struct linux_app_boot_info,
				      cur_exception_base)));
	/* cur_exception_base is incremented in bootloader after setting,
	   so back up one increment to get this core's value. */
	write_c0_ebase((unsigned int)(cur_exception_base - EXCEPTION_BASE_INCR));
#endif
	octeon_check_cpu_bist();
	octeon_init_cvmcount();
	/*
	pr_info("SMP: CPU%d (CoreId %lu) started\n", cpu, coreid);
	*/
	/* Enable Mailbox interrupts to this core. These are the only
	   interrupts allowed on line 3 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), 0xffffffff);
	interrupt_enable.u64 = 0;
	interrupt_enable.s.mbox = 0x3;	/* both mailbox bits */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), interrupt_enable.u64);
	/* Mask everything else on this core's other CIU enable registers. */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	/* Enable core interrupt processing for 2,3 and 7 */
	set_c0_status(0x8c01);
}
  163. /**
  164. * Callout to firmware before smp_init
  165. *
  166. */
  167. void octeon_prepare_cpus(unsigned int max_cpus)
  168. {
  169. cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff);
  170. if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED,
  171. "mailbox0", mailbox_interrupt)) {
  172. panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
  173. }
  174. if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_DISABLED,
  175. "mailbox1", mailbox_interrupt)) {
  176. panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n");
  177. }
  178. }
/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
#ifdef CONFIG_CAVIUM_GDB
	unsigned long tmp;
	/* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0
	   to be not masked by this core so we know the signal is received by
	   someone */
	asm volatile ("dmfc0 %0, $22\n"
		      "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
#endif

	octeon_user_io_init();

	/* Arm the CP0 compare register to generate the first CPU timer
	   interrupt one tick (1/HZ) from now. */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
}
/**
 * Hook for after all CPUs are online
 */
static void octeon_cpus_done(void)
{
#ifdef CONFIG_CAVIUM_GDB
	unsigned long tmp;
	/* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also set the MCD0
	   to be not masked by this core so we know the signal is received by
	   someone */
	asm volatile ("dmfc0 %0, $22\n"
		      "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
#endif
}
  211. #ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU; play_dead() sets CPU_DEAD and octeon_cpu_die()
   polls for it before resetting the core. */
DEFINE_PER_CPU(int, cpu_state);

extern void fixup_irqs(void);

/* Serializes concurrent octeon_cpu_disable() calls. */
static DEFINE_SPINLOCK(smp_reserve_lock);
/*
 * Take the current cpu out of service prior to hot-unplug.
 * Returns -EBUSY for cpu 0 (the boot cpu can never go down), 0 otherwise.
 */
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	spin_lock(&smp_reserve_lock);

	/* Drop out of the online/callin maps, then migrate interrupts
	   away with local interrupts disabled. */
	cpu_clear(cpu, cpu_online_map);
	cpu_clear(cpu, cpu_callin_map);
	local_irq_disable();
	fixup_irqs();
	local_irq_enable();

	flush_cache_all();
	local_flush_tlb_all();

	spin_unlock(&smp_reserve_lock);

	return 0;
}
/*
 * Final teardown of a hot-unplugged cpu: wait for it to reach
 * play_dead(), return its core to the bootloader's available-core
 * mask, and pulse the core reset line.
 */
static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	struct cvmx_bootmem_named_block_desc *block_desc;

#ifdef CONFIG_CAVIUM_OCTEON_WATCHDOG
	/* Disable the watchdog */
	cvmx_ciu_wdogx_t ciu_wdog;
	/* NOTE(review): the watchdog register is indexed with 'cpu' here
	   while every other CIU access in this function uses 'coreid' --
	   verify which index CVMX_CIU_WDOGX expects. */
	ciu_wdog.u64 = cvmx_read_csr(CVMX_CIU_WDOGX(cpu));
	ciu_wdog.s.mode = 0;
	cvmx_write_csr(CVMX_CIU_WDOGX(cpu), ciu_wdog.u64);
#endif

	/* Wait until the dying cpu marks itself CPU_DEAD in play_dead(). */
	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * This is a slightly complicated strategy for getting/setting the
	 * available cores mask, copied from the bootloader.
	 */
	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		/* No named block: the mask lives in the LABI structure. */
		avail_coremask =
			cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
						LABI_ADDR_IN_BOOTLOADER +
						offsetof
						(struct linux_app_boot_info,
						 avail_coremask)));
	} else {		       /* alternative, already initialized */
		avail_coremask =
			cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
						block_desc->base_addr +
						AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
	}

	/* Mark this core available again. */
	avail_coremask |= 1 << coreid;

	/* Setting avail_coremask for bootoct binary */
	if (!block_desc) {
		cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
						LABI_ADDR_IN_BOOTLOADER +
						offsetof(struct linux_app_boot_info,
							 avail_coremask)),
				    avail_coremask);
	} else {
		cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
						block_desc->base_addr +
						AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK),
				    avail_coremask);
	}

	pr_info("Reset core %d. Available Coremask = %x \n", coreid,
		avail_coremask);
	/* Pulse the per-core reset bit: assert then deassert. */
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}
  284. void play_dead(void)
  285. {
  286. int coreid = cvmx_get_core_num();
  287. idle_task_exit();
  288. octeon_processor_boot = 0xff;
  289. per_cpu(cpu_state, coreid) = CPU_DEAD;
  290. while (1) /* core will be reset here */
  291. ;
  292. }
extern void kernel_entry(unsigned long arg1, ...);

/* Entry point a restarted core jumps to: re-enter the running kernel. */
static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
}
/*
 * Point the bootloader's boot vector for @cpu's core at
 * start_after_reset() and kick the core with an NMI so it re-enters
 * this kernel.  Always returns 0.
 */
int octeon_update_boot_vector(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	unsigned int avail_coremask;
	struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *) cvmx_phys_to_ptr(0x0 +
						  BOOTLOADER_BOOT_VECTOR);

	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		/* No named block: read the mask from the LABI structure. */
		avail_coremask =
			cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
						LABI_ADDR_IN_BOOTLOADER +
						offsetof(struct linux_app_boot_info,
							 avail_coremask)));
	} else {		       /* alternative, already initialized */
		avail_coremask =
			cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
						block_desc->base_addr +
						AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* core not available, assume it was caught by simple-executive */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = InitTLBStart_addr;

	/* Make sure the vector writes are visible before the NMI. */
	CVMX_SYNC;

	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}
  331. static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
  332. unsigned long action, void *hcpu)
  333. {
  334. unsigned int cpu = (unsigned long)hcpu;
  335. switch (action) {
  336. case CPU_UP_PREPARE:
  337. octeon_update_boot_vector(cpu);
  338. break;
  339. case CPU_ONLINE:
  340. pr_info("Cpu %d online\n", cpu);
  341. break;
  342. case CPU_DEAD:
  343. break;
  344. }
  345. return NOTIFY_OK;
  346. }
static struct notifier_block __cpuinitdata octeon_cpu_notifier = {
	.notifier_call = octeon_cpu_callback,
};

/* Install the hotplug notifier once, late in boot. */
static int __cpuinit register_cavium_notifier(void)
{
	register_hotcpu_notifier(&octeon_cpu_notifier);

	return 0;
}

late_initcall(register_cavium_notifier);
  356. #endif /* CONFIG_HOTPLUG_CPU */
/* Octeon's implementation of the MIPS plat_smp_ops callbacks. */
struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single	= octeon_send_ipi_single,
	.send_ipi_mask		= octeon_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.cpus_done		= octeon_cpus_done,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
};