
/revue/src/linux/arch/powerpc/kernel/setup-common.c

https://github.com/gtvhacker/Logitech-Revue
/*
 * Common boot and setup code for both 32-bit and 64-bit.
 * Extracted from arch/powerpc/kernel/setup_64.c.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/lmb.h>
#include <asm/xmon.h>

#include "setup.h"

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* The main machine-dep calls structure
 */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);

unsigned long klimit = (unsigned long) _end;

/*
 * This still seems to be needed... -- paulus
 */
struct screen_info screen_info = {
        .orig_x = 0,
        .orig_y = 25,
        .orig_video_cols = 80,
        .orig_video_lines = 25,
        .orig_video_isVGA = 1,
        .orig_video_points = 16
};

#ifdef __DO_IRQ_CANON
/* XXX should go elsewhere eventually */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif

/* also used by kexec */
void machine_shutdown(void)
{
        if (ppc_md.machine_shutdown)
                ppc_md.machine_shutdown();
}

void machine_restart(char *cmd)
{
        machine_shutdown();
        if (ppc_md.restart)
                ppc_md.restart(cmd);
#ifdef CONFIG_SMP
        smp_send_stop();
#endif
        printk(KERN_EMERG "System Halted, OK to turn off power\n");
        local_irq_disable();
        while (1) ;
}

void machine_power_off(void)
{
        machine_shutdown();
        if (ppc_md.power_off)
                ppc_md.power_off();
#ifdef CONFIG_SMP
        smp_send_stop();
#endif
        printk(KERN_EMERG "System Halted, OK to turn off power\n");
        local_irq_disable();
        while (1) ;
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);

void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void machine_halt(void)
{
        machine_shutdown();
        if (ppc_md.halt)
                ppc_md.halt();
#ifdef CONFIG_SMP
        smp_send_stop();
#endif
        printk(KERN_EMERG "System Halted, OK to turn off power\n");
        local_irq_disable();
        while (1) ;
}

#ifdef CONFIG_TAU
extern u32 cpu_temp(unsigned long cpu);
extern u32 cpu_temp_both(unsigned long cpu);
#endif /* CONFIG_TAU */

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned int, pvr);
#endif

static int show_cpuinfo(struct seq_file *m, void *v)
{
        unsigned long cpu_id = (unsigned long)v - 1;
        unsigned int pvr;
        unsigned short maj;
        unsigned short min;

        if (cpu_id == NR_CPUS) {
#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
                unsigned long bogosum = 0;
                int i;
                for_each_online_cpu(i)
                        bogosum += loops_per_jiffy;
                seq_printf(m, "total bogomips\t: %lu.%02lu\n",
                           bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
#endif /* CONFIG_SMP && CONFIG_PPC32 */
                seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
                if (ppc_md.name)
                        seq_printf(m, "platform\t: %s\n", ppc_md.name);
                if (ppc_md.show_cpuinfo != NULL)
                        ppc_md.show_cpuinfo(m);

                return 0;
        }

        /* We only show online cpus: disable preempt (overzealous, I
         * know) to prevent cpu going down. */
        preempt_disable();
        if (!cpu_online(cpu_id)) {
                preempt_enable();
                return 0;
        }

#ifdef CONFIG_SMP
        pvr = per_cpu(pvr, cpu_id);
#else
        pvr = mfspr(SPRN_PVR);
#endif
        maj = (pvr >> 8) & 0xFF;
        min = pvr & 0xFF;

        seq_printf(m, "processor\t: %lu\n", cpu_id);
        seq_printf(m, "cpu\t\t: ");

        if (cur_cpu_spec->pvr_mask)
                seq_printf(m, "%s", cur_cpu_spec->cpu_name);
        else
                seq_printf(m, "unknown (%08x)", pvr);

#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                seq_printf(m, ", altivec supported");
#endif /* CONFIG_ALTIVEC */

        seq_printf(m, "\n");

#ifdef CONFIG_TAU
        if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
#ifdef CONFIG_TAU_AVERAGE
                /* more straightforward, but potentially misleading */
                seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
                           cpu_temp(cpu_id));
#else
                /* show the actual temp sensor range */
                u32 temp;
                temp = cpu_temp_both(cpu_id);
                seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
                           temp & 0xff, temp >> 16);
#endif
        }
#endif /* CONFIG_TAU */

        /*
         * Assume here that all clock rates are the same in a
         * smp system. -- Cort
         */
        if (ppc_proc_freq)
                seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
                           ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);

        if (ppc_md.show_percpuinfo != NULL)
                ppc_md.show_percpuinfo(m, cpu_id);

        /* If we are a Freescale core do a simple check so
         * we don't have to keep adding cases in the future */
        if (PVR_VER(pvr) & 0x8000) {
                maj = PVR_MAJ(pvr);
                min = PVR_MIN(pvr);
        } else {
                switch (PVR_VER(pvr)) {
                case 0x0020:    /* 403 family */
                        maj = PVR_MAJ(pvr) + 1;
                        min = PVR_MIN(pvr);
                        break;
                case 0x1008:    /* 740P/750P ?? */
                        maj = ((pvr >> 8) & 0xFF) - 1;
                        min = pvr & 0xFF;
                        break;
                default:
                        maj = (pvr >> 8) & 0xFF;
                        min = pvr & 0xFF;
                        break;
                }
        }

        seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
                   maj, min, PVR_VER(pvr), PVR_REV(pvr));

#ifdef CONFIG_PPC32
        seq_printf(m, "bogomips\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

#ifdef CONFIG_SMP
        seq_printf(m, "\n");
#endif

        preempt_enable();
        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        unsigned long i = *pos;

        return i <= NR_CPUS ? (void *)(i + 1) : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};

void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        DBG(" -> check_for_initrd() initrd_start=0x%lx initrd_end=0x%lx\n",
            initrd_start, initrd_end);

        /* If we were passed an initrd, set the ROOT_DEV properly if the values
         * look sensible. If not, clear initrd reference.
         */
        if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
            initrd_end > initrd_start)
                ROOT_DEV = Root_RAM0;
        else
                initrd_start = initrd_end = 0;

        if (initrd_start)
                printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

        DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}

#ifdef CONFIG_SMP

/**
 * setup_cpu_maps - initialize the following cpu maps:
 *                  cpu_possible_map
 *                  cpu_present_map
 *                  cpu_sibling_map
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_map as they come up.
 *
 * This function is valid only for Open Firmware systems.  finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 *
 * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
 */
void __init smp_setup_cpu_maps(void)
{
        struct device_node *dn = NULL;
        int cpu = 0;

        while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
                const int *intserv;
                int j, len = sizeof(u32), nthreads = 1;

                intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
                                          &len);
                if (intserv)
                        nthreads = len / sizeof(int);
                else {
                        intserv = of_get_property(dn, "reg", NULL);
                        if (!intserv)
                                intserv = &cpu; /* assume logical == phys */
                }

                for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
                        cpu_set(cpu, cpu_present_map);
                        set_hard_smp_processor_id(cpu, intserv[j]);
                        cpu_set(cpu, cpu_possible_map);
                        cpu++;
                }
        }

#ifdef CONFIG_PPC64
        /*
         * On pSeries LPAR, we need to know how many cpus
         * could possibly be added to this partition.
         */
        if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
            (dn = of_find_node_by_path("/rtas"))) {
                int num_addr_cell, num_size_cell, maxcpus;
                const unsigned int *ireg;

                num_addr_cell = of_n_addr_cells(dn);
                num_size_cell = of_n_size_cells(dn);

                ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL);

                if (!ireg)
                        goto out;

                maxcpus = ireg[num_addr_cell + num_size_cell];

                /* Double maxcpus for processors which have SMT capability */
                if (cpu_has_feature(CPU_FTR_SMT))
                        maxcpus *= 2;

                if (maxcpus > NR_CPUS) {
                        printk(KERN_WARNING
                               "Partition configured for %d cpus, "
                               "operating system maximum is %d.\n",
                               maxcpus, NR_CPUS);
                        maxcpus = NR_CPUS;
                } else
                        printk(KERN_INFO "Partition configured for %d cpus.\n",
                               maxcpus);

                for (cpu = 0; cpu < maxcpus; cpu++)
                        cpu_set(cpu, cpu_possible_map);
        out:
                of_node_put(dn);
        }

        /*
         * Do the sibling map; assume only two threads per processor.
         */
        for_each_possible_cpu(cpu) {
                cpu_set(cpu, cpu_sibling_map[cpu]);
                if (cpu_has_feature(CPU_FTR_SMT))
                        cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
        }

        vdso_data->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */
}
#endif /* CONFIG_SMP */

static __init int add_pcspkr(void)
{
        struct device_node *np;
        struct platform_device *pd;
        int ret;

        np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
        of_node_put(np);
        if (!np)
                return -ENODEV;

        pd = platform_device_alloc("pcspkr", -1);
        if (!pd)
                return -ENOMEM;

        ret = platform_device_add(pd);
        if (ret)
                platform_device_put(pd);

        return ret;
}
device_initcall(add_pcspkr);

void probe_machine(void)
{
        extern struct machdep_calls __machine_desc_start;
        extern struct machdep_calls __machine_desc_end;

        /*
         * Iterate all ppc_md structures until we find the proper
         * one for the current machine type
         */
        DBG("Probing machine type ...\n");

        for (machine_id = &__machine_desc_start;
             machine_id < &__machine_desc_end;
             machine_id++) {
                DBG("  %s ...", machine_id->name);
                memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
                if (ppc_md.probe()) {
                        DBG(" match !\n");
                        break;
                }
                DBG("\n");
        }

        /* What can we do if we didn't find ? */
        if (machine_id >= &__machine_desc_end) {
                DBG("No suitable machine found !\n");
                for (;;);
        }

        printk(KERN_INFO "Using %s machine description\n", ppc_md.name);
}

/* Match a class of boards, not a specific device configuration. */
int check_legacy_ioport(unsigned long base_port)
{
        struct device_node *parent, *np = NULL;
        int ret = -ENODEV;

        switch(base_port) {
        case I8042_DATA_REG:
                if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
                        np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
                if (np) {
                        parent = of_get_parent(np);
                        of_node_put(np);
                        np = parent;
                        break;
                }
                np = of_find_node_by_type(NULL, "8042");
                /* Pegasos has no device_type on its 8042 node, look for the
                 * name instead */
                if (!np)
                        np = of_find_node_by_name(NULL, "8042");
                break;
        case FDC_BASE: /* FDC1 */
                np = of_find_node_by_type(NULL, "fdc");
                break;
#ifdef CONFIG_PPC_PREP
        case _PIDXR:
        case _PNPWRP:
        case PNPBIOS_BASE:
                /* implement me */
#endif
        default:
                /* ipmi is supposed to fail here */
                break;
        }
        if (!np)
                return ret;
        parent = of_get_parent(np);
        if (parent) {
                if (strcmp(parent->type, "isa") == 0)
                        ret = 0;
                of_node_put(parent);
        }
        of_node_put(np);
        return ret;
}
EXPORT_SYMBOL(check_legacy_ioport);

static int ppc_panic_event(struct notifier_block *this,
                           unsigned long event, void *ptr)
{
        ppc_md.panic(ptr);  /* May not return */
        return NOTIFY_DONE;
}

static struct notifier_block ppc_panic_block = {
        .notifier_call = ppc_panic_event,
        .priority = INT_MIN /* may not return; must be done last */
};

void __init setup_panic(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}

#ifdef CONFIG_CHECK_CACHE_COHERENCY
/*
 * For platforms that have configurable cache-coherency.  This function
 * checks that the cache coherency setting of the kernel matches the setting
 * left by the firmware, as indicated in the device tree.  Since a mismatch
 * will eventually result in DMA failures, we print an error and call
 * BUG() in that case.
 */

#ifdef CONFIG_NOT_COHERENT_CACHE
#define KERNEL_COHERENCY 0
#else
#define KERNEL_COHERENCY 1
#endif

static int __init check_cache_coherency(void)
{
        struct device_node *np;
        const void *prop;
        int devtree_coherency;

        np = of_find_node_by_path("/");
        prop = of_get_property(np, "coherency-off", NULL);
        of_node_put(np);

        devtree_coherency = prop ? 0 : 1;

        if (devtree_coherency != KERNEL_COHERENCY) {
                printk(KERN_ERR
                        "kernel coherency:%s != device tree_coherency:%s\n",
                        KERNEL_COHERENCY ? "on" : "off",
                        devtree_coherency ? "on" : "off");
                BUG();
        }

        return 0;
}

late_initcall(check_cache_coherency);
#endif /* CONFIG_CHECK_CACHE_COHERENCY */

#ifdef CONFIG_DEBUG_FS
struct dentry *powerpc_debugfs_root;

static int powerpc_debugfs_init(void)
{
        powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL);

        return powerpc_debugfs_root == NULL;
}
arch_initcall(powerpc_debugfs_init);
#endif