
/arch/s390/kernel/early.c

https://github.com/tklauser/linux-nios2
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/lockdep.h>
#include <linux/extable.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include "entry.h"
/*
 * Create a Kernel NSS if the SAVESYS= parameter is defined
 */
#define DEFSYS_CMD_SIZE		128
#define SAVESYS_CMD_SIZE	32

char kernel_nss_name[NSS_NAME_SIZE + 1];

static void __init setup_boot_command_line(void);
/*
 * Get the TOD clock running.
 */
static void __init reset_tod_clock(void)
{
	u64 time;

	if (store_tod_clock(&time) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
		disabled_wait(0);

	sched_clock_base_cc = TOD_UNIX_EPOCH;
	S390_lowcore.last_update_clock = sched_clock_base_cc;
}
#ifdef CONFIG_SHARED_KERNEL
int __init savesys_ipl_nss(char *cmd, const int cmdlen);
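/*
 * The assembler stub below saves the caller's registers, switches to
 * 31-bit addressing mode, hands the (EBCDIC) CP command in %r2 with its
 * length in %r3 to CP via DIAGNOSE X'08', switches back to 64-bit mode
 * and returns CP's response code.
 */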
asm(
	"	.section .init.text,\"ax\",@progbits\n"
	"	.align	4\n"
	"	.type	savesys_ipl_nss, @function\n"
	"savesys_ipl_nss:\n"
	"	stmg	6,15,48(15)\n"
	"	lgr	14,3\n"
	"	sam31\n"
	"	diag	2,14,0x8\n"
	"	sam64\n"
	"	lgr	2,14\n"
	"	lmg	6,15,48(15)\n"
	"	br	14\n"
	"	.size savesys_ipl_nss, .-savesys_ipl_nss\n"
	"	.previous\n");

static __initdata char upper_command_line[COMMAND_LINE_SIZE];
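/*
 * If SAVESYS=<name> was given on the command line, define a named saved
 * system (NSS): build a CP DEFSYS command that describes the kernel image
 * (the shared text between _stext and _eshared as a read-only SR segment,
 * the rest as exclusive-write EW segments, plus an optional initrd
 * segment), then save and IPL the NSS with a SAVESYS/IPL command issued
 * through the DIAGNOSE X'08' stub above.
 */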
static noinline __init void create_kernel_nss(void)
{
	unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned int sinitrd_pfn, einitrd_pfn;
#endif
	int response;
	int hlen;
	size_t len;
	char *savesys_ptr;
	char defsys_cmd[DEFSYS_CMD_SIZE];
	char savesys_cmd[SAVESYS_CMD_SIZE];

	/* Do nothing if we are not running under VM */
	if (!MACHINE_IS_VM)
		return;

	/* Convert COMMAND_LINE to upper case */
	for (i = 0; i < strlen(boot_command_line); i++)
		upper_command_line[i] = toupper(boot_command_line[i]);

	savesys_ptr = strstr(upper_command_line, "SAVESYS=");
	if (!savesys_ptr)
		return;

	savesys_ptr += 8;	/* Point to the beginning of the NSS name */
	for (i = 0; i < NSS_NAME_SIZE; i++) {
		if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
			break;
		kernel_nss_name[i] = savesys_ptr[i];
	}

	stext_pfn = PFN_DOWN(__pa(&_stext));
	eshared_pfn = PFN_DOWN(__pa(&_eshared));
	end_pfn = PFN_UP(__pa(&_end));
	min_size = end_pfn << 2;

	hlen = snprintf(defsys_cmd, DEFSYS_CMD_SIZE,
			"DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
			kernel_nss_name, stext_pfn - 1, stext_pfn,
			eshared_pfn - 1, eshared_pfn, end_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
		einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
		min_size = einitrd_pfn << 2;
		hlen += snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
				 " EW %.5X-%.5X", sinitrd_pfn, einitrd_pfn);
	}
#endif

	snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
		 " EW MINSIZE=%.7iK PARMREGS=0-13", min_size);
	defsys_cmd[DEFSYS_CMD_SIZE - 1] = '\0';
	snprintf(savesys_cmd, SAVESYS_CMD_SIZE, "SAVESYS %s \n IPL %s",
		 kernel_nss_name, kernel_nss_name);
	savesys_cmd[SAVESYS_CMD_SIZE - 1] = '\0';

	__cpcmd(defsys_cmd, NULL, 0, &response);

	if (response != 0) {
		pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
			response);
		kernel_nss_name[0] = '\0';
		return;
	}

	len = strlen(savesys_cmd);
	ASCEBC(savesys_cmd, len);
	response = savesys_ipl_nss(savesys_cmd, len);

	/* On success: response is equal to the command size,
	 *	       max SAVESYS_CMD_SIZE
	 * On error: response contains the numeric portion of cp error message.
	 *	     for SAVESYS it will be >= 263
	 *	     for missing privilege class, it will be 1
	 */
	if (response > SAVESYS_CMD_SIZE || response == 1) {
		pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
			response);
		kernel_nss_name[0] = '\0';
		return;
	}

	/* re-initialize cputime accounting. */
	sched_clock_base_cc = get_tod_clock();
	S390_lowcore.last_update_clock = sched_clock_base_cc;
	S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
	S390_lowcore.user_timer = 0;
	S390_lowcore.system_timer = 0;
	asm volatile("SPT 0(%0)" : : "a" (&S390_lowcore.last_update_timer));

	/* re-setup boot command line with new ipl vm parms */
	ipl_update_parameters();
	setup_boot_command_line();

	ipl_flags = IPL_NSS_VALID;
}
#else /* CONFIG_SHARED_KERNEL */

static inline void create_kernel_nss(void) { }

#endif /* CONFIG_SHARED_KERNEL */

/*
 * Clear bss memory
 */
static noinline __init void clear_bss_section(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}
/*
 * Initialize storage key for kernel pages
 */
static noinline __init void init_kernel_storage_key(void)
{
#if PAGE_DEFAULT_KEY
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(&_end));

	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT,
				     PAGE_DEFAULT_KEY, 0);
#endif
}

static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
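/*
 * Find out what we are running on: STSI with function code 0 returns the
 * current configuration level; 2 or less means we run on the bare machine
 * or in an LPAR. Level 3 means a hypervisor is present; STSI 3.2.2 then
 * names the control program, and an EBCDIC "KVM" prefix (0xd2 0xe5 0xd4)
 * distinguishes KVM from z/VM.
 */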
static noinline __init void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;

	/* Running under KVM? If not we assume z/VM */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
	else
		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
/* Remove leading, trailing and double whitespace. */
static inline void strim_all(char *str)
{
	char *s;

	s = strim(str);
	if (s != str)
		memmove(str, s, strlen(s));
	while (*str) {
		if (!isspace(*str++))
			continue;
		if (isspace(*str)) {
			s = skip_spaces(str);
			memmove(str, s, strlen(s) + 1);
		}
	}
}
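/*
 * Build a one-line machine description from STSI 1.1.1 (manufacturer,
 * type, model, model capacity) and the hypervisor name from STSI 3.2.2,
 * convert it from EBCDIC to ASCII and register it so that dump_stack()
 * and oops output show what we are running on.
 */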
static noinline __init void setup_arch_string(void)
{
	struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;
	struct sysinfo_3_2_2 *vm = (struct sysinfo_3_2_2 *)&sysinfo_page;
	char mstr[80], hvstr[17];

	if (stsi(mach, 1, 1, 1))
		return;
	EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
	EBCASC(mach->type, sizeof(mach->type));
	EBCASC(mach->model, sizeof(mach->model));
	EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
	sprintf(mstr, "%-16.16s %-4.4s %-16.16s %-16.16s",
		mach->manufacturer, mach->type,
		mach->model, mach->model_capacity);
	strim_all(mstr);
	if (stsi(vm, 3, 2, 2) == 0 && vm->count) {
		EBCASC(vm->vm[0].cpi, sizeof(vm->vm[0].cpi));
		sprintf(hvstr, "%-16.16s", vm->vm[0].cpi);
		strim_all(hvstr);
	} else {
		sprintf(hvstr, "%s",
			MACHINE_IS_LPAR ? "LPAR" :
			MACHINE_IS_VM ? "z/VM" :
			MACHINE_IS_KVM ? "KVM" : "unknown");
	}
	dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
}
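/*
 * Facility 11 is the configuration-topology facility. Probe the deepest
 * topology nesting level the machine supports by trying STSI 15.1.x
 * selectors from 6 down to 2.
 */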
static __init void setup_topology(void)
{
	int max_mnest;

	if (!test_facility(11))
		return;
	S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
			break;
	}
	topology_max_mnest = max_mnest;
}
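/*
 * Program checks taken during early boot (for example by the DIAGNOSE
 * probes below) are resolved through the kernel exception table: look up
 * a fixup for the faulting address and redirect the old PSW there, or
 * stop in a disabled wait if there is none. Low-address protection is
 * switched off briefly because the old PSW lives in the lowcore.
 */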
static void early_pgm_check_handler(void)
{
	const struct exception_table_entry *fixup;
	unsigned long cr0, cr0_new;
	unsigned long addr;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr);
	if (!fixup)
		disabled_wait(0);
	/* Disable low address protection before storing into lowcore. */
	__ctl_store(cr0, 0, 0);
	cr0_new = cr0 & ~(1UL << 28);
	__ctl_load(cr0_new, 0, 0);
	S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
	__ctl_load(cr0, 0, 0);
}
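/*
 * Install temporary new PSWs for external interrupts and program checks
 * that point to the early base handlers; program checks are then routed
 * to early_pgm_check_handler() above.
 */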
static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
	psw.addr = (unsigned long) s390_base_ext_handler;
	S390_lowcore.external_new_psw = psw;
	psw.addr = (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = early_pgm_check_handler;
	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}

static noinline __init void setup_facility_list(void)
{
	stfle(S390_lowcore.stfle_fac_list,
	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
}
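/*
 * Probe optional z/VM diagnoses by simply issuing them: DIAGNOSE X'9C'
 * (directed yield to a specific CPU) and DIAGNOSE X'44' (voluntary time
 * slice end). If the hypervisor does not implement a diagnose, the
 * resulting program check is caught via the EX_TABLE entry and rc stays
 * at -EOPNOTSUPP.
 */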
static __init void detect_diag9c(void)
{
	unsigned int cpu_address;
	int rc;

	cpu_address = stap();
	diag_stat_inc(DIAG_STAT_X09C);
	asm volatile(
		"	diag	%2,0,0x9c\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}

static __init void detect_diag44(void)
{
	int rc;

	diag_stat_inc(DIAG_STAT_X044);
	asm volatile(
		"	diag	0,0,0x44\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
}
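/*
 * Translate STFLE facility bits into machine flags: 8 = enhanced DAT 1,
 * 78 = enhanced DAT 2, 3 = IDTE, 40 = load-program-parameter, 50 + 73 =
 * transactional execution, 51 = local TLB clearing, 129 = vector
 * facility, 130 = instruction-execution protection (no-execute),
 * 133 = guarded storage. Some of them additionally need their enablement
 * bit set in control register 0.
 */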
static __init void detect_machine_facilities(void)
{
	if (test_facility(8)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
		__ctl_set_bit(0, 23);
	}
	if (test_facility(78))
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
	if (test_facility(3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
	if (test_facility(40))
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
	if (test_facility(50) && test_facility(73))
		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
	if (test_facility(51))
		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
	if (test_facility(129)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
		__ctl_set_bit(0, 17);
	}
	if (test_facility(130)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
		__ctl_set_bit(0, 20);
	}
	if (test_facility(133))
		S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
}

static inline void save_vector_registers(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (test_facility(129))
		save_vx_regs(boot_cpu_vector_save_area);
#endif
}
static int __init topology_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (!rc && !enabled)
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_TOPOLOGY;
	return rc;
}
early_param("topology", topology_setup);

static int __init disable_vector_extension(char *str)
{
	S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
	__ctl_clear_bit(0, 17);
	return 1;
}
early_param("novx", disable_vector_extension);

static int __init noexec_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (!rc && !enabled) {
		/* Disable no-execute support */
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_NX;
		__ctl_clear_bit(0, 20);
	}
	return rc;
}
early_param("noexec", noexec_setup);

static int __init cad_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (!rc && enabled && test_facility(128))
		/* Enable problem state CAD. */
		__ctl_set_bit(2, 3);
	return rc;
}
early_param("cad", cad_setup);
static __init void memmove_early(void *dst, const void *src, size_t n)
{
	unsigned long addr;
	long incr;
	psw_t old;

	if (!n)
		return;
	incr = 1;
	if (dst > src) {
		incr = -incr;
		dst += n - 1;
		src += n - 1;
	}
	old = S390_lowcore.program_new_psw;
	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		"	larl	%[addr],1f\n"
		"	stg	%[addr],%[psw_pgm_addr]\n"
		"0:	mvc	0(1,%[dst]),0(%[src])\n"
		"	agr	%[dst],%[incr]\n"
		"	agr	%[src],%[incr]\n"
		"	brctg	%[n],0b\n"
		"1:\n"
		: [addr] "=&d" (addr),
		  [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr),
		  [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
		: [incr] "d" (incr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
}
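/*
 * Copy the IPL parameter block, whose address the boot loader left in the
 * lowcore, to its defined location so that the IPL code can find it
 * later.
 */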
static __init noinline void ipl_save_parameters(void)
{
	void *src, *dst;

	src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
	dst = (void *) IPL_PARMBLOCK_ORIGIN;
	memmove_early(dst, src, PAGE_SIZE);
	S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}

static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);

	/*
	 * Just like in case of IPL from VM reader we make sure there is a
	 * gap of 4MB between end of kernel and start of initrd.
	 * That way we can also be sure that saving an NSS will succeed,
	 * which however only requires different segments.
	 */
	if (!INITRD_START || !INITRD_SIZE)
		return;
	if (INITRD_START >= min_initrd_addr)
		return;
	memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
	INITRD_START = min_initrd_addr;
#endif
}
/* Set up boot command line */
static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
	char *parm, *delim;
	size_t rc, len;

	len = strlen(boot_command_line);

	delim = boot_command_line + len;	/* '\0' character position */
	parm = boot_command_line + len + 1;	/* append right after '\0' */

	rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
	if (rc) {
		if (*parm == '=')
			memmove(boot_command_line, parm + 1, rc);
		else
			*delim = ' ';		/* replace '\0' with space */
	}
}

static inline int has_ebcdic_char(const char *str)
{
	int i;

	for (i = 0; str[i]; i++)
		if (str[i] & 0x80)
			return 1;
	return 0;
}
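/*
 * The command line handed over by the boot loader may still be in EBCDIC
 * (for instance when booting from the z/VM reader), so convert it to
 * ASCII if any byte has the top bit set, then append the IPL VM
 * parameters and the SCP data.
 */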
static void __init setup_boot_command_line(void)
{
	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
	/* convert arch command line to ascii if necessary */
	if (has_ebcdic_char(COMMAND_LINE))
		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
	/* copy arch command line */
	strlcpy(boot_command_line, strstrip(COMMAND_LINE),
		ARCH_COMMAND_LINE_SIZE);

	/* append IPL PARM data to the boot command line */
	if (MACHINE_IS_VM)
		append_to_cmdline(append_ipl_vmparm);

	append_to_cmdline(append_ipl_scpdata);
}

/*
 * Save ipl parameters, clear bss memory, initialize storage keys
 * and create a kernel NSS at startup if the SAVESYS= parm is defined
 */
void __init startup_init(void)
{
	reset_tod_clock();
	ipl_save_parameters();
	rescue_initrd();
	clear_bss_section();
	ipl_verify_parameters();
	time_early_init();
	init_kernel_storage_key();
	lockdep_off();
	setup_lowcore_early();
	setup_facility_list();
	detect_machine_type();
	setup_arch_string();
	ipl_update_parameters();
	setup_boot_command_line();
	create_kernel_nss();
	detect_diag9c();
	detect_diag44();
	detect_machine_facilities();
	save_vector_registers();
	setup_topology();
	sclp_early_detect();
	lockdep_on();
}