
/xen/arch/arm/setup.c

https://gitlab.com/martyros/xen
C | 1098 lines | 744 code | 199 blank | 155 comment
  1. /*
  2. * xen/arch/arm/setup.c
  3. *
  4. * Early bringup code for an ARMv7-A with virt extensions.
  5. *
  6. * Tim Deegan <tim@xen.org>
  7. * Copyright (c) 2011 Citrix Systems.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2 of the License, or
  12. * (at your option) any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. */
  19. #include <xen/compile.h>
  20. #include <xen/device_tree.h>
  21. #include <xen/domain_page.h>
  22. #include <xen/grant_table.h>
  23. #include <xen/types.h>
  24. #include <xen/string.h>
  25. #include <xen/serial.h>
  26. #include <xen/sched.h>
  27. #include <xen/console.h>
  28. #include <xen/err.h>
  29. #include <xen/init.h>
  30. #include <xen/irq.h>
  31. #include <xen/mm.h>
  32. #include <xen/param.h>
  33. #include <xen/softirq.h>
  34. #include <xen/keyhandler.h>
  35. #include <xen/cpu.h>
  36. #include <xen/pfn.h>
  37. #include <xen/virtual_region.h>
  38. #include <xen/vmap.h>
  39. #include <xen/trace.h>
  40. #include <xen/libfdt/libfdt.h>
  41. #include <xen/acpi.h>
  42. #include <xen/warning.h>
  43. #include <asm/alternative.h>
  44. #include <asm/page.h>
  45. #include <asm/current.h>
  46. #include <asm/setup.h>
  47. #include <asm/gic.h>
  48. #include <asm/cpuerrata.h>
  49. #include <asm/cpufeature.h>
  50. #include <asm/platform.h>
  51. #include <asm/procinfo.h>
  52. #include <asm/setup.h>
  53. #include <xsm/xsm.h>
  54. #include <asm/acpi.h>
  55. struct bootinfo __initdata bootinfo;
  56. /*
  57. * Sanitized version of cpuinfo containing only features available on all
  58. * cores (only on arm64 as there is no sanitization support on arm32).
  59. */
  60. struct cpuinfo_arm __read_mostly system_cpuinfo;
  61. #ifdef CONFIG_ACPI
  62. bool __read_mostly acpi_disabled;
  63. #endif
  64. #ifdef CONFIG_ARM_32
  65. static unsigned long opt_xenheap_megabytes __initdata;
  66. integer_param("xenheap_megabytes", opt_xenheap_megabytes);
  67. #endif
  68. domid_t __read_mostly max_init_domid;
  69. static __used void init_done(void)
  70. {
  71. /* Must be done past setting system_state. */
  72. unregister_init_virtual_region();
  73. free_init_memory();
  74. startup_cpu_idle_loop();
  75. }
  76. static void __init init_idle_domain(void)
  77. {
  78. scheduler_init();
  79. set_current(idle_vcpu[0]);
  80. /* TODO: setup_idle_pagetable(); */
  81. }
  82. static const char * __initdata processor_implementers[] = {
  83. ['A'] = "ARM Limited",
  84. ['B'] = "Broadcom Corporation",
  85. ['C'] = "Cavium Inc.",
  86. ['D'] = "Digital Equipment Corp",
  87. ['M'] = "Motorola, Freescale Semiconductor Inc.",
  88. ['P'] = "Applied Micro",
  89. ['Q'] = "Qualcomm Inc.",
  90. ['V'] = "Marvell Semiconductor Inc.",
  91. ['i'] = "Intel Corporation",
  92. };
  93. static void __init processor_id(void)
  94. {
  95. const char *implementer = "Unknown";
  96. struct cpuinfo_arm *c = &system_cpuinfo;
  97. identify_cpu(c);
  98. current_cpu_data = *c;
  99. if ( c->midr.implementer < ARRAY_SIZE(processor_implementers) &&
  100. processor_implementers[c->midr.implementer] )
  101. implementer = processor_implementers[c->midr.implementer];
  102. if ( c->midr.architecture != 0xf )
  103. printk("Huh, cpu architecture %x, expected 0xf (defined by cpuid)\n",
  104. c->midr.architecture);
  105. printk("Processor: %"PRIregister": \"%s\", variant: 0x%x, part 0x%03x,"
  106. "rev 0x%x\n", c->midr.bits, implementer,
  107. c->midr.variant, c->midr.part_number, c->midr.revision);
  108. #if defined(CONFIG_ARM_64)
  109. printk("64-bit Execution:\n");
  110. printk(" Processor Features: %016"PRIx64" %016"PRIx64"\n",
  111. system_cpuinfo.pfr64.bits[0], system_cpuinfo.pfr64.bits[1]);
  112. printk(" Exception Levels: EL3:%s EL2:%s EL1:%s EL0:%s\n",
  113. cpu_has_el3_32 ? "64+32" : cpu_has_el3_64 ? "64" : "No",
  114. cpu_has_el2_32 ? "64+32" : cpu_has_el2_64 ? "64" : "No",
  115. cpu_has_el1_32 ? "64+32" : cpu_has_el1_64 ? "64" : "No",
  116. cpu_has_el0_32 ? "64+32" : cpu_has_el0_64 ? "64" : "No");
  117. printk(" Extensions:%s%s%s\n",
  118. cpu_has_fp ? " FloatingPoint" : "",
  119. cpu_has_simd ? " AdvancedSIMD" : "",
  120. cpu_has_gicv3 ? " GICv3-SysReg" : "");
  121. /* Warn user if we find unknown floating-point features */
  122. if ( cpu_has_fp && (boot_cpu_feature64(fp) >= 2) )
  123. printk(XENLOG_WARNING "WARNING: Unknown Floating-point ID:%d, "
  124. "this may result in corruption on the platform\n",
  125. boot_cpu_feature64(fp));
  126. /* Warn user if we find unknown AdvancedSIMD features */
  127. if ( cpu_has_simd && (boot_cpu_feature64(simd) >= 2) )
  128. printk(XENLOG_WARNING "WARNING: Unknown AdvancedSIMD ID:%d, "
  129. "this may result in corruption on the platform\n",
  130. boot_cpu_feature64(simd));
  131. printk(" Debug Features: %016"PRIx64" %016"PRIx64"\n",
  132. system_cpuinfo.dbg64.bits[0], system_cpuinfo.dbg64.bits[1]);
  133. printk(" Auxiliary Features: %016"PRIx64" %016"PRIx64"\n",
  134. system_cpuinfo.aux64.bits[0], system_cpuinfo.aux64.bits[1]);
  135. printk(" Memory Model Features: %016"PRIx64" %016"PRIx64"\n",
  136. system_cpuinfo.mm64.bits[0], system_cpuinfo.mm64.bits[1]);
  137. printk(" ISA Features: %016"PRIx64" %016"PRIx64"\n",
  138. system_cpuinfo.isa64.bits[0], system_cpuinfo.isa64.bits[1]);
  139. #endif
  140. /*
  141. * On AArch64 these refer to the capabilities when running in
  142. * AArch32 mode.
  143. */
  144. if ( cpu_has_aarch32 )
  145. {
  146. printk("32-bit Execution:\n");
  147. printk(" Processor Features: %"PRIregister":%"PRIregister"\n",
  148. system_cpuinfo.pfr32.bits[0], system_cpuinfo.pfr32.bits[1]);
  149. printk(" Instruction Sets:%s%s%s%s%s%s\n",
  150. cpu_has_aarch32 ? " AArch32" : "",
  151. cpu_has_arm ? " A32" : "",
  152. cpu_has_thumb ? " Thumb" : "",
  153. cpu_has_thumb2 ? " Thumb-2" : "",
  154. cpu_has_thumbee ? " ThumbEE" : "",
  155. cpu_has_jazelle ? " Jazelle" : "");
  156. printk(" Extensions:%s%s\n",
  157. cpu_has_gentimer ? " GenericTimer" : "",
  158. cpu_has_security ? " Security" : "");
  159. printk(" Debug Features: %"PRIregister"\n",
  160. system_cpuinfo.dbg32.bits[0]);
  161. printk(" Auxiliary Features: %"PRIregister"\n",
  162. system_cpuinfo.aux32.bits[0]);
  163. printk(" Memory Model Features: %"PRIregister" %"PRIregister"\n"
  164. " %"PRIregister" %"PRIregister"\n",
  165. system_cpuinfo.mm32.bits[0], system_cpuinfo.mm32.bits[1],
  166. system_cpuinfo.mm32.bits[2], system_cpuinfo.mm32.bits[3]);
  167. printk(" ISA Features: %"PRIregister" %"PRIregister" %"PRIregister"\n"
  168. " %"PRIregister" %"PRIregister" %"PRIregister"\n",
  169. system_cpuinfo.isa32.bits[0], system_cpuinfo.isa32.bits[1],
  170. system_cpuinfo.isa32.bits[2], system_cpuinfo.isa32.bits[3],
  171. system_cpuinfo.isa32.bits[4], system_cpuinfo.isa32.bits[5]);
  172. }
  173. else
  174. {
  175. printk("32-bit Execution: Unsupported\n");
  176. }
  177. processor_setup();
  178. }
  179. static void __init dt_unreserved_regions(paddr_t s, paddr_t e,
  180. void (*cb)(paddr_t, paddr_t),
  181. unsigned int first)
  182. {
  183. unsigned int i, nr;
  184. int rc;
  185. rc = fdt_num_mem_rsv(device_tree_flattened);
  186. if ( rc < 0 )
  187. panic("Unable to retrieve the number of reserved regions (rc=%d)\n",
  188. rc);
  189. nr = rc;
  190. for ( i = first; i < nr ; i++ )
  191. {
  192. paddr_t r_s, r_e;
  193. if ( fdt_get_mem_rsv(device_tree_flattened, i, &r_s, &r_e ) < 0 )
  194. /* If we can't read it, pretend it doesn't exist... */
  195. continue;
  196. r_e += r_s; /* fdt_get_mem_rsv returns length */
  197. if ( s < r_e && r_s < e )
  198. {
  199. dt_unreserved_regions(r_e, e, cb, i+1);
  200. dt_unreserved_regions(s, r_s, cb, i+1);
  201. return;
  202. }
  203. }
  204. /*
  205. * i is the current bootmodule we are evaluating across all possible
  206. * kinds.
  207. *
  208. * When retrieving the corresponding reserved-memory addresses
  209. * below, we need to index the bootinfo.reserved_mem bank starting
  210. * from 0, and only counting the reserved-memory modules. Hence,
  211. * we need to use i - nr.
  212. */
  213. for ( ; i - nr < bootinfo.reserved_mem.nr_banks; i++ )
  214. {
  215. paddr_t r_s = bootinfo.reserved_mem.bank[i - nr].start;
  216. paddr_t r_e = r_s + bootinfo.reserved_mem.bank[i - nr].size;
  217. if ( s < r_e && r_s < e )
  218. {
  219. dt_unreserved_regions(r_e, e, cb, i + 1);
  220. dt_unreserved_regions(s, r_s, cb, i + 1);
  221. return;
  222. }
  223. }
  224. cb(s, e);
  225. }
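/*
 * Illustrative walk-through of dt_unreserved_regions(): if [s, e) contains a
 * single reserved range [r_s, r_e), the recursion ends up invoking cb() once
 * for [s, r_s) and once for [r_e, e), so the reserved bytes are never handed
 * to the callback; a range with no overlaps falls through to cb(s, e).
 */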
  226. void __init fw_unreserved_regions(paddr_t s, paddr_t e,
  227. void (*cb)(paddr_t, paddr_t),
  228. unsigned int first)
  229. {
  230. if ( acpi_disabled )
  231. dt_unreserved_regions(s, e, cb, first);
  232. else
  233. cb(s, e);
  234. }
  235. struct bootmodule __init *add_boot_module(bootmodule_kind kind,
  236. paddr_t start, paddr_t size,
  237. bool domU)
  238. {
  239. struct bootmodules *mods = &bootinfo.modules;
  240. struct bootmodule *mod;
  241. unsigned int i;
  242. if ( mods->nr_mods == MAX_MODULES )
  243. {
  244. printk("Ignoring %s boot module at %"PRIpaddr"-%"PRIpaddr" (too many)\n",
  245. boot_module_kind_as_string(kind), start, start + size);
  246. return NULL;
  247. }
  248. for ( i = 0 ; i < mods->nr_mods ; i++ )
  249. {
  250. mod = &mods->module[i];
  251. if ( mod->kind == kind && mod->start == start )
  252. {
  253. if ( !domU )
  254. mod->domU = false;
  255. return mod;
  256. }
  257. }
  258. mod = &mods->module[mods->nr_mods++];
  259. mod->kind = kind;
  260. mod->start = start;
  261. mod->size = size;
  262. mod->domU = domU;
  263. return mod;
  264. }
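/*
 * Note: add_boot_module() does not duplicate a module of the same kind that
 * is already registered at the same start address; the existing entry is
 * returned, and if either registration is for the hardware domain
 * (domU == false) the module is treated as a Xen/dom0 module from then on.
 */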
  265. /*
266. * boot_module_find_by_kind can only be used to return Xen modules (e.g.
  267. * XSM, DTB) or Dom0 modules. This is not suitable for looking up guest
  268. * modules.
  269. */
  270. struct bootmodule * __init boot_module_find_by_kind(bootmodule_kind kind)
  271. {
  272. struct bootmodules *mods = &bootinfo.modules;
  273. struct bootmodule *mod;
  274. int i;
  275. for (i = 0 ; i < mods->nr_mods ; i++ )
  276. {
  277. mod = &mods->module[i];
  278. if ( mod->kind == kind && !mod->domU )
  279. return mod;
  280. }
  281. return NULL;
  282. }
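/*
 * Typical use of boot_module_find_by_kind(), e.g. to locate the hardware
 * domain kernel supplied by the bootloader:
 *
 *     struct bootmodule *kernel = boot_module_find_by_kind(BOOTMOD_KERNEL);
 *
 * A NULL return means no non-domU module of that kind was registered.
 */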
  283. void __init add_boot_cmdline(const char *name, const char *cmdline,
  284. bootmodule_kind kind, paddr_t start, bool domU)
  285. {
  286. struct bootcmdlines *cmds = &bootinfo.cmdlines;
  287. struct bootcmdline *cmd;
  288. if ( cmds->nr_mods == MAX_MODULES )
  289. {
  290. printk("Ignoring %s cmdline (too many)\n", name);
  291. return;
  292. }
  293. cmd = &cmds->cmdline[cmds->nr_mods++];
  294. cmd->kind = kind;
  295. cmd->domU = domU;
  296. cmd->start = start;
  297. ASSERT(strlen(name) <= DT_MAX_NAME);
  298. safe_strcpy(cmd->dt_name, name);
  299. if ( strlen(cmdline) > BOOTMOD_MAX_CMDLINE )
  300. panic("module %s command line too long\n", name);
  301. safe_strcpy(cmd->cmdline, cmdline);
  302. }
  303. /*
304. * boot_cmdline_find_by_kind can only be used to return Xen modules (e.g.
  305. * XSM, DTB) or Dom0 modules. This is not suitable for looking up guest
  306. * modules.
  307. */
  308. struct bootcmdline * __init boot_cmdline_find_by_kind(bootmodule_kind kind)
  309. {
  310. struct bootcmdlines *cmds = &bootinfo.cmdlines;
  311. struct bootcmdline *cmd;
  312. int i;
  313. for ( i = 0 ; i < cmds->nr_mods ; i++ )
  314. {
  315. cmd = &cmds->cmdline[i];
  316. if ( cmd->kind == kind && !cmd->domU )
  317. return cmd;
  318. }
  319. return NULL;
  320. }
  321. struct bootcmdline * __init boot_cmdline_find_by_name(const char *name)
  322. {
  323. struct bootcmdlines *mods = &bootinfo.cmdlines;
  324. struct bootcmdline *mod;
  325. unsigned int i;
  326. for (i = 0 ; i < mods->nr_mods ; i++ )
  327. {
  328. mod = &mods->cmdline[i];
  329. if ( strcmp(mod->dt_name, name) == 0 )
  330. return mod;
  331. }
  332. return NULL;
  333. }
  334. struct bootmodule * __init boot_module_find_by_addr_and_kind(bootmodule_kind kind,
  335. paddr_t start)
  336. {
  337. struct bootmodules *mods = &bootinfo.modules;
  338. struct bootmodule *mod;
  339. unsigned int i;
  340. for (i = 0 ; i < mods->nr_mods ; i++ )
  341. {
  342. mod = &mods->module[i];
  343. if ( mod->kind == kind && mod->start == start )
  344. return mod;
  345. }
  346. return NULL;
  347. }
  348. const char * __init boot_module_kind_as_string(bootmodule_kind kind)
  349. {
  350. switch ( kind )
  351. {
  352. case BOOTMOD_XEN: return "Xen";
  353. case BOOTMOD_FDT: return "Device Tree";
  354. case BOOTMOD_KERNEL: return "Kernel";
  355. case BOOTMOD_RAMDISK: return "Ramdisk";
  356. case BOOTMOD_XSM: return "XSM";
  357. case BOOTMOD_GUEST_DTB: return "DTB";
  358. case BOOTMOD_UNKNOWN: return "Unknown";
  359. default: BUG();
  360. }
  361. }
  362. void __init discard_initial_modules(void)
  363. {
  364. struct bootmodules *mi = &bootinfo.modules;
  365. int i;
  366. for ( i = 0; i < mi->nr_mods; i++ )
  367. {
  368. paddr_t s = mi->module[i].start;
  369. paddr_t e = s + PAGE_ALIGN(mi->module[i].size);
  370. if ( mi->module[i].kind == BOOTMOD_XEN )
  371. continue;
  372. if ( !mfn_valid(maddr_to_mfn(s)) ||
  373. !mfn_valid(maddr_to_mfn(e)) )
  374. continue;
  375. fw_unreserved_regions(s, e, init_domheap_pages, 0);
  376. }
  377. mi->nr_mods = 0;
  378. remove_early_mappings();
  379. }
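/*
 * After discard_initial_modules() every non-Xen boot module (kernels,
 * ramdisks, DTBs) has been handed back to the heap via init_domheap_pages()
 * and the temporary early mappings used to access them are gone.
 */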
  380. /* Relocate the FDT in Xen heap */
  381. static void * __init relocate_fdt(paddr_t dtb_paddr, size_t dtb_size)
  382. {
  383. void *fdt = xmalloc_bytes(dtb_size);
  384. if ( !fdt )
  385. panic("Unable to allocate memory for relocating the Device-Tree.\n");
  386. copy_from_paddr(fdt, dtb_paddr, dtb_size);
  387. return fdt;
  388. }
  389. #ifdef CONFIG_ARM_32
  390. /*
  391. * Returns the end address of the highest region in the range s..e
  392. * with required size and alignment that does not conflict with the
  393. * modules from first_mod to nr_modules.
  394. *
  395. * For non-recursive callers first_mod should normally be 0 (all
  396. * modules and Xen itself) or 1 (all modules but not Xen).
  397. */
  398. static paddr_t __init consider_modules(paddr_t s, paddr_t e,
  399. uint32_t size, paddr_t align,
  400. int first_mod)
  401. {
  402. const struct bootmodules *mi = &bootinfo.modules;
  403. int i;
  404. int nr;
  405. s = (s+align-1) & ~(align-1);
  406. e = e & ~(align-1);
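/*
 * For example, with align = 32 MiB (0x2000000): s = 0x80123000 rounds up to
 * 0x82000000 and e = 0x8ff00000 rounds down to 0x8e000000, so the candidate
 * window is aligned at both ends before any module is checked.
 */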
  407. if ( s > e || e - s < size )
  408. return 0;
  409. /* First check the boot modules */
  410. for ( i = first_mod; i < mi->nr_mods; i++ )
  411. {
  412. paddr_t mod_s = mi->module[i].start;
  413. paddr_t mod_e = mod_s + mi->module[i].size;
  414. if ( s < mod_e && mod_s < e )
  415. {
  416. mod_e = consider_modules(mod_e, e, size, align, i+1);
  417. if ( mod_e )
  418. return mod_e;
  419. return consider_modules(s, mod_s, size, align, i+1);
  420. }
  421. }
  422. /* Now check any fdt reserved areas. */
  423. nr = fdt_num_mem_rsv(device_tree_flattened);
  424. for ( ; i < mi->nr_mods + nr; i++ )
  425. {
  426. paddr_t mod_s, mod_e;
  427. if ( fdt_get_mem_rsv(device_tree_flattened,
  428. i - mi->nr_mods,
  429. &mod_s, &mod_e ) < 0 )
  430. /* If we can't read it, pretend it doesn't exist... */
  431. continue;
  432. /* fdt_get_mem_rsv returns length */
  433. mod_e += mod_s;
  434. if ( s < mod_e && mod_s < e )
  435. {
  436. mod_e = consider_modules(mod_e, e, size, align, i+1);
  437. if ( mod_e )
  438. return mod_e;
  439. return consider_modules(s, mod_s, size, align, i+1);
  440. }
  441. }
  442. /*
  443. * i is the current bootmodule we are evaluating, across all
  444. * possible kinds of bootmodules.
  445. *
  446. * When retrieving the corresponding reserved-memory addresses, we
  447. * need to index the bootinfo.reserved_mem bank starting from 0, and
  448. * only counting the reserved-memory modules. Hence, we need to use
  449. * i - nr.
  450. */
  451. nr += mi->nr_mods;
  452. for ( ; i - nr < bootinfo.reserved_mem.nr_banks; i++ )
  453. {
  454. paddr_t r_s = bootinfo.reserved_mem.bank[i - nr].start;
  455. paddr_t r_e = r_s + bootinfo.reserved_mem.bank[i - nr].size;
  456. if ( s < r_e && r_s < e )
  457. {
  458. r_e = consider_modules(r_e, e, size, align, i + 1);
  459. if ( r_e )
  460. return r_e;
  461. return consider_modules(s, r_s, size, align, i + 1);
  462. }
  463. }
  464. return e;
  465. }
  466. #endif
  467. /*
468. * Return the end of the non-module region starting at s. In other
469. * words, return the start of the next module at or after s.
  470. *
  471. * On input *end is the end of the region which should be considered
  472. * and it is updated to reflect the end of the module, clipped to the
  473. * end of the region if it would run over.
  474. */
  475. static paddr_t __init next_module(paddr_t s, paddr_t *end)
  476. {
  477. struct bootmodules *mi = &bootinfo.modules;
  478. paddr_t lowest = ~(paddr_t)0;
  479. int i;
  480. for ( i = 0; i < mi->nr_mods; i++ )
  481. {
  482. paddr_t mod_s = mi->module[i].start;
  483. paddr_t mod_e = mod_s + mi->module[i].size;
  484. if ( !mi->module[i].size )
  485. continue;
  486. if ( mod_s < s )
  487. continue;
  488. if ( mod_s > lowest )
  489. continue;
  490. if ( mod_s > *end )
  491. continue;
  492. lowest = mod_s;
  493. *end = min(*end, mod_e);
  494. }
  495. return lowest;
  496. }
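/*
 * Example for next_module(): for a bank [0x40000000, 0x60000000) containing
 * one module at [0x48000000, 0x49000000), a call with s = 0x40000000 and
 * *end = 0x60000000 returns 0x48000000 and updates *end to 0x49000000: the
 * module-free region is [s, 0x48000000) and the caller resumes the scan at
 * 0x49000000.
 */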
  497. static void __init init_pdx(void)
  498. {
  499. paddr_t bank_start, bank_size, bank_end;
  500. /*
  501. * Arm does not have any restrictions on the bits to compress. Pass 0 to
  502. * let the common code further restrict the mask.
  503. *
  504. * If the logic changes in pfn_pdx_hole_setup we might have to
  505. * update this function too.
  506. */
  507. uint64_t mask = pdx_init_mask(0x0);
  508. int bank;
  509. for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
  510. {
  511. bank_start = bootinfo.mem.bank[bank].start;
  512. bank_size = bootinfo.mem.bank[bank].size;
  513. mask |= bank_start | pdx_region_mask(bank_start, bank_size);
  514. }
  515. for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
  516. {
  517. bank_start = bootinfo.mem.bank[bank].start;
  518. bank_size = bootinfo.mem.bank[bank].size;
  519. if (~mask & pdx_region_mask(bank_start, bank_size))
  520. mask = 0;
  521. }
  522. pfn_pdx_hole_setup(mask >> PAGE_SHIFT);
  523. for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
  524. {
  525. bank_start = bootinfo.mem.bank[bank].start;
  526. bank_size = bootinfo.mem.bank[bank].size;
  527. bank_end = bank_start + bank_size;
  528. set_pdx_range(paddr_to_pfn(bank_start),
  529. paddr_to_pfn(bank_end));
  530. }
  531. }
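/*
 * In short, init_pdx() sets up PDX compression, which elides address bits
 * that are zero in every RAM bank so the frame table and page indexes do not
 * have to span large holes between banks; the mask computed above feeds
 * pfn_pdx_hole_setup(), which picks the run of bits to squeeze out.
 */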
  532. /* Static memory initialization */
  533. static void __init init_staticmem_pages(void)
  534. {
  535. #ifdef CONFIG_STATIC_MEMORY
  536. unsigned int bank;
  537. for ( bank = 0 ; bank < bootinfo.reserved_mem.nr_banks; bank++ )
  538. {
  539. if ( bootinfo.reserved_mem.bank[bank].xen_domain )
  540. {
  541. mfn_t bank_start = _mfn(PFN_UP(bootinfo.reserved_mem.bank[bank].start));
  542. unsigned long bank_pages = PFN_DOWN(bootinfo.reserved_mem.bank[bank].size);
  543. mfn_t bank_end = mfn_add(bank_start, bank_pages);
  544. if ( mfn_x(bank_end) <= mfn_x(bank_start) )
  545. return;
  546. free_staticmem_pages(mfn_to_page(bank_start), bank_pages, false);
  547. }
  548. }
  549. #endif
  550. }
  551. #ifdef CONFIG_ARM_32
  552. static void __init setup_mm(void)
  553. {
  554. paddr_t ram_start, ram_end, ram_size;
  555. paddr_t s, e;
  556. unsigned long ram_pages;
  557. unsigned long heap_pages, xenheap_pages, domheap_pages;
  558. unsigned int i;
  559. const uint32_t ctr = READ_CP32(CTR);
  560. if ( !bootinfo.mem.nr_banks )
  561. panic("No memory bank\n");
562. /* We only support instruction caches implementing the IVIPT extension. */
  563. if ( ((ctr >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) == ICACHE_POLICY_AIVIVT )
  564. panic("AIVIVT instruction cache not supported\n");
  565. init_pdx();
  566. ram_start = bootinfo.mem.bank[0].start;
  567. ram_size = bootinfo.mem.bank[0].size;
  568. ram_end = ram_start + ram_size;
  569. for ( i = 1; i < bootinfo.mem.nr_banks; i++ )
  570. {
  571. paddr_t bank_start = bootinfo.mem.bank[i].start;
  572. paddr_t bank_size = bootinfo.mem.bank[i].size;
  573. paddr_t bank_end = bank_start + bank_size;
  574. ram_size = ram_size + bank_size;
  575. ram_start = min(ram_start,bank_start);
  576. ram_end = max(ram_end,bank_end);
  577. }
  578. total_pages = ram_pages = ram_size >> PAGE_SHIFT;
  579. /*
  580. * If the user has not requested otherwise via the command line
  581. * then locate the xenheap using these constraints:
  582. *
  583. * - must be 32 MiB aligned
  584. * - must not include Xen itself or the boot modules
  585. * - must be at most 1GB or 1/32 the total RAM in the system if less
  586. * - must be at least 32M
  587. *
  588. * We try to allocate the largest xenheap possible within these
  589. * constraints.
  590. */
  591. heap_pages = ram_pages;
  592. if ( opt_xenheap_megabytes )
  593. xenheap_pages = opt_xenheap_megabytes << (20-PAGE_SHIFT);
  594. else
  595. {
  596. xenheap_pages = (heap_pages/32 + 0x1fffUL) & ~0x1fffUL;
  597. xenheap_pages = max(xenheap_pages, 32UL<<(20-PAGE_SHIFT));
  598. xenheap_pages = min(xenheap_pages, 1UL<<(30-PAGE_SHIFT));
  599. }
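/*
 * Worked example of the default sizing above, assuming 4 KiB pages: on a
 * 2 GiB board heap_pages = 0x80000, heap_pages/32 = 0x4000, already a
 * multiple of 0x2000 pages (32 MiB), so xenheap_pages = 0x4000 (64 MiB),
 * which sits comfortably inside the [32 MiB, 1 GiB] clamp.
 */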
  600. do
  601. {
  602. e = consider_modules(ram_start, ram_end,
  603. pfn_to_paddr(xenheap_pages),
  604. 32<<20, 0);
  605. if ( e )
  606. break;
  607. xenheap_pages >>= 1;
  608. } while ( !opt_xenheap_megabytes && xenheap_pages > 32<<(20-PAGE_SHIFT) );
  609. if ( ! e )
610. panic("Not enough space for xenheap\n");
  611. domheap_pages = heap_pages - xenheap_pages;
  612. printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages%s)\n",
  613. e - (pfn_to_paddr(xenheap_pages)), e, xenheap_pages,
  614. opt_xenheap_megabytes ? ", from command-line" : "");
  615. printk("Dom heap: %lu pages\n", domheap_pages);
  616. setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages);
  617. /* Add non-xenheap memory */
  618. for ( i = 0; i < bootinfo.mem.nr_banks; i++ )
  619. {
  620. paddr_t bank_start = bootinfo.mem.bank[i].start;
  621. paddr_t bank_end = bank_start + bootinfo.mem.bank[i].size;
  622. s = bank_start;
  623. while ( s < bank_end )
  624. {
  625. paddr_t n = bank_end;
  626. e = next_module(s, &n);
  627. if ( e == ~(paddr_t)0 )
  628. {
  629. e = n = ram_end;
  630. }
  631. /*
632. * The module is in a RAM bank other than the one we are
633. * dealing with here.
  634. */
  635. if ( e > bank_end )
  636. e = bank_end;
  637. /* Avoid the xenheap */
  638. if ( s < mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages))
  639. && mfn_to_maddr(xenheap_mfn_start) < e )
  640. {
  641. e = mfn_to_maddr(xenheap_mfn_start);
  642. n = mfn_to_maddr(mfn_add(xenheap_mfn_start, xenheap_pages));
  643. }
  644. fw_unreserved_regions(s, e, init_boot_pages, 0);
  645. s = n;
  646. }
  647. }
  648. /* Frame table covers all of RAM region, including holes */
  649. setup_frametable_mappings(ram_start, ram_end);
  650. max_page = PFN_DOWN(ram_end);
  651. /* Add xenheap memory that was not already added to the boot allocator. */
  652. init_xenheap_pages(mfn_to_maddr(xenheap_mfn_start),
  653. mfn_to_maddr(xenheap_mfn_end));
  654. init_staticmem_pages();
  655. }
  656. #else /* CONFIG_ARM_64 */
  657. static void __init setup_mm(void)
  658. {
  659. paddr_t ram_start = ~0;
  660. paddr_t ram_end = 0;
  661. paddr_t ram_size = 0;
  662. int bank;
  663. init_pdx();
  664. total_pages = 0;
  665. for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
  666. {
  667. paddr_t bank_start = bootinfo.mem.bank[bank].start;
  668. paddr_t bank_size = bootinfo.mem.bank[bank].size;
  669. paddr_t bank_end = bank_start + bank_size;
  670. paddr_t s, e;
  671. ram_size = ram_size + bank_size;
  672. ram_start = min(ram_start,bank_start);
  673. ram_end = max(ram_end,bank_end);
  674. setup_xenheap_mappings(bank_start>>PAGE_SHIFT, bank_size>>PAGE_SHIFT);
  675. s = bank_start;
  676. while ( s < bank_end )
  677. {
  678. paddr_t n = bank_end;
  679. e = next_module(s, &n);
  680. if ( e == ~(paddr_t)0 )
  681. {
  682. e = n = bank_end;
  683. }
  684. if ( e > bank_end )
  685. e = bank_end;
  686. fw_unreserved_regions(s, e, init_boot_pages, 0);
  687. s = n;
  688. }
  689. }
  690. total_pages += ram_size >> PAGE_SHIFT;
  691. xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
  692. xenheap_mfn_start = maddr_to_mfn(ram_start);
  693. xenheap_mfn_end = maddr_to_mfn(ram_end);
  694. setup_frametable_mappings(ram_start, ram_end);
  695. max_page = PFN_DOWN(ram_end);
  696. init_staticmem_pages();
  697. }
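/*
 * Unlike the 32-bit path above, arm64 has a virtual address space large
 * enough to map all of RAM, so every bank is added to the xenheap mappings
 * and there is no separate xenheap/domheap split to size here.
 */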
  698. #endif
  699. static bool __init is_dom0less_mode(void)
  700. {
  701. struct bootmodules *mods = &bootinfo.modules;
  702. struct bootmodule *mod;
  703. unsigned int i;
  704. bool dom0found = false;
  705. bool domUfound = false;
  706. /* Look into the bootmodules */
  707. for ( i = 0 ; i < mods->nr_mods ; i++ )
  708. {
  709. mod = &mods->module[i];
  710. /* Find if dom0 and domU kernels are present */
  711. if ( mod->kind == BOOTMOD_KERNEL )
  712. {
  713. if ( mod->domU == false )
  714. {
  715. dom0found = true;
  716. break;
  717. }
  718. else
  719. domUfound = true;
  720. }
  721. }
  722. /*
  723. * If there is no dom0 kernel but at least one domU, then we are in
  724. * dom0less mode
  725. */
  726. return ( !dom0found && domUfound );
  727. }
  728. size_t __read_mostly dcache_line_bytes;
  729. /* C entry point for boot CPU */
  730. void __init start_xen(unsigned long boot_phys_offset,
  731. unsigned long fdt_paddr)
  732. {
  733. size_t fdt_size;
  734. int cpus, i;
  735. const char *cmdline;
  736. struct bootmodule *xen_bootmodule;
  737. struct domain *d;
  738. int rc;
  739. dcache_line_bytes = read_dcache_line_bytes();
  740. percpu_init_areas();
  741. set_processor_id(0); /* needed early, for smp_processor_id() */
  742. setup_virtual_regions(NULL, NULL);
743. /* Initialize traps early to allow us to get a backtrace when an error occurs */
  744. init_traps();
  745. setup_pagetables(boot_phys_offset);
  746. smp_clear_cpu_maps();
  747. device_tree_flattened = early_fdt_map(fdt_paddr);
  748. if ( !device_tree_flattened )
  749. panic("Invalid device tree blob at physical address %#lx.\n"
  750. "The DTB must be 8-byte aligned and must not exceed 2 MB in size.\n\n"
  751. "Please check your bootloader.\n",
  752. fdt_paddr);
  753. /* Register Xen's load address as a boot module. */
  754. xen_bootmodule = add_boot_module(BOOTMOD_XEN,
  755. (paddr_t)(uintptr_t)(_start + boot_phys_offset),
  756. (paddr_t)(uintptr_t)(_end - _start), false);
  757. BUG_ON(!xen_bootmodule);
  758. fdt_size = boot_fdt_info(device_tree_flattened, fdt_paddr);
  759. cmdline = boot_fdt_cmdline(device_tree_flattened);
  760. printk("Command line: %s\n", cmdline);
  761. cmdline_parse(cmdline);
  762. setup_mm();
  763. /* Parse the ACPI tables for possible boot-time configuration */
  764. acpi_boot_table_init();
  765. end_boot_allocator();
  766. /*
  767. * The memory subsystem has been initialized, we can now switch from
  768. * early_boot -> boot.
  769. */
  770. system_state = SYS_STATE_boot;
  771. vm_init();
  772. if ( acpi_disabled )
  773. {
  774. printk("Booting using Device Tree\n");
  775. device_tree_flattened = relocate_fdt(fdt_paddr, fdt_size);
  776. dt_unflatten_host_device_tree();
  777. }
  778. else
  779. {
  780. printk("Booting using ACPI\n");
  781. device_tree_flattened = NULL;
  782. }
  783. init_IRQ();
  784. platform_init();
  785. preinit_xen_time();
  786. gic_preinit();
  787. arm_uart_init();
  788. console_init_preirq();
  789. console_init_ring();
  790. processor_id();
  791. smp_init_cpus();
  792. cpus = smp_get_max_cpus();
  793. printk(XENLOG_INFO "SMP: Allowing %u CPUs\n", cpus);
  794. nr_cpu_ids = cpus;
  795. /*
796. * Some errata workarounds rely on the SMCCC version detected by psci_init()
  797. * (called from smp_init_cpus()).
  798. */
  799. check_local_cpu_errata();
  800. init_xen_time();
  801. gic_init();
  802. tasklet_subsys_init();
  803. if ( xsm_dt_init() != 1 )
  804. warning_add("WARNING: SILO mode is not enabled.\n"
  805. "It has implications on the security of the system,\n"
  806. "unless the communications have been forbidden between\n"
  807. "untrusted domains.\n");
  808. init_maintenance_interrupt();
  809. init_timer_interrupt();
  810. timer_init();
  811. init_idle_domain();
  812. rcu_init();
  813. setup_system_domains();
  814. local_irq_enable();
  815. local_abort_enable();
  816. smp_prepare_cpus();
  817. initialize_keytable();
  818. console_init_postirq();
  819. do_presmp_initcalls();
  820. for_each_present_cpu ( i )
  821. {
  822. if ( (num_online_cpus() < cpus) && !cpu_online(i) )
  823. {
  824. int ret = cpu_up(i);
  825. if ( ret != 0 )
  826. printk("Failed to bring up CPU %u (error %d)\n", i, ret);
  827. }
  828. }
  829. printk("Brought up %ld CPUs\n", (long)num_online_cpus());
  830. /* TODO: smp_cpus_done(); */
  831. /* This should be done in a vpmu driver but we do not have one yet. */
  832. vpmu_is_available = cpu_has_pmu;
  833. /*
  834. * The IOMMU subsystem must be initialized before P2M as we need
  835. * to gather requirements regarding the maximum IPA bits supported by
  836. * each IOMMU device.
  837. */
  838. rc = iommu_setup();
  839. if ( !iommu_enabled && rc != -ENODEV )
840. panic("Couldn't configure all the IOMMUs correctly.\n");
  841. setup_virt_paging();
  842. do_initcalls();
  843. /*
  844. * It needs to be called after do_initcalls to be able to use
  845. * stop_machine (tasklets initialized via an initcall).
  846. */
  847. apply_alternatives_all();
  848. enable_errata_workarounds();
  849. /* Create initial domain 0. */
  850. if ( !is_dom0less_mode() )
  851. create_dom0();
  852. else
  853. printk(XENLOG_INFO "Xen dom0less mode detected\n");
  854. if ( acpi_disabled )
  855. create_domUs();
  856. /*
  857. * This needs to be called **before** heap_init_late() so modules
  858. * will be scrubbed (unless suppressed).
  859. */
  860. discard_initial_modules();
  861. heap_init_late();
  862. init_trace_bufs();
  863. init_constructors();
  864. console_endboot();
  865. /* Hide UART from DOM0 if we're using it */
  866. serial_endboot();
  867. system_state = SYS_STATE_active;
  868. for_each_domain( d )
  869. domain_unpause_by_systemcontroller(d);
870. /* Switch over to the dynamically allocated stack for the idle vcpu
  871. * since the static one we're running on is about to be freed. */
  872. memcpy(idle_vcpu[0]->arch.cpu_info, get_cpu_info(),
  873. sizeof(struct cpu_info));
  874. switch_stack_and_jump(idle_vcpu[0]->arch.cpu_info, init_done);
  875. }
  876. void arch_get_xen_caps(xen_capabilities_info_t *info)
  877. {
  878. /* Interface name is always xen-3.0-* for Xen-3.x. */
  879. int major = 3, minor = 0;
  880. char s[32];
  881. (*info)[0] = '\0';
  882. #ifdef CONFIG_ARM_64
  883. snprintf(s, sizeof(s), "xen-%d.%d-aarch64 ", major, minor);
  884. safe_strcat(*info, s);
  885. #endif
  886. if ( cpu_has_aarch32 )
  887. {
  888. snprintf(s, sizeof(s), "xen-%d.%d-armv7l ", major, minor);
  889. safe_strcat(*info, s);
  890. }
  891. }
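/*
 * For example, on an arm64 platform which also supports AArch32 guests the
 * resulting capability string is "xen-3.0-aarch64 xen-3.0-armv7l ".
 */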
  892. /*
  893. * Local variables:
  894. * mode: C
  895. * c-file-style: "BSD"
  896. * c-basic-offset: 4
  897. * indent-tabs-mode: nil
  898. * End:
  899. */