
/arch/x86/xen/setup.c

https://github.com/mdombroski/linux-2.6
C | 540 lines
Possible License(s): GPL-2.0

/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>

#include "xen-ops.h"
#include "vdso.h"

/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
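
/*
 * Record a range of pseudo-physical memory in xen_extra_mem[], merging
 * it with an adjacent region where possible.  The range is reserved in
 * memblock and its PFNs are marked as invalid in the p2m until they are
 * populated later.
 */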
static void __init xen_add_extra_mem(u64 start, u64 size)
{
        unsigned long pfn;
        int i;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                /* Add new region. */
                if (xen_extra_mem[i].size == 0) {
                        xen_extra_mem[i].start = start;
                        xen_extra_mem[i].size = size;
                        break;
                }
                /* Append to existing region. */
                if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
                        xen_extra_mem[i].size += size;
                        break;
                }
        }
        if (i == XEN_EXTRA_MEM_MAX_REGIONS)
                printk(KERN_WARNING "Warning: not enough extra memory regions\n");

        memblock_reserve(start, size);

        xen_max_p2m_pfn = PFN_DOWN(start + size);
        for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
                __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
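
/*
 * Release (give back to Xen) or populate (claim from Xen) the PFN range
 * [start, end), one frame at a time, updating the p2m as we go.
 * Returns the number of pages actually released or populated.
 */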
static unsigned long __init xen_do_chunk(unsigned long start,
                                         unsigned long end, bool release)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = 0,
                .domid        = DOMID_SELF
        };
        unsigned long len = 0;
        unsigned long pfn;
        int ret;

        for (pfn = start; pfn < end; pfn++) {
                unsigned long frame;
                unsigned long mfn = pfn_to_mfn(pfn);

                if (release) {
                        /* Make sure pfn exists to start with */
                        if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
                                continue;
                        frame = mfn;
                } else {
                        if (mfn != INVALID_P2M_ENTRY)
                                continue;
                        frame = pfn;
                }
                set_xen_guest_handle(reservation.extent_start, &frame);
                reservation.nr_extents = 1;

                ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
                                           &reservation);
                WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
                     release ? "release" : "populate", pfn, ret);

                if (ret == 1) {
                        if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
                                if (release)
                                        break;
                                set_xen_guest_handle(reservation.extent_start, &frame);
                                reservation.nr_extents = 1;
                                ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                                           &reservation);
                                break;
                        }
                        len++;
                } else
                        break;
        }
        if (len)
                printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
                       release ? "Freeing" : "Populating",
                       start, end, len,
                       release ? "freed" : "added");

        return len;
}
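
/* Give the PFN range [start, end) back to the hypervisor. */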
static unsigned long __init xen_release_chunk(unsigned long start,
                                              unsigned long end)
{
        return xen_do_chunk(start, end, true);
}
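
/*
 * Walk the e820 map and repopulate RAM regions that lie above the
 * initial allocation (max_pfn), consuming at most credits_left of the
 * previously released pages.  Records the end of the last populated
 * range in *last_pfn and returns the number of pages populated.
 */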
static unsigned long __init xen_populate_chunk(
        const struct e820entry *list, size_t map_size,
        unsigned long max_pfn, unsigned long *last_pfn,
        unsigned long credits_left)
{
        const struct e820entry *entry;
        unsigned int i;
        unsigned long done = 0;
        unsigned long dest_pfn;

        for (i = 0, entry = list; i < map_size; i++, entry++) {
                unsigned long s_pfn;
                unsigned long e_pfn;
                unsigned long pfns;
                long capacity;

                if (credits_left <= 0)
                        break;

                if (entry->type != E820_RAM)
                        continue;

                e_pfn = PFN_DOWN(entry->addr + entry->size);

                /* We only care about E820 after the xen_start_info->nr_pages */
                if (e_pfn <= max_pfn)
                        continue;

                s_pfn = PFN_UP(entry->addr);
                /* If the E820 falls within the nr_pages, we want to start
                 * at the nr_pages PFN.
                 * If that would mean going past the E820 entry, skip it
                 */
                if (s_pfn <= max_pfn) {
                        capacity = e_pfn - max_pfn;
                        dest_pfn = max_pfn;
                } else {
                        capacity = e_pfn - s_pfn;
                        dest_pfn = s_pfn;
                }

                if (credits_left < capacity)
                        capacity = credits_left;

                pfns = xen_do_chunk(dest_pfn, dest_pfn + capacity, false);
                done += pfns;
                *last_pfn = (dest_pfn + pfns);
                if (pfns < capacity)
                        break;
                credits_left -= pfns;
        }
        return done;
}
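
/*
 * Mark the PFN range [start_pfn, end_pfn) as identity (1:1) in the p2m.
 * Pages below nr_pages are first released back to Xen, and any PFNs
 * that are currently mapped have their kernel VA mapping rewritten to
 * point at the matching machine frame.
 */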
static void __init xen_set_identity_and_release_chunk(
        unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
        unsigned long *released, unsigned long *identity)
{
        unsigned long pfn;

        /*
         * If the PFNs are currently mapped, the VA mapping also needs
         * to be updated to be 1:1.
         */
        for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
                (void)HYPERVISOR_update_va_mapping(
                        (unsigned long)__va(pfn << PAGE_SHIFT),
                        mfn_pte(pfn, PAGE_KERNEL_IO), 0);

        if (start_pfn < nr_pages)
                *released += xen_release_chunk(
                        start_pfn, min(end_pfn, nr_pages));

        *identity += set_phys_range_identity(start_pfn, end_pfn);
}

static unsigned long __init xen_set_identity_and_release(
        const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
        phys_addr_t start = 0;
        unsigned long released = 0;
        unsigned long identity = 0;
        const struct e820entry *entry;
        int i;

        /*
         * Combine non-RAM regions and gaps until a RAM region (or the
         * end of the map) is reached, then set the 1:1 map and
         * release the pages (if available) in those non-RAM regions.
         *
         * The combined non-RAM regions are rounded to a whole number
         * of pages so any partial pages are accessible via the 1:1
         * mapping.  This is needed for some BIOSes that put (for
         * example) the DMI tables in a reserved region that begins on
         * a non-page boundary.
         */
        for (i = 0, entry = list; i < map_size; i++, entry++) {
                phys_addr_t end = entry->addr + entry->size;

                if (entry->type == E820_RAM || i == map_size - 1) {
                        unsigned long start_pfn = PFN_DOWN(start);
                        unsigned long end_pfn = PFN_UP(end);

                        if (entry->type == E820_RAM)
                                end_pfn = PFN_UP(entry->addr);

                        if (start_pfn < end_pfn)
                                xen_set_identity_and_release_chunk(
                                        start_pfn, end_pfn, nr_pages,
                                        &released, &identity);

                        start = end;
                }
        }

        if (released)
                printk(KERN_INFO "Released %lu pages of unused memory\n", released);
        if (identity)
                printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

        return released;
}

static unsigned long __init xen_get_max_pages(void)
{
        unsigned long max_pages = MAX_DOMAIN_PAGES;
        domid_t domid = DOMID_SELF;
        int ret;

        /*
         * For the initial domain we use the maximum reservation as
         * the maximum page.
         *
         * For guest domains the current maximum reservation reflects
         * the current maximum rather than the static maximum.  In this
         * case the e820 map provided to us will cover the static
         * maximum region.
         */
        if (xen_initial_domain()) {
                ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
                if (ret > 0)
                        max_pages = ret;
        }

        return min(max_pages, MAX_DOMAIN_PAGES);
}
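
/*
 * Add a region to the kernel e820 map, trimming RAM regions to whole
 * pages so that partial pages are never reported as usable RAM.
 */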
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
        u64 end = start + size;

        /* Align RAM regions to page boundaries. */
        if (type == E820_RAM) {
                start = PAGE_ALIGN(start);
                end &= ~((u64)PAGE_SIZE - 1);
        }

        e820_add_region(start, end - start, type);
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
        static struct e820entry map[E820MAX] __initdata;

        unsigned long max_pfn = xen_start_info->nr_pages;
        unsigned long long mem_end;
        int rc;
        struct xen_memory_map memmap;
        unsigned long max_pages;
        unsigned long last_pfn = 0;
        unsigned long extra_pages = 0;
        unsigned long populated;
        int i;
        int op;

        max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
        mem_end = PFN_PHYS(max_pfn);

        memmap.nr_entries = E820MAX;
        set_xen_guest_handle(memmap.buffer, map);

        op = xen_initial_domain() ?
                XENMEM_machine_memory_map :
                XENMEM_memory_map;
        rc = HYPERVISOR_memory_op(op, &memmap);
        if (rc == -ENOSYS) {
                BUG_ON(xen_initial_domain());
                memmap.nr_entries = 1;
                map[0].addr = 0ULL;
                map[0].size = mem_end;
                /* 8MB slack (to balance backend allocations). */
                map[0].size += 8ULL << 20;
                map[0].type = E820_RAM;
                rc = 0;
        }
        BUG_ON(rc);

        /* Make sure the Xen-supplied memory map is well-ordered. */
        sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

        max_pages = xen_get_max_pages();
        if (max_pages > max_pfn)
                extra_pages += max_pages - max_pfn;

        /*
         * Set P2M for all non-RAM pages and E820 gaps to be identity
         * type PFNs.  Any RAM pages that would be made inaccessible by
         * this are first released.
         */
        xen_released_pages = xen_set_identity_and_release(
                map, memmap.nr_entries, max_pfn);

        /*
         * Populate back the non-RAM pages and E820 gaps that had been
         * released.
         */
        populated = xen_populate_chunk(map, memmap.nr_entries,
                                       max_pfn, &last_pfn, xen_released_pages);

        xen_released_pages -= populated;
        extra_pages += xen_released_pages;

        if (last_pfn > max_pfn) {
                max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
                mem_end = PFN_PHYS(max_pfn);
        }
        /*
         * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
         * factor of the base size.  On non-highmem systems, the base
         * size is the full initial memory allocation; on highmem it
         * is limited to the max size of lowmem, so that it doesn't
         * get completely filled.
         *
         * In principle there could be a problem in lowmem systems if
         * the initial memory is also very large with respect to
         * lowmem, but we won't try to deal with that here.
         */
        extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                          extra_pages);

        i = 0;
        while (i < memmap.nr_entries) {
                u64 addr = map[i].addr;
                u64 size = map[i].size;
                u32 type = map[i].type;

                if (type == E820_RAM) {
                        if (addr < mem_end) {
                                size = min(size, mem_end - addr);
                        } else if (extra_pages) {
                                size = min(size, (u64)extra_pages * PAGE_SIZE);
                                extra_pages -= size / PAGE_SIZE;
                                xen_add_extra_mem(addr, size);
                        } else
                                type = E820_UNUSABLE;
                }

                xen_align_and_add_e820_region(addr, size, type);

                map[i].addr += size;
                map[i].size -= size;
                if (map[i].size == 0)
                        i++;
        }

        /*
         * In domU, the ISA region is normal, usable memory, but we
         * reserve ISA memory anyway because too many things poke
         * about in there.
         */
        e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
                        E820_RESERVED);

        /*
         * Reserve Xen bits:
         *  - mfn_list
         *  - xen_start_info
         * See comment above "struct start_info" in <xen/interface/xen.h>
         */
        memblock_reserve(__pa(xen_start_info->mfn_list),
                         xen_start_info->pt_base - xen_start_info->mfn_list);

        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

        return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
        u32 *mask;
        mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
        mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
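
/* Register a callback entry point (event, failsafe, syscall, ...) with Xen. */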
static int __cpuinit register_callback(unsigned type, const void *func)
{
        struct callback_register callback = {
                .type = type,
                .address = XEN_CALLBACK(__KERNEL_CS, func),
                .flags = CALLBACKF_mask_events,
        };

        return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
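
/*
 * Register the sysenter entry point with Xen if the CPU supports it;
 * on failure the feature bit is cleared so the fast path is not used.
 */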
void __cpuinit xen_enable_sysenter(void)
{
        int ret;
        unsigned sysenter_feature;

#ifdef CONFIG_X86_32
        sysenter_feature = X86_FEATURE_SEP;
#else
        sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

        if (!boot_cpu_has(sysenter_feature))
                return;

        ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
        if (ret != 0)
                setup_clear_cpu_cap(sysenter_feature);
}
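
/*
 * Register the 64-bit syscall entry point (and the 32-bit compat one,
 * where supported) with Xen.  No-op on 32-bit kernels.
 */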
void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
        int ret;

        ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
        if (ret != 0) {
                printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
                /* Pretty fatal; 64-bit userspace has no other
                   mechanism for syscalls. */
        }

        if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
                ret = register_callback(CALLBACKTYPE_syscall32,
                                        xen_syscall32_target);
                if (ret != 0)
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
        }
#endif /* CONFIG_X86_64 */
}
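
/*
 * Early machine setup when running as a Xen PV guest: enable the VM
 * assists we rely on, register the event/failsafe callbacks and fast
 * system call entry points, disable ACPI in unprivileged domains, copy
 * the guest command line, and force the default (safe_halt based) idle
 * loop.
 */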
void __init xen_arch_setup(void)
{
        xen_panic_handler_init();

        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

        if (!xen_feature(XENFEAT_auto_translated_physmap))
                HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                     VMASST_TYPE_pae_extended_cr3);

        if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
            register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
                BUG();

        xen_enable_sysenter();
        xen_enable_syscall();

#ifdef CONFIG_ACPI
        if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
                printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
                disable_acpi();
        }
#endif

        memcpy(boot_command_line, xen_start_info->cmd_line,
               MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
               COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

        /* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
        boot_cpu_data.hlt_works_ok = 1;
#endif
        disable_cpuidle();
        disable_cpufreq();
        WARN_ON(set_pm_idle_to_default());

        fiddle_vdso();
}