PageRenderTime 1385ms CodeModel.GetById 32ms RepoModel.GetById 0ms app.codeStats 0ms

/drivers/xen/balloon.c

https://github.com/mcr/linux-2.6
C | 574 lines | 390 code | 117 blank | 67 comment | 46 complexity | 49e81070626722d8fff018bd1a1d730a MD5 | raw file
  1. /******************************************************************************
  2. * balloon.c
  3. *
  4. * Xen balloon driver - enables returning/claiming memory to/from Xen.
  5. *
  6. * Copyright (c) 2003, B Dragovic
  7. * Copyright (c) 2003-2004, M Williamson, K Fraser
  8. * Copyright (c) 2005 Dan M. Smith, IBM Corporation
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License version 2
  12. * as published by the Free Software Foundation; or, when distributed
  13. * separately from the Linux kernel or incorporated into other
  14. * software packages, subject to the following license:
  15. *
  16. * Permission is hereby granted, free of charge, to any person obtaining a copy
  17. * of this source file (the "Software"), to deal in the Software without
  18. * restriction, including without limitation the rights to use, copy, modify,
  19. * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  20. * and to permit persons to whom the Software is furnished to do so, subject to
  21. * the following conditions:
  22. *
  23. * The above copyright notice and this permission notice shall be included in
  24. * all copies or substantial portions of the Software.
  25. *
  26. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  27. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  28. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  29. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  30. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  31. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  32. * IN THE SOFTWARE.
  33. */
  34. #include <linux/kernel.h>
  35. #include <linux/module.h>
  36. #include <linux/sched.h>
  37. #include <linux/errno.h>
  38. #include <linux/mm.h>
  39. #include <linux/bootmem.h>
  40. #include <linux/pagemap.h>
  41. #include <linux/highmem.h>
  42. #include <linux/mutex.h>
  43. #include <linux/list.h>
  44. #include <linux/sysdev.h>
  45. #include <linux/gfp.h>
  46. #include <asm/page.h>
  47. #include <asm/pgalloc.h>
  48. #include <asm/pgtable.h>
  49. #include <asm/uaccess.h>
  50. #include <asm/tlb.h>
  51. #include <asm/xen/hypervisor.h>
  52. #include <asm/xen/hypercall.h>
  53. #include <xen/xen.h>
  54. #include <xen/interface/xen.h>
  55. #include <xen/interface/memory.h>
  56. #include <xen/xenbus.h>
  57. #include <xen/features.h>
  58. #include <xen/page.h>
/* Convert a page count to KiB (PAGE_SHIFT - 10 == log2(PAGE_SIZE / 1024)). */
#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))

/* sysdev class name under which the balloon's sysfs files appear. */
#define BALLOON_CLASS_NAME "xen_memory"

struct balloon_stats {
	/* We aim for 'current allocation' == 'target allocation'. */
	unsigned long current_pages;
	unsigned long target_pages;
	/*
	 * Drivers may alter the memory reservation independently, but they
	 * must inform the balloon driver so we avoid hitting the hard limit.
	 */
	unsigned long driver_pages;
	/* Number of pages in high- and low-memory balloons. */
	unsigned long balloon_low;
	unsigned long balloon_high;
};

/* Serializes balloon_process() so only one worker adjusts the balloon. */
static DEFINE_MUTEX(balloon_mutex);

static struct sys_device balloon_sysdev;

static int register_balloon(struct sys_device *sysdev);

static struct balloon_stats balloon_stats;

/* We increase/decrease in batches which fit in a page */
static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];

#ifdef CONFIG_HIGHMEM
#define inc_totalhigh_pages() (totalhigh_pages++)
#define dec_totalhigh_pages() (totalhigh_pages--)
#else
/* Without highmem the counters do not exist, so these are no-ops. */
#define inc_totalhigh_pages() do {} while(0)
#define dec_totalhigh_pages() do {} while(0)
#endif

/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);

/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_WORK(balloon_worker, balloon_process);
static struct timer_list balloon_timer;

/* When ballooning out (allocating memory to return to Xen) we don't really
   want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
/*
 * scrub_page - zero a page before it is handed back to the hypervisor,
 * so guest data cannot leak to other domains.  Compiled away unless
 * CONFIG_XEN_SCRUB_PAGES is enabled.
 */
static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
	clear_highpage(page);
#endif
}
  103. /* balloon_append: add the given page to the balloon. */
  104. static void balloon_append(struct page *page)
  105. {
  106. /* Lowmem is re-populated first, so highmem pages go at list tail. */
  107. if (PageHighMem(page)) {
  108. list_add_tail(&page->lru, &ballooned_pages);
  109. balloon_stats.balloon_high++;
  110. dec_totalhigh_pages();
  111. } else {
  112. list_add(&page->lru, &ballooned_pages);
  113. balloon_stats.balloon_low++;
  114. }
  115. totalram_pages--;
  116. }
  117. /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
  118. static struct page *balloon_retrieve(void)
  119. {
  120. struct page *page;
  121. if (list_empty(&ballooned_pages))
  122. return NULL;
  123. page = list_entry(ballooned_pages.next, struct page, lru);
  124. list_del(&page->lru);
  125. if (PageHighMem(page)) {
  126. balloon_stats.balloon_high--;
  127. inc_totalhigh_pages();
  128. }
  129. else
  130. balloon_stats.balloon_low--;
  131. totalram_pages++;
  132. return page;
  133. }
  134. static struct page *balloon_first_page(void)
  135. {
  136. if (list_empty(&ballooned_pages))
  137. return NULL;
  138. return list_entry(ballooned_pages.next, struct page, lru);
  139. }
  140. static struct page *balloon_next_page(struct page *page)
  141. {
  142. struct list_head *next = page->lru.next;
  143. if (next == &ballooned_pages)
  144. return NULL;
  145. return list_entry(next, struct page, lru);
  146. }
/* Timer callback: defer balloon adjustment to process context. */
static void balloon_alarm(unsigned long unused)
{
	schedule_work(&balloon_worker);
}
  151. static unsigned long current_target(void)
  152. {
  153. unsigned long target = balloon_stats.target_pages;
  154. target = min(target,
  155. balloon_stats.current_pages +
  156. balloon_stats.balloon_low +
  157. balloon_stats.balloon_high);
  158. return target;
  159. }
/*
 * increase_reservation - reclaim up to @nr_pages from Xen into this guest.
 *
 * Asks the hypervisor to populate machine frames for the first @nr_pages
 * entries on the balloon list, wires them back into the P2M table (and the
 * kernel linear mapping for lowmem pages), then releases them to the page
 * allocator.
 *
 * Takes xen_reservation_lock internally.  Returns a negative error from
 * the hypercall, 0 if all pages were obtained, or 1 on partial success
 * (caller should retry later).
 */
static int increase_reservation(unsigned long nr_pages)
{
	unsigned long pfn, i, flags;
	struct page *page;
	long rc;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,	/* order-0 (single) pages only */
		.domid = DOMID_SELF
	};

	/* Batch size is bounded by the page-sized frame_list. */
	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* Collect the PFNs we want repopulated, without yet removing the
	   pages from the balloon (the hypercall may partially fail). */
	page = balloon_first_page();
	for (i = 0; i < nr_pages; i++) {
		BUG_ON(page == NULL);
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc < 0)
		goto out;

	/* rc is the number of extents Xen actually populated; it may be
	   less than nr_pages. */
	for (i = 0; i < rc; i++) {
		page = balloon_retrieve();
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);
		/* The slot must be vacant unless the P2M is auto-translated. */
		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
		       phys_to_machine_mapping_valid(pfn));

		set_phys_to_machine(pfn, frame_list[i]);

		/* Link back into the page tables if not highmem. */
		if (pfn < max_low_pfn) {
			int ret;
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(frame_list[i], PAGE_KERNEL),
				0);
			BUG_ON(ret);
		}

		/* Relinquish the page back to the allocator. */
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
	}

	balloon_stats.current_pages += rc;

 out:
	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	/* <0: hypercall error; 0: complete; 1: partial success. */
	return rc < 0 ? rc : rc != nr_pages;
}
/*
 * decrease_reservation - give up to @nr_pages back to Xen.
 *
 * Allocates pages, scrubs them, unmaps them from the kernel linear
 * mapping, invalidates their P2M entries, queues them on the balloon
 * list, and finally asks Xen to drop the backing machine frames.
 *
 * Returns 1 if the caller should back off and retry later (the allocator
 * ran dry part-way through), 0 otherwise.
 */
static int decrease_reservation(unsigned long nr_pages)
{
	unsigned long pfn, i, flags;
	struct page *page;
	int need_sleep = 0;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	/* Batch size is bounded by the page-sized frame_list. */
	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		/* GFP_BALLOON: don't try hard enough to wake the OOM killer. */
		if ((page = alloc_page(GFP_BALLOON)) == NULL) {
			/* Hand back only what we managed to allocate. */
			nr_pages = i;
			need_sleep = 1;
			break;
		}

		pfn = page_to_pfn(page);
		frame_list[i] = pfn_to_mfn(pfn);

		scrub_page(page);

		/* Unmap from the linear mapping before the frame vanishes;
		   highmem pages have no permanent kernel mapping. */
		if (!PageHighMem(page)) {
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				__pte_ma(0), 0);
			BUG_ON(ret);
		}

	}

	/* Ensure that ballooned highmem pages don't have kmaps. */
	kmap_flush_unused();
	flush_tlb_all();

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* No more mappings: invalidate P2M and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = mfn_to_pfn(frame_list[i]);
		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		balloon_append(pfn_to_page(pfn));
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	/* Giving frames back must not partially fail. */
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	return need_sleep;
}
/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
	int need_sleep = 0;
	long credit;

	mutex_lock(&balloon_mutex);

	do {
		/* credit > 0: grow toward target; credit < 0: shrink. */
		credit = current_target() - balloon_stats.current_pages;
		if (credit > 0)
			need_sleep = (increase_reservation(credit) != 0);
		if (credit < 0)
			need_sleep = (decrease_reservation(-credit) != 0);

#ifndef CONFIG_PREEMPT
		/* This loop can run for a while; be nice on
		   non-preemptible kernels. */
		if (need_resched())
			schedule();
#endif
	} while ((credit != 0) && !need_sleep);

	/* Schedule more work if there is some still to be done. */
	if (current_target() != balloon_stats.current_pages)
		mod_timer(&balloon_timer, jiffies + HZ);

	mutex_unlock(&balloon_mutex);
}
/* Resets the Xen limit, sets new target, and kicks off processing. */
static void balloon_set_new_target(unsigned long target)
{
	/* No need for lock. Not read-modify-write updates. */
	balloon_stats.target_pages = target;
	schedule_work(&balloon_worker);
}
/* xenstore watch on this domain's memory/target key. */
static struct xenbus_watch target_watch =
{
	.node = "memory/target"
};
  295. /* React to a change in the target key */
  296. static void watch_target(struct xenbus_watch *watch,
  297. const char **vec, unsigned int len)
  298. {
  299. unsigned long long new_target;
  300. int err;
  301. err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
  302. if (err != 1) {
  303. /* This is ok (for domain0 at least) - so just return */
  304. return;
  305. }
  306. /* The given memory/target value is in KiB, so it needs converting to
  307. * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
  308. */
  309. balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
  310. }
  311. static int balloon_init_watcher(struct notifier_block *notifier,
  312. unsigned long event,
  313. void *data)
  314. {
  315. int err;
  316. err = register_xenbus_watch(&target_watch);
  317. if (err)
  318. printk(KERN_ERR "Failed to set balloon watcher\n");
  319. return NOTIFY_DONE;
  320. }
/* Hooked up in balloon_init() so the watch is registered once xenstore is up. */
static struct notifier_block xenstore_notifier;
/*
 * balloon_init - driver initialisation (subsys_initcall).
 *
 * Seeds the stats from the boot-time allocation, registers the sysfs
 * interface, pre-loads the balloon with the pfn range Xen did not
 * populate at boot, and arranges for the xenstore target watch.
 */
static int __init balloon_init(void)
{
	unsigned long pfn;
	struct page *page;

	/* The balloon driver is only meaningful in a Xen PV guest. */
	if (!xen_pv_domain())
		return -ENODEV;

	pr_info("xen_balloon: Initialising balloon driver.\n");

	/* Pages actually populated at boot, capped by the kernel's max_pfn. */
	balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
	balloon_stats.target_pages = balloon_stats.current_pages;
	balloon_stats.balloon_low = 0;
	balloon_stats.balloon_high = 0;
	balloon_stats.driver_pages = 0UL;

	init_timer(&balloon_timer);
	balloon_timer.data = 0;
	balloon_timer.function = balloon_alarm;

	register_balloon(&balloon_sysdev);

	/* Initialise the balloon with excess memory space: pfns between
	   the boot allocation and max_pfn exist in mem_map but have no
	   backing frames yet. */
	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
		page = pfn_to_page(pfn);
		if (!PageReserved(page))
			balloon_append(page);
	}

	target_watch.callback = watch_target;
	xenstore_notifier.notifier_call = balloon_init_watcher;
	register_xenstore_notifier(&xenstore_notifier);

	return 0;
}
subsys_initcall(balloon_init);
  350. static void balloon_exit(void)
  351. {
  352. /* XXX - release balloon here */
  353. return;
  354. }
  355. module_exit(balloon_exit);
/*
 * BALLOON_SHOW - generate a read-only sysdev attribute named @name whose
 * show method prints @args using @format.
 */
#define BALLOON_SHOW(name, format, args...) \
	static ssize_t show_##name(struct sys_device *dev, \
				   struct sysdev_attribute *attr, \
				   char *buf) \
	{ \
		return sprintf(buf, format, ##args); \
	} \
	static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)

/* Read-only statistics, all reported in KiB. */
BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages));
  368. static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
  369. char *buf)
  370. {
  371. return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
  372. }
  373. static ssize_t store_target_kb(struct sys_device *dev,
  374. struct sysdev_attribute *attr,
  375. const char *buf,
  376. size_t count)
  377. {
  378. char *endchar;
  379. unsigned long long target_bytes;
  380. if (!capable(CAP_SYS_ADMIN))
  381. return -EPERM;
  382. target_bytes = simple_strtoull(buf, &endchar, 0) * 1024;
  383. balloon_set_new_target(target_bytes >> PAGE_SHIFT);
  384. return count;
  385. }
  386. static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
  387. show_target_kb, store_target_kb);
  388. static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr,
  389. char *buf)
  390. {
  391. return sprintf(buf, "%llu\n",
  392. (unsigned long long)balloon_stats.target_pages
  393. << PAGE_SHIFT);
  394. }
  395. static ssize_t store_target(struct sys_device *dev,
  396. struct sysdev_attribute *attr,
  397. const char *buf,
  398. size_t count)
  399. {
  400. char *endchar;
  401. unsigned long long target_bytes;
  402. if (!capable(CAP_SYS_ADMIN))
  403. return -EPERM;
  404. target_bytes = memparse(buf, &endchar);
  405. balloon_set_new_target(target_bytes >> PAGE_SHIFT);
  406. return count;
  407. }
  408. static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
  409. show_target, store_target);
/* Writable attributes created directly on the balloon sysdev. */
static struct sysdev_attribute *balloon_attrs[] = {
	&attr_target_kb,
	&attr_target,
};

/* Read-only statistics, grouped under the "info" subdirectory. */
static struct attribute *balloon_info_attrs[] = {
	&attr_current_kb.attr,
	&attr_low_kb.attr,
	&attr_high_kb.attr,
	&attr_driver_kb.attr,
	NULL
};

static struct attribute_group balloon_info_group = {
	.name = "info",
	.attrs = balloon_info_attrs,
};

static struct sysdev_class balloon_sysdev_class = {
	.name = BALLOON_CLASS_NAME,
};
/*
 * register_balloon - register the sysdev class/device and create all
 * balloon sysfs files.
 *
 * On any failure, everything created so far is torn down and the error
 * code is returned.
 */
static int register_balloon(struct sys_device *sysdev)
{
	int i, error;

	error = sysdev_class_register(&balloon_sysdev_class);
	if (error)
		return error;

	sysdev->id = 0;
	sysdev->cls = &balloon_sysdev_class;

	error = sysdev_register(sysdev);
	if (error) {
		sysdev_class_unregister(&balloon_sysdev_class);
		return error;
	}

	for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
		error = sysdev_create_file(sysdev, balloon_attrs[i]);
		if (error)
			goto fail;
	}

	error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
	if (error)
		goto fail;

	return 0;

 fail:
	/* Remove only the attribute files created before the failure
	   (i == ARRAY_SIZE here when the group creation failed). */
	while (--i >= 0)
		sysdev_remove_file(sysdev, balloon_attrs[i]);
	sysdev_unregister(sysdev);
	sysdev_class_unregister(&balloon_sysdev_class);
	return error;
}

MODULE_LICENSE("GPL");