/Ethereal-msm8939-beta9/arch/metag/mm/init.c

https://bitbucket.org/MilosStamenkovic95/etherealos

/*
 * Copyright (C) 2005,2006,2007,2008,2009,2010 Imagination Technologies
 *
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/user_gateway.h>
#include <asm/mmzone.h>
#include <asm/fixmap.h>

unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;

unsigned long empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

extern char __user_gateway_start;
extern char __user_gateway_end;

void *gateway_page;

/*
 * Insert the gateway page into a set of page tables, creating the
 * page tables if necessary.
 */
static void insert_gateway_page(pgd_t *pgd, unsigned long address)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(!pgd_present(*pgd));

	pud = pud_offset(pgd, address);
	BUG_ON(!pud_present(*pud));

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd)) {
		pte = alloc_bootmem_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
	}

	pte = pte_offset_kernel(pmd, address);
	set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY));
}

/* Alloc and map a page in a known location accessible to userspace. */
static void __init user_gateway_init(void)
{
	unsigned long address = USER_GATEWAY_PAGE;
	int offset = pgd_index(address);
	pgd_t *pgd;

	gateway_page = alloc_bootmem_pages(PAGE_SIZE);

	pgd = swapper_pg_dir + offset;
	insert_gateway_page(pgd, address);

#ifdef CONFIG_METAG_META12
	/*
	 * Insert the gateway page into our current page tables even
	 * though we've already inserted it into our reference page
	 * table (swapper_pg_dir). This is because with a META1 mmu we
	 * copy just the user address range and not the gateway page
	 * entry on context switch, see switch_mmu().
	 */
	pgd = (pgd_t *)mmu_get_base() + offset;
	insert_gateway_page(pgd, address);
#endif /* CONFIG_METAG_META12 */

	BUG_ON((&__user_gateway_end - &__user_gateway_start) > PAGE_SIZE);
	gateway_page += (address & ~PAGE_MASK);

	memcpy(gateway_page, &__user_gateway_start,
	       &__user_gateway_end - &__user_gateway_start);

	/*
	 * We don't need to flush the TLB here, there should be no mapping
	 * present at boot for this address and only valid mappings are in
	 * the TLB (apart from on Meta 1.x, but those cached invalid
	 * mappings should be impossible to hit here).
	 *
	 * We don't flush the code cache here even though we have written
	 * code through the data cache and they may not be coherent. At
	 * this point we assume there is no stale data in the code cache
	 * for this address so there is no need to flush.
	 */
}
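
/*
 * Allocate and initialise the pglist_data for node @nid. With
 * CONFIG_NEED_MULTIPLE_NODES the structure is carved out of memblock,
 * preferably below the node's own end PFN and otherwise anywhere in
 * DRAM; without it the statically allocated NODE_DATA(0) is used.
 * Either way the node's PFN range is recorded.
 */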
static void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				     SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					     SMP_CACHE_BYTES,
					     memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
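
/*
 * Set up the bootmem allocator for one online node: allocate its
 * bootmap bitmap from memblock, register the node's active page
 * ranges with bootmem, and (for node 0 only, for now) re-reserve the
 * low-memory regions that memblock already marked as in use.
 */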
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;
#ifdef CONFIG_HIGHMEM
	if (end_pfn > max_low_pfn)
		end_pfn = max_low_pfn;
#endif

	total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			unsigned long size = reg->size;

#ifdef CONFIG_HIGHMEM
			/* ...but not highmem */
			if (PFN_DOWN(reg->base) >= highstart_pfn)
				continue;

			if (PFN_UP(reg->base + size) > highstart_pfn)
				size = (highstart_pfn - PFN_DOWN(reg->base))
					<< PAGE_SHIFT;
#endif

			reserve_bootmem(reg->base, size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(nid);
}
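
/*
 * Hand the memory map over from memblock to bootmem: assign every
 * memblock memory region to node 0, allocate and online that node,
 * let soc_mem_setup() perform any SoC-specific memory setup, then
 * initialise bootmem for each online node and finally sparsemem.
 */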
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn), 0);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	soc_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}

extern char _heap_start[];
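
/*
 * Register the low-memory range with memblock and reserve everything
 * from the base of RAM up to the start of the heap (_heap_start,
 * rounded up to a page), i.e. the kernel image. With CONFIG_HIGHMEM
 * the highmem range is also added and reserved so that its page
 * structures get initialised.
 */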
static void __init init_and_reserve_mem(void)
{
	unsigned long start_pfn, heap_start;
	u64 base = min_low_pfn << PAGE_SHIFT;
	u64 size = (max_low_pfn << PAGE_SHIFT) - base;

	heap_start = (unsigned long) &_heap_start;

	memblock_add(base, size);

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(heap_start));

	/*
	 * Reserve the kernel text.
	 */
	memblock_reserve(base, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - base);

#ifdef CONFIG_HIGHMEM
	/*
	 * Add & reserve highmem, so page structures are initialised.
	 */
	base = highstart_pfn << PAGE_SHIFT;
	size = (highend_pfn << PAGE_SHIFT) - base;
	if (size) {
		memblock_add(base, size);
		memblock_reserve(base, size);
	}
#endif
}

#ifdef CONFIG_HIGHMEM
/*
 * Ensure we have allocated page tables in swapper_pg_dir for the
 * fixed mappings range from 'start' to 'end'.
 */
static void __init allocate_pgtables(unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = swapper_pg_dir + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pmd = (pmd_t *)pgd;
		for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
			vaddr += PMD_SIZE;

			if (!pmd_none(*pmd))
				continue;

			pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte);
		}
		j = 0;
	}
}
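
/*
 * Pre-allocate page tables for the fixmap range and the permanent
 * kmap area, and remember the pte underlying PKMAP_BASE in
 * pkmap_page_table for use by the kmap code.
 */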
static void __init fixedrange_init(void)
{
	unsigned long vaddr, end;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Fixed mappings:
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	allocate_pgtables(vaddr, end);

	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	allocate_pgtables(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
#endif /* CONFIG_HIGHMEM */

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/metag/kernel/setup.c.
 */
void __init paging_init(unsigned long mem_end)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	init_and_reserve_mem();

	memblock_allow_resize();

	memblock_dump_all();

	nodes_clear(node_online_map);

	init_new_context(&init_task, &init_mm);

	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	do_init_bootmem();

	mmu_init(mem_end);

#ifdef CONFIG_HIGHMEM
	fixedrange_init();
	kmap_init();
#endif

	/* Initialize the zero page to a bootmem page, already zeroed. */
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);

	user_gateway_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

#ifdef CONFIG_HIGHMEM
		max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif
		pr_info("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
			nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}
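
/*
 * Release the remaining bootmem pages to the buddy allocator and
 * report how much memory is available. Highmem pages, which bootmem
 * does not cover, are freed individually beforehand.
 */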
void __init mem_init(void)
{
	int nid;

#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	/*
	 * Explicitly reset zone->managed_pages because highmem pages are
	 * freed before calling free_all_bootmem_node();
	 */
	reset_all_zones_managed_pages();
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
		free_highmem_page(pfn_to_page(tmp));
	num_physpages += totalhigh_pages;
#endif /* CONFIG_HIGHMEM */

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			free_all_bootmem_node(pgdat);
	}

	pr_info("Memory: %luk/%luk available\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
		num_physpages << (PAGE_SHIFT - 10));

	show_mem(0);
}
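
/*
 * Free the memory occupied by __init code and data once boot has
 * finished with it, poisoning the freed pages to catch late users.
 */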
void free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
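/* Release the pages holding the initrd image once it is no longer needed. */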
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif

#ifdef CONFIG_OF_FLATTREE
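/* An initrd passed via the flattened device tree is not consumed here; just report the range. */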
void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
	pr_err("%s(%llx, %llx)\n",
	       __func__, start, end);
}
#endif /* CONFIG_OF_FLATTREE */