
/arch/x86/kernel/vmlinux.lds.S

https://bitbucket.org/dimichxp/linux-magx-pbranch
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009  Sam Ravnborg <sam@ravnborg.org>
 *
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If a symbol value should
 * change when the kernel is relocated, make the symbol section relative and
 * put it inside the section definition.
 */
#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386	/* in case the preprocessor is a 32bit one */
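/*
 * OUTPUT_FORMAT(default, big, little): the second and third BFD target
 * names are what ld would use for -EB/-EL. x86 is little-endian only, so
 * all three arguments are the same CONFIG_OUTPUT_FORMAT string
 * ("elf32-i386" or "elf64-x86-64").
 */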
OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
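/*
 * On 32-bit, "jiffies" simply aliases the address of jiffies_64: on a
 * little-endian machine a 32-bit load there atomically reads the low word
 * of the 64-bit counter, so no separate variable is needed.
 */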
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
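/*
 * PHDRS declares the ELF program headers by hand so that output sections
 * can be assigned to specific segments with ":name" below. FLAGS() takes
 * the ELF p_flags bits, PF_X = 1, PF_W = 2, PF_R = 4, hence 5 = R_E and
 * 7 = RWE as noted beside each entry.
 */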
PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(7);		/* RWE */
#ifdef CONFIG_X86_64
	user PT_LOAD FLAGS(7);		/* RWE */
	data.init PT_LOAD FLAGS(7);	/* RWE */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(7);	/* RWE */
#endif
	data.init2 PT_LOAD FLAGS(7);	/* RWE */
#endif
	note PT_NOTE FLAGS(0);		/* ___ */
}
SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = startup_32 - LOAD_OFFSET;
#else
	. = __START_KERNEL;
	phys_startup_64 = startup_64 - LOAD_OFFSET;
#endif
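	/*
	 * Every output section below uses AT(ADDR(section) - LOAD_OFFSET):
	 * ADDR() is the link-time virtual address, and subtracting
	 * LOAD_OFFSET (__PAGE_OFFSET or __START_KERNEL_map) yields the
	 * physical load address recorded in the program headers, keeping
	 * VMA and LMA in step.
	 */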
	/* Text and read-only data */

	/* bootstrapping code */
	.text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
		_text = .;
		*(.text.head)
	} :text = 0x9090
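	/*
	 * "= 0x9090" sets the section fill pattern: alignment gaps are
	 * filled with 0x90 bytes, the x86 NOP opcode, rather than zeros.
	 */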
	/* The rest of the text */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_X86_32
		/* not really needed, already page aligned */
		. = ALIGN(PAGE_SIZE);
		*(.text.page_aligned)
#endif
		. = ALIGN(8);
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090

	NOTES :text :note

	/* Exception table */
	. = ALIGN(16);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	} :text = 0x9090
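	/*
	 * __ex_table holds (faulting instruction, fixup) address pairs; on a
	 * fault in kernel mode, fixup_exception() searches this table and,
	 * if the faulting address is listed, resumes at the fixup instead of
	 * oopsing. The *(.fixup) snippets collected into .text above are the
	 * recovery stubs these entries point at.
	 */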
	RODATA

	/* Data */
	. = ALIGN(PAGE_SIZE);
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;
		DATA_DATA
		CONSTRUCTORS
#ifdef CONFIG_X86_64
		/* End of data section */
		_edata = .;
#endif
	} :data

#ifdef CONFIG_X86_32
	/* 32 bit has nosave before _edata */
	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		__nosave_begin = .;
		*(.data.nosave)
		. = ALIGN(PAGE_SIZE);
		__nosave_end = .;
	}
#endif

	. = ALIGN(PAGE_SIZE);
	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
		*(.data.page_aligned)
		*(.data.idt)
	}

#ifdef CONFIG_X86_32
	. = ALIGN(32);
#else
	. = ALIGN(PAGE_SIZE);
	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
#endif
	.data.cacheline_aligned :
		AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
		*(.data.cacheline_aligned)
	}

	/* rarely changed data like cpu maps */
#ifdef CONFIG_X86_32
	. = ALIGN(32);
#else
	. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
#endif
	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
		*(.data.read_mostly)
#ifdef CONFIG_X86_32
		/* End of data section */
		_edata = .;
#endif
	}

#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
			SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
			SIZEOF(.data.read_mostly) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
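	/*
	 * The vsyscall page is linked at the fixed address -10 MB
	 * (0xffffffffff600000) so userspace can call into it directly, but
	 * it is loaded along with the rest of the kernel. VLOAD(x) converts
	 * a vsyscall-relative address back to its physical load address and
	 * VVIRT(x) to its ordinary kernel virtual address, so symbols such
	 * as jiffies below remain usable from normal kernel code.
	 */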
	. = VSYSCALL_ADDR;
	.vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) {
		*(.vsyscall_0)
	} :user

	__vsyscall_0 = VSYSCALL_VIRT_ADDR;

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
		*(.vsyscall_fn)
	}

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
		*(.vsyscall_gtod_data)
	}

	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
	.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
		*(.vsyscall_clock)
	}
	vsyscall_clock = VVIRT(.vsyscall_clock);

	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
		*(.vsyscall_1)
	}
	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
		*(.vsyscall_2)
	}

	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
		*(.vgetcpu_mode)
	}
	vgetcpu_mode = VVIRT(.vgetcpu_mode);

	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
	.jiffies : AT(VLOAD(.jiffies)) {
		*(.jiffies)
	}
	jiffies = VVIRT(.jiffies);

	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
		*(.vsyscall_3)
	}

	. = VSYSCALL_VIRT_ADDR + PAGE_SIZE;

#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT

#endif /* CONFIG_X86_64 */
	/* init_task */
	. = ALIGN(THREAD_SIZE);
	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
		*(.data.init_task)
	}
#ifdef CONFIG_X86_64
	 :data.init
#endif

	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		__smp_locks_end = .;
		. = ALIGN(PAGE_SIZE);
	}

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
	}

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
	}

	. = ALIGN(16);
	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		__setup_start = .;
		*(.init.setup)
		__setup_end = .;
	}

	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		__initcall_start = .;
		INITCALLS
		__initcall_end = .;
	}

	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		__con_initcall_start = .;
		*(.con_initcall.init)
		__con_initcall_end = .;
	}

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}

	SECURITY_INIT

	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}
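	/*
	 * .parainstructions records paravirt patch sites; at boot,
	 * apply_paravirt() walks the __parainstructions..._end range and
	 * rewrites each site with the code appropriate for the hypervisor
	 * (or bare metal) actually in use.
	 */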
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}
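	/*
	 * .altinstructions holds struct alt_instr records pairing an
	 * original instruction sequence with a replacement kept in
	 * .altinstr_replacement; apply_alternatives() patches the originals
	 * in place during boot when the CPU advertises the corresponding
	 * feature. Both sections sit inside the init region and are freed
	 * once patching is done.
	 */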
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

#ifdef CONFIG_BLK_DEV_INITRD
	. = ALIGN(PAGE_SIZE);
	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		__initramfs_start = .;
		*(.init.ramfs)
		__initramfs_end = .;
	}
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - __data_nosave - should
	 * start another section data.init2. Also, pda should be at the head of
	 * percpu area. Preallocate it and define the percpu offset symbol
	 * so that it can be accessed as a percpu variable.
	 */
	. = ALIGN(PAGE_SIZE);
	PERCPU_VADDR(0, :percpu)
#else
	PERCPU(PAGE_SIZE)
#endif

	. = ALIGN(PAGE_SIZE);

	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		. = ALIGN(PAGE_SIZE);
		__nosave_begin = .;
		*(.data.nosave)
		. = ALIGN(PAGE_SIZE);
		__nosave_end = .;
	} :data.init2
	/* use another section data.init2, see PERCPU_VADDR() above */
#endif

	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss.page_aligned)
		*(.bss)
		. = ALIGN(4);
		__bss_stop = .;
	}

	. = ALIGN(PAGE_SIZE);
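	/*
	 * The brk region is a boot-time bump allocator: extend_brk() hands
	 * out memory from __brk_base up to __brk_limit before the page
	 * allocator is up, and whatever remains unused is released
	 * afterwards. The 64k below is slop beyond the RESERVE_BRK()
	 * reservations collected in .brk_reservation.
	 */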
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}

	.end : AT(ADDR(.end) - LOAD_OFFSET) {
		_end = .;
	}

	/* Sections to be discarded */
	/DISCARD/ : {
		*(.exitcall.exit)
		*(.eh_frame)
		*(.discard)
	}

	STABS_DEBUG
	DWARF_DEBUG
}
#ifdef CONFIG_X86_32
ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	"kernel image bigger than KERNEL_IMAGE_SIZE")
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
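/*
 * With zero-based percpu symbols, per_cpu__gdt_page is an offset, not an
 * address. INIT_PER_CPU(gdt_page) therefore expands to
 *	init_per_cpu__gdt_page = per_cpu__gdt_page + __per_cpu_load;
 * giving early boot code a real link-time address inside the initial
 * percpu data area, before the per-CPU areas are set up.
 */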
/*
 * Build-time check on the image size:
 */
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	"kernel image bigger than KERNEL_IMAGE_SIZE")

#ifdef CONFIG_SMP
ASSERT((per_cpu__irq_stack_union == 0),
	"irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	"kexec control code size is too big")
#endif