/* tboot-1.7.0/tboot/common/e820.c */
/*
 * e820.c: support functions for manipulating the e820 table
 *
 * Copyright (c) 2006-2010, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
  35. #include <config.h>
  36. #include <types.h>
  37. #include <stdbool.h>
  38. #include <printk.h>
  39. #include <cmdline.h>
  40. #include <multiboot.h>
  41. #include <stdarg.h>
  42. #include <misc.h>
  43. #include <pci_cfgreg.h>
  44. #include <e820.h>
  45. #include <txt/config_regs.h>
  46. /*
  47. * copy of bootloader/BIOS e820 table with adjusted entries
  48. * this version will replace original in mbi
  49. */
  50. #define MAX_E820_ENTRIES (TBOOT_E820_COPY_SIZE / sizeof(memory_map_t))
  51. static unsigned int g_nr_map;
  52. static memory_map_t *g_copy_e820_map = (memory_map_t *)TBOOT_E820_COPY_ADDR;
  53. static inline void split64b(uint64_t val, uint32_t *val_lo, uint32_t *val_hi)
  54. {
  55. *val_lo = (uint32_t)(val & 0xffffffff);
  56. *val_hi = (uint32_t)(val >> 32);
  57. }
  58. static inline uint64_t combine64b(uint32_t val_lo, uint32_t val_hi)
  59. {
  60. return ((uint64_t)val_hi << 32) | (uint64_t)val_lo;
  61. }
  62. static inline uint64_t e820_base_64(memory_map_t *entry)
  63. {
  64. return combine64b(entry->base_addr_low, entry->base_addr_high);
  65. }
  66. static inline uint64_t e820_length_64(memory_map_t *entry)
  67. {
  68. return combine64b(entry->length_low, entry->length_high);
  69. }
  70. /*
  71. * print_e820_map
  72. *
  73. * Prints copied e820 map w/o any header (i.e. just entries, indented by a tab)
  74. *
  75. */
  76. static void print_map(memory_map_t *e820, int nr_map)
  77. {
  78. for ( int i = 0; i < nr_map; i++ ) {
  79. memory_map_t *entry = &e820[i];
  80. uint64_t base_addr, length;
  81. base_addr = e820_base_64(entry);
  82. length = e820_length_64(entry);
  83. printk("\t%016Lx - %016Lx (%d)\n",
  84. (unsigned long long)base_addr,
  85. (unsigned long long)(base_addr + length),
  86. entry->type);
  87. }
  88. }
  89. static bool insert_after_region(memory_map_t *e820map, unsigned int *nr_map,
  90. unsigned int pos, uint64_t addr, uint64_t size,
  91. uint32_t type)
  92. {
  93. /* no more room */
  94. if ( *nr_map + 1 > MAX_E820_ENTRIES )
  95. return false;
  96. /* shift (copy) everything up one entry */
  97. for ( unsigned int i = *nr_map - 1; i > pos; i--)
  98. e820map[i+1] = e820map[i];
  99. /* now add our entry */
  100. split64b(addr, &(e820map[pos+1].base_addr_low),
  101. &(e820map[pos+1].base_addr_high));
  102. split64b(size, &(e820map[pos+1].length_low),
  103. &(e820map[pos+1].length_high));
  104. e820map[pos+1].type = type;
  105. e820map[pos+1].size = sizeof(memory_map_t) - sizeof(uint32_t);
  106. (*nr_map)++;
  107. return true;
  108. }
  109. static void remove_region(memory_map_t *e820map, unsigned int *nr_map,
  110. unsigned int pos)
  111. {
  112. /* shift (copy) everything down one entry */
  113. for ( unsigned int i = pos; i < *nr_map - 1; i++)
  114. e820map[i] = e820map[i+1];
  115. (*nr_map)--;
  116. }
  117. static bool protect_region(memory_map_t *e820map, unsigned int *nr_map,
  118. uint64_t new_addr, uint64_t new_size,
  119. uint32_t new_type)
  120. {
  121. uint64_t addr, tmp_addr, size, tmp_size;
  122. uint32_t type;
  123. unsigned int i;
  124. if ( new_size == 0 )
  125. return true;
  126. /* check for wrap */
  127. if ( new_addr + new_size < new_addr )
  128. return false;
  129. /* find where our region belongs in the table and insert it */
  130. for ( i = 0; i < *nr_map; i++ ) {
  131. addr = e820_base_64(&e820map[i]);
  132. size = e820_length_64(&e820map[i]);
  133. type = e820map[i].type;
  134. /* is our region at the beginning of the current map region? */
  135. if ( new_addr == addr ) {
  136. if ( !insert_after_region(e820map, nr_map, i-1, new_addr, new_size,
  137. new_type) )
  138. return false;
  139. break;
  140. }
  141. /* are we w/in the current map region? */
  142. else if ( new_addr > addr && new_addr < (addr + size) ) {
  143. if ( !insert_after_region(e820map, nr_map, i, new_addr, new_size,
  144. new_type) )
  145. return false;
  146. /* fixup current region */
  147. tmp_addr = e820_base_64(&e820map[i]);
  148. split64b(new_addr - tmp_addr, &(e820map[i].length_low),
  149. &(e820map[i].length_high));
  150. i++; /* adjust to always be that of our region */
  151. /* insert a copy of current region (before adj) after us so */
  152. /* that rest of code can be common with previous case */
  153. if ( !insert_after_region(e820map, nr_map, i, addr, size, type) )
  154. return false;
  155. break;
  156. }
  157. /* is our region in a gap in the map? */
  158. else if ( addr > new_addr ) {
  159. if ( !insert_after_region(e820map, nr_map, i-1, new_addr, new_size,
  160. new_type) )
  161. return false;
  162. break;
  163. }
  164. }
  165. /* if we reached the end of the map without finding an overlapping */
  166. /* region, insert us at the end (note that this test won't trigger */
  167. /* for the second case above because the insert() will have incremented */
  168. /* nr_map and so i++ will still be less) */
  169. if ( i == *nr_map ) {
  170. if ( !insert_after_region(e820map, nr_map, i-1, new_addr, new_size,
  171. new_type) )
  172. return false;
  173. return true;
  174. }
  175. i++; /* move to entry after our inserted one (we're not at end yet) */
  176. tmp_addr = e820_base_64(&e820map[i]);
  177. tmp_size = e820_length_64(&e820map[i]);
  178. /* did we split the (formerly) previous region? */
  179. if ( (new_addr >= tmp_addr) &&
  180. ((new_addr + new_size) < (tmp_addr + tmp_size)) ) {
  181. /* then adjust the current region (adj size first) */
  182. split64b((tmp_addr + tmp_size) - (new_addr + new_size),
  183. &(e820map[i].length_low), &(e820map[i].length_high));
  184. split64b(new_addr + new_size,
  185. &(e820map[i].base_addr_low), &(e820map[i].base_addr_high));
  186. return true;
  187. }
  188. /* if our region completely covers any existing regions, delete them */
  189. while ( (i < *nr_map) && ((new_addr + new_size) >=
  190. (tmp_addr + tmp_size)) ) {
  191. remove_region(e820map, nr_map, i);
  192. tmp_addr = e820_base_64(&e820map[i]);
  193. tmp_size = e820_length_64(&e820map[i]);
  194. }
  195. /* finally, if our region partially overlaps an existing region, */
  196. /* then truncate the existing region */
  197. if ( i < *nr_map ) {
  198. tmp_addr = e820_base_64(&e820map[i]);
  199. tmp_size = e820_length_64(&e820map[i]);
  200. if ( (new_addr + new_size) > tmp_addr ) {
  201. split64b((tmp_addr + tmp_size) - (new_addr + new_size),
  202. &(e820map[i].length_low), &(e820map[i].length_high));
  203. split64b(new_addr + new_size, &(e820map[i].base_addr_low),
  204. &(e820map[i].base_addr_high));
  205. }
  206. }
  207. return true;
  208. }
  209. /*
  210. * is_overlapped
  211. *
  212. * Detect whether two ranges are overlapped.
  213. *
  214. * return: true = overlapped
  215. */
  216. static bool is_overlapped(uint64_t base, uint64_t end, uint64_t e820_base,
  217. uint64_t e820_end)
  218. {
  219. uint64_t length = end - base, e820_length = e820_end - e820_base;
  220. uint64_t min, max;
  221. min = (base < e820_base)?base:e820_base;
  222. max = (end > e820_end)?end:e820_end;
  223. /* overlapping */
  224. if ( (max - min) < (length + e820_length) )
  225. return true;
  226. if ( (max - min) == (length + e820_length)
  227. && ( ((length == 0) && (base > e820_base) && (base < e820_end))
  228. || ((e820_length == 0) && (e820_base > base) &&
  229. (e820_base < end)) ) )
  230. return true;
  231. return false;
  232. }
  233. /*
  234. * copy_e820_map
  235. *
  236. * Copies the raw e820 map from bootloader to new table with room for expansion
  237. *
  238. * return: false = error (no table or table too big for new space)
  239. */
  240. bool copy_e820_map(const multiboot_info_t *mbi)
  241. {
  242. g_nr_map = 0;
  243. if ( mbi->flags & MBI_MEMMAP ) {
  244. printk("original e820 map:\n");
  245. print_map((memory_map_t *)mbi->mmap_addr,
  246. mbi->mmap_length/sizeof(memory_map_t));
  247. uint32_t entry_offset = 0;
  248. while ( entry_offset < mbi->mmap_length &&
  249. g_nr_map < MAX_E820_ENTRIES ) {
  250. memory_map_t *entry = (memory_map_t *)
  251. (mbi->mmap_addr + entry_offset);
  252. /* we want to support unordered and/or overlapping entries */
  253. /* so use protect_region() to insert into existing map, since */
  254. /* it handles these cases */
  255. if ( !protect_region(g_copy_e820_map, &g_nr_map,
  256. e820_base_64(entry), e820_length_64(entry),
  257. entry->type) )
  258. return false;
  259. entry_offset += entry->size + sizeof(entry->size);
  260. }
  261. if ( g_nr_map == MAX_E820_ENTRIES ) {
  262. printk("Too many e820 entries\n");
  263. return false;
  264. }
  265. }
  266. else if ( mbi->flags & MBI_MEMLIMITS ) {
  267. printk("no e820 map, mem_lower=%x, mem_upper=%x\n",
  268. mbi->mem_lower, mbi->mem_upper);
  269. /* lower limit is 0x00000000 - <mem_lower>*0x400 (i.e. in kb) */
  270. g_copy_e820_map[0].base_addr_low = 0;
  271. g_copy_e820_map[0].base_addr_high = 0;
  272. g_copy_e820_map[0].length_low = mbi->mem_lower << 10;
  273. g_copy_e820_map[0].length_high = 0;
  274. g_copy_e820_map[0].type = E820_RAM;
  275. g_copy_e820_map[0].size = sizeof(memory_map_t) - sizeof(uint32_t);
  276. /* upper limit is 0x00100000 - <mem_upper>*0x400 */
  277. g_copy_e820_map[1].base_addr_low = 0x100000;
  278. g_copy_e820_map[1].base_addr_high = 0;
  279. split64b((uint64_t)mbi->mem_upper << 10,
  280. &(g_copy_e820_map[1].length_low),
  281. &(g_copy_e820_map[1].length_high));
  282. g_copy_e820_map[1].type = E820_RAM;
  283. g_copy_e820_map[1].size = sizeof(memory_map_t) - sizeof(uint32_t);
  284. g_nr_map = 2;
  285. }
  286. else {
  287. printk("no e820 map nor memory limits provided\n");
  288. return false;
  289. }
  290. return true;
  291. }
  292. void replace_e820_map(multiboot_info_t *mbi)
  293. {
  294. /* replace original with the copy */
  295. mbi->mmap_addr = (uint32_t)g_copy_e820_map;
  296. mbi->mmap_length = g_nr_map * sizeof(memory_map_t);
  297. mbi->flags |= MBI_MEMMAP; /* in case only MBI_MEMLIMITS was set */
  298. }
  299. bool e820_protect_region(uint64_t addr, uint64_t size, uint32_t type)
  300. {
  301. return protect_region(g_copy_e820_map, &g_nr_map, addr, size, type);
  302. }
  303. /*
  304. * e820_check_region
  305. *
  306. * Given a range, check which kind of range it covers
  307. *
  308. * return: E820_GAP, it covers gap in e820 map;
  309. * E820_MIXED, it covers at least two different kinds of ranges;
  310. * E820_XXX, it covers E820_XXX range only;
  311. * it will not return 0.
  312. */
  313. uint32_t e820_check_region(uint64_t base, uint64_t length)
  314. {
  315. memory_map_t* e820_entry;
  316. uint64_t end = base + length, e820_base, e820_end, e820_length;
  317. uint32_t type;
  318. uint32_t ret = 0;
  319. bool gap = true; /* suppose there is always a virtual gap at first */
  320. e820_base = 0;
  321. e820_length = 0;
  322. for ( unsigned int i = 0; i < g_nr_map; i = gap ? i : i+1, gap = !gap ) {
  323. e820_entry = &g_copy_e820_map[i];
  324. if ( gap ) {
  325. /* deal with the gap in e820 map */
  326. e820_base = e820_base + e820_length;
  327. e820_length = e820_base_64(e820_entry) - e820_base;
  328. type = E820_GAP;
  329. }
  330. else {
  331. /* deal with the normal item in e820 map */
  332. e820_base = e820_base_64(e820_entry);
  333. e820_length = e820_length_64(e820_entry);
  334. type = e820_entry->type;
  335. }
  336. if ( e820_length == 0 )
  337. continue; /* if the range is zero, then skip */
  338. e820_end = e820_base + e820_length;
  339. if ( !is_overlapped(base, end, e820_base, e820_end) )
  340. continue; /* if no overlapping, then skip */
  341. /* if the value of ret is not assigned before,
  342. then set ret to type directly */
  343. if ( ret == 0 ) {
  344. ret = type;
  345. continue;
  346. }
  347. /* if the value of ret is assigned before but ret is equal to type,
  348. then no need to do anything */
  349. if ( ret == type )
  350. continue;
  351. /* if the value of ret is assigned before but it is GAP,
  352. then no need to do anything since any type merged with GAP is GAP */
  353. if ( ret == E820_GAP )
  354. continue;
  355. /* if the value of ret is assigned before but it is not GAP and type
  356. is GAP now this time, then set ret to GAP since any type merged
  357. with GAP is GAP. */
  358. if ( type == E820_GAP ) {
  359. ret = E820_GAP;
  360. continue;
  361. }
  362. /* if the value of ret is assigned before but both ret and type are
  363. not GAP and their values are not equal, then set ret to MIXED
  364. since any two non-GAP values are merged into MIXED if they are
  365. not equal. */
  366. ret = E820_MIXED;
  367. }
  368. /* deal with the last gap */
  369. if ( is_overlapped(base, end, e820_base + e820_length, (uint64_t)-1) )
  370. ret = E820_GAP;
  371. /* print the result */
  372. printk(" (range from %016Lx to %016Lx is in ", base, base + length);
  373. switch (ret) {
  374. case E820_RAM:
  375. printk("E820_RAM)\n"); break;
  376. case E820_RESERVED:
  377. printk("E820_RESERVED)\n"); break;
  378. case E820_ACPI:
  379. printk("E820_ACPI)\n"); break;
  380. case E820_NVS:
  381. printk("E820_NVS)\n"); break;
  382. case E820_UNUSABLE:
  383. printk("E820_UNUSABLE)\n"); break;
  384. case E820_GAP:
  385. printk("E820_GAP)\n"); break;
  386. case E820_MIXED:
  387. printk("E820_MIXED)\n"); break;
  388. default:
  389. printk("UNKNOWN)\n");
  390. }
  391. return ret;
  392. }
  393. /*
  394. * e820_reserve_ram
  395. *
  396. * Given the range, any ram range in e820 is in it, change type to reserved.
  397. *
  398. * return: false = error
  399. */
  400. bool e820_reserve_ram(uint64_t base, uint64_t length)
  401. {
  402. memory_map_t* e820_entry;
  403. uint64_t e820_base, e820_length, e820_end;
  404. uint64_t end;
  405. if ( length == 0 )
  406. return true;
  407. end = base + length;
  408. /* find where our region should cover the ram in e820 */
  409. for ( unsigned int i = 0; i < g_nr_map; i++ ) {
  410. e820_entry = &g_copy_e820_map[i];
  411. e820_base = e820_base_64(e820_entry);
  412. e820_length = e820_length_64(e820_entry);
  413. e820_end = e820_base + e820_length;
  414. /* if not ram, no need to deal with */
  415. if ( e820_entry->type != E820_RAM )
  416. continue;
  417. /* if the range is before the current ram range, skip the ram range */
  418. if ( end <= e820_base )
  419. continue;
  420. /* if the range is after the current ram range, skip the ram range */
  421. if ( base >= e820_end )
  422. continue;
  423. /* case 1: the current ram range is within the range:
  424. base, e820_base, e820_end, end */
  425. if ( (base <= e820_base) && (e820_end <= end) )
  426. e820_entry->type = E820_RESERVED;
  427. /* case 2: overlapping:
  428. base, e820_base, end, e820_end */
  429. else if ( (e820_base >= base) && (end > e820_base) &&
  430. (e820_end > end) ) {
  431. /* split the current ram map */
  432. if ( !insert_after_region(g_copy_e820_map, &g_nr_map, i-1,
  433. e820_base, (end - e820_base),
  434. E820_RESERVED) )
  435. return false;
  436. /* fixup the current ram map */
  437. i++;
  438. split64b(end, &(g_copy_e820_map[i].base_addr_low),
  439. &(g_copy_e820_map[i].base_addr_high));
  440. split64b(e820_end - end, &(g_copy_e820_map[i].length_low),
  441. &(g_copy_e820_map[i].length_high));
  442. /* no need to check more */
  443. break;
  444. }
  445. /* case 3: overlapping:
  446. e820_base, base, e820_end, end */
  447. else if ( (base > e820_base) && (e820_end > base) &&
  448. (end >= e820_end) ) {
  449. /* fixup the current ram map */
  450. split64b((base - e820_base), &(g_copy_e820_map[i].length_low),
  451. &(g_copy_e820_map[i].length_high));
  452. /* split the current ram map */
  453. if ( !insert_after_region(g_copy_e820_map, &g_nr_map, i, base,
  454. (e820_end - base), E820_RESERVED) )
  455. return false;
  456. i++;
  457. }
  458. /* case 4: the range is within the current ram range:
  459. e820_base, base, end, e820_end */
  460. else if ( (base > e820_base) && (e820_end > end) ) {
  461. /* fixup the current ram map */
  462. split64b((base - e820_base), &(g_copy_e820_map[i].length_low),
  463. &(g_copy_e820_map[i].length_high));
  464. /* split the current ram map */
  465. if ( !insert_after_region(g_copy_e820_map, &g_nr_map, i, base,
  466. length, E820_RESERVED) )
  467. return false;
  468. i++;
  469. /* fixup the rest of the current ram map */
  470. if ( !insert_after_region(g_copy_e820_map, &g_nr_map, i, end,
  471. (e820_end - end), e820_entry->type) )
  472. return false;
  473. i++;
  474. /* no need to check more */
  475. break;
  476. }
  477. else {
  478. printk("we should never get here\n");
  479. return false;
  480. }
  481. }
  482. return true;
  483. }
  484. void print_e820_map(void)
  485. {
  486. print_map(g_copy_e820_map, g_nr_map);
  487. }
  488. bool get_ram_ranges(uint64_t *min_lo_ram, uint64_t *max_lo_ram,
  489. uint64_t *min_hi_ram, uint64_t *max_hi_ram)
  490. {
  491. if ( min_lo_ram == NULL || max_lo_ram == NULL ||
  492. min_hi_ram == NULL || max_hi_ram == NULL )
  493. return false;
  494. *min_lo_ram = *min_hi_ram = ~0ULL;
  495. *max_lo_ram = *max_hi_ram = 0;
  496. bool found_reserved_region = false;
  497. for ( unsigned int i = 0; i < g_nr_map; i++ ) {
  498. memory_map_t *entry = &g_copy_e820_map[i];
  499. uint64_t base = e820_base_64(entry);
  500. uint64_t limit = base + e820_length_64(entry);
  501. if ( entry->type == E820_RAM ) {
  502. /* if range straddles 4GB boundary, that is an error */
  503. if ( base < 0x100000000ULL && limit > 0x100000000ULL ) {
  504. printk("e820 memory range straddles 4GB boundary\n");
  505. return false;
  506. }
  507. /*
  508. * some BIOSes put legacy USB buffers in reserved regions <4GB,
  509. * which if DMA protected cause SMM to hang, so make sure that
  510. * we don't overlap any of these even if that wastes RAM
  511. */
  512. if ( !found_reserved_region ) {
  513. if ( base < 0x100000000ULL && base < *min_lo_ram )
  514. *min_lo_ram = base;
  515. if ( limit <= 0x100000000ULL && limit > *max_lo_ram )
  516. *max_lo_ram = limit;
  517. }
  518. else { /* need to reserve low RAM above reserved regions */
  519. if ( base < 0x100000000ULL ) {
  520. printk("discarding RAM above reserved regions: 0x%Lx - 0x%Lx\n", base, limit);
  521. if ( !e820_reserve_ram(base, limit - base) )
  522. return false;
  523. }
  524. }
  525. if ( base >= 0x100000000ULL && base < *min_hi_ram )
  526. *min_hi_ram = base;
  527. if ( limit > 0x100000000ULL && limit > *max_hi_ram )
  528. *max_hi_ram = limit;
  529. }
  530. else {
  531. /* parts of low memory may be reserved for cseg, ISA hole,
  532. etc. but these seem OK to DMA protect, so ignore reserved
  533. regions <0x100000 */
  534. if ( *min_lo_ram != ~0ULL && limit > 0x100000ULL )
  535. found_reserved_region = true;
  536. }
  537. }
  538. /* no low RAM found */
  539. if ( *min_lo_ram >= *max_lo_ram ) {
  540. printk("no low ram in e820 map\n");
  541. return false;
  542. }
  543. /* no high RAM found */
  544. if ( *min_hi_ram >= *max_hi_ram )
  545. *min_hi_ram = *max_hi_ram = 0;
  546. return true;
  547. }
  548. /* find highest (< <limit>) RAM region of at least <size> bytes */
  549. void get_highest_sized_ram(uint64_t size, uint64_t limit,
  550. uint64_t *ram_base, uint64_t *ram_size)
  551. {
  552. uint64_t last_fit_base = 0, last_fit_size = 0;
  553. if ( ram_base == NULL || ram_size == NULL )
  554. return;
  555. for ( unsigned int i = 0; i < g_nr_map; i++ ) {
  556. memory_map_t *entry = &g_copy_e820_map[i];
  557. if ( entry->type == E820_RAM ) {
  558. uint64_t base = e820_base_64(entry);
  559. uint64_t length = e820_length_64(entry);
  560. /* over 4GB so use the last region that fit */
  561. if ( base + length > limit )
  562. break;
  563. if ( size <= length ) {
  564. last_fit_base = base;
  565. last_fit_size = length;
  566. }
  567. }
  568. }
  569. *ram_base = last_fit_base;
  570. *ram_size = last_fit_size;
  571. }
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */