PageRenderTime 53ms CodeModel.GetById 18ms RepoModel.GetById 0ms app.codeStats 1ms

/dex2oat/linker/arm/relative_patcher_arm_base.cc

https://gitlab.com/androidopensourceproject/platform-art
C++ | 551 lines | 443 code | 46 blank | 62 comment | 96 complexity | 6ec42efb1acf8fed0fab190388885d3a MD5 | raw file
  1. /*
  2. * Copyright (C) 2015 The Android Open Source Project
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "linker/arm/relative_patcher_arm_base.h"
  17. #include "base/stl_util.h"
  18. #include "compiled_method-inl.h"
  19. #include "debug/method_debug_info.h"
  20. #include "dex/dex_file_types.h"
  21. #include "linker/linker_patch.h"
  22. #include "oat.h"
  23. #include "oat_quick_method_header.h"
  24. #include "stream/output_stream.h"
  25. namespace art {
  26. namespace linker {
// Bookkeeping for one thunk's code: records where copies of the thunk have been
// reserved/written in the output and, while another copy is still needed, the
// highest offset (`max_next_offset_`) at which that next copy may be placed.
class ArmBaseRelativePatcher::ThunkData {
 public:
  ThunkData(ArrayRef<const uint8_t> code, const std::string& debug_name, uint32_t max_next_offset)
      : code_(code),
        debug_name_(debug_name),
        offsets_(),
        max_next_offset_(max_next_offset),
        pending_offset_(0u) {
    DCHECK(NeedsNextThunk());  // The data is constructed only when we expect to need the thunk.
  }

  ThunkData(ThunkData&& src) = default;

  // Size of the thunk code in bytes.
  size_t CodeSize() const {
    return code_.size();
  }

  // The thunk code itself.
  ArrayRef<const uint8_t> GetCode() const {
    return code_;
  }

  // Base name used for this thunk in generated debug info.
  const std::string& GetDebugName() const {
    return debug_name_;
  }

  // Whether another copy of this thunk still needs to be placed in the output.
  bool NeedsNextThunk() const {
    return max_next_offset_ != 0u;
  }

  // Highest offset at which the next copy may be placed. Valid only if NeedsNextThunk().
  uint32_t MaxNextOffset() const {
    DCHECK(NeedsNextThunk());
    return max_next_offset_;
  }

  // Withdraw the request for another copy of the thunk.
  void ClearMaxNextOffset() {
    DCHECK(NeedsNextThunk());
    max_next_offset_ = 0u;
  }

  // Request another copy of the thunk with the given placement limit.
  void SetMaxNextOffset(uint32_t max_next_offset) {
    DCHECK(!NeedsNextThunk());
    max_next_offset_ = max_next_offset;
  }

  // Adjust the MaxNextOffset() down if needed to fit the code before the next thunk.
  // Returns true if it was adjusted, false if the old value was kept.
  bool MakeSpaceBefore(const ThunkData& next_thunk, size_t alignment) {
    DCHECK(NeedsNextThunk());
    DCHECK(next_thunk.NeedsNextThunk());
    DCHECK_ALIGNED_PARAM(MaxNextOffset(), alignment);
    DCHECK_ALIGNED_PARAM(next_thunk.MaxNextOffset(), alignment);
    if (next_thunk.MaxNextOffset() - CodeSize() < MaxNextOffset()) {
      max_next_offset_ = RoundDown(next_thunk.MaxNextOffset() - CodeSize(), alignment);
      return true;
    } else {
      return false;
    }
  }

  // Record `offset` as the placement of the next copy of the thunk and return
  // the first offset after its code.
  uint32_t ReserveOffset(size_t offset) {
    DCHECK(NeedsNextThunk());
    DCHECK_LE(offset, max_next_offset_);
    max_next_offset_ = 0u;  // The reserved offset should satisfy all pending references.
    offsets_.push_back(offset);
    return offset + CodeSize();
  }

  // Whether at least one copy of the thunk has been assigned an offset.
  bool HasReservedOffset() const {
    return !offsets_.empty();
  }

  // Offset of the most recently reserved copy. Valid only if HasReservedOffset().
  uint32_t LastReservedOffset() const {
    DCHECK(HasReservedOffset());
    return offsets_.back();
  }

  // Whether there are reserved copies that have not yet been written to the output.
  bool HasPendingOffset() const {
    return pending_offset_ != offsets_.size();
  }

  // Offset of the next reserved-but-unwritten copy. Valid only if HasPendingOffset().
  uint32_t GetPendingOffset() const {
    DCHECK(HasPendingOffset());
    return offsets_[pending_offset_];
  }

  // Advance past the pending copy once its code has been written.
  void MarkPendingOffsetAsWritten() {
    DCHECK(HasPendingOffset());
    ++pending_offset_;
  }

  // Whether at least one copy has already been written to the output.
  bool HasWrittenOffset() const {
    return pending_offset_ != 0u;
  }

  // Offset of the most recently written copy. Valid only if HasWrittenOffset().
  uint32_t LastWrittenOffset() const {
    DCHECK(HasWrittenOffset());
    return offsets_[pending_offset_ - 1u];
  }

  // Index of the first reserved copy at or after `offset`; NumberOfThunks() if none.
  size_t IndexOfFirstThunkAtOrAfter(uint32_t offset) const {
    size_t number_of_thunks = NumberOfThunks();
    for (size_t i = 0; i != number_of_thunks; ++i) {
      if (GetThunkOffset(i) >= offset) {
        return i;
      }
    }
    return number_of_thunks;
  }

  // Number of reserved copies of the thunk.
  size_t NumberOfThunks() const {
    return offsets_.size();
  }

  // Offset of the `index`-th reserved copy.
  uint32_t GetThunkOffset(size_t index) const {
    DCHECK_LT(index, NumberOfThunks());
    return offsets_[index];
  }

 private:
  const ArrayRef<const uint8_t> code_;  // The code of the thunk.
  const std::string debug_name_;        // The debug name of the thunk.
  std::vector<uint32_t> offsets_;       // Offsets at which the thunk needs to be written.
  uint32_t max_next_offset_;            // The maximum offset at which the next thunk can be placed.
  uint32_t pending_offset_;             // The index of the next offset to write.
};
// Heap comparator for `pending_thunks_`: orders ThunkData* so that the thunk with
// the smallest pending offset ends up at the front of the heap.
class ArmBaseRelativePatcher::PendingThunkComparator {
 public:
  bool operator()(const ThunkData* lhs, const ThunkData* rhs) const {
    DCHECK(lhs->HasPendingOffset());
    DCHECK(rhs->HasPendingOffset());
    // The top of the heap is defined to contain the highest element and we want to pick
    // the thunk with the smallest pending offset, so use the reverse ordering, i.e. ">".
    return lhs->GetPendingOffset() > rhs->GetPendingOffset();
  }
};
// Reserve space for the given method's code at `offset`, inserting any thunks that
// must be placed before it. Returns the updated offset. Thin wrapper around
// ReserveSpaceInternal() with no extra space requested by a subclass.
uint32_t ArmBaseRelativePatcher::ReserveSpace(uint32_t offset,
                                              const CompiledMethod* compiled_method,
                                              MethodReference method_ref) {
  return ReserveSpaceInternal(offset, compiled_method, method_ref, 0u);
}
  146. uint32_t ArmBaseRelativePatcher::ReserveSpaceEnd(uint32_t offset) {
  147. // For multi-oat compilations (boot image), ReserveSpaceEnd() is called for each oat file.
  148. // Since we do not know here whether this is the last file or whether the next opportunity
  149. // to place thunk will be soon enough, we need to reserve all needed thunks now. Code for
  150. // subsequent oat files can still call back to them.
  151. if (!unprocessed_method_call_patches_.empty()) {
  152. ResolveMethodCalls(offset, MethodReference(nullptr, dex::kDexNoIndex));
  153. }
  154. for (ThunkData* data : unreserved_thunks_) {
  155. uint32_t thunk_offset = CompiledCode::AlignCode(offset, instruction_set_);
  156. offset = data->ReserveOffset(thunk_offset);
  157. }
  158. unreserved_thunks_.clear();
  159. // We also need to delay initiating the pending_thunks_ until the call to WriteThunks().
  160. // Check that the `pending_thunks_.capacity()` indicates that no WriteThunks() has taken place.
  161. DCHECK_EQ(pending_thunks_.capacity(), 0u);
  162. return offset;
  163. }
// Write all thunks whose pending offset equals the current aligned output position.
// Returns the updated offset after any written thunks, or 0u on a write failure.
uint32_t ArmBaseRelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
  if (pending_thunks_.capacity() == 0u) {
    if (thunks_.empty()) {
      return offset;
    }
    // First call to WriteThunks(), prepare the thunks for writing.
    pending_thunks_.reserve(thunks_.size());
    for (auto& entry : thunks_) {
      ThunkData* data = &entry.second;
      if (data->HasPendingOffset()) {
        pending_thunks_.push_back(data);
      }
    }
    // Heap with the smallest pending offset at the front (see PendingThunkComparator).
    std::make_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
  }
  uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
  while (!pending_thunks_.empty() &&
         pending_thunks_.front()->GetPendingOffset() == aligned_offset) {
    // Write alignment bytes and code.
    uint32_t aligned_code_delta = aligned_offset - offset;
    if (aligned_code_delta != 0u && UNLIKELY(!WriteCodeAlignment(out, aligned_code_delta))) {
      return 0u;
    }
    if (UNLIKELY(!WriteThunk(out, pending_thunks_.front()->GetCode()))) {
      return 0u;
    }
    offset = aligned_offset + pending_thunks_.front()->CodeSize();
    // Mark the thunk as written at the pending offset and update the `pending_thunks_` heap.
    std::pop_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
    pending_thunks_.back()->MarkPendingOffsetAsWritten();
    if (pending_thunks_.back()->HasPendingOffset()) {
      // More copies of this thunk remain; push it back into the heap.
      std::push_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
    } else {
      pending_thunks_.pop_back();
    }
    aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
  }
  // No remaining pending thunk may be due at (or before) the current position.
  DCHECK(pending_thunks_.empty() || pending_thunks_.front()->GetPendingOffset() > aligned_offset);
  return offset;
}
  204. std::vector<debug::MethodDebugInfo> ArmBaseRelativePatcher::GenerateThunkDebugInfo(
  205. uint32_t executable_offset) {
  206. // For multi-oat compilation (boot image), `thunks_` records thunks for all oat files.
  207. // To return debug info for the current oat file, we must ignore thunks before the
  208. // `executable_offset` as they are in the previous oat files and this function must be
  209. // called before reserving thunk positions for subsequent oat files.
  210. size_t number_of_thunks = 0u;
  211. for (auto&& entry : thunks_) {
  212. const ThunkData& data = entry.second;
  213. number_of_thunks += data.NumberOfThunks() - data.IndexOfFirstThunkAtOrAfter(executable_offset);
  214. }
  215. std::vector<debug::MethodDebugInfo> result;
  216. result.reserve(number_of_thunks);
  217. for (auto&& entry : thunks_) {
  218. const ThunkData& data = entry.second;
  219. size_t start = data.IndexOfFirstThunkAtOrAfter(executable_offset);
  220. if (start == data.NumberOfThunks()) {
  221. continue;
  222. }
  223. // Get the base name to use for the first occurrence of the thunk.
  224. const std::string& base_name = data.GetDebugName();
  225. for (size_t i = start, num = data.NumberOfThunks(); i != num; ++i) {
  226. debug::MethodDebugInfo info = {};
  227. if (i == 0u) {
  228. info.custom_name = base_name;
  229. } else {
  230. // Add a disambiguating tag for subsequent identical thunks. Since the `thunks_`
  231. // keeps records also for thunks in previous oat files, names based on the thunk
  232. // index shall be unique across the whole multi-oat output.
  233. info.custom_name = base_name + "_" + std::to_string(i);
  234. }
  235. info.isa = instruction_set_;
  236. info.is_code_address_text_relative = true;
  237. info.code_address = data.GetThunkOffset(i) - executable_offset;
  238. info.code_size = data.CodeSize();
  239. result.push_back(std::move(info));
  240. }
  241. }
  242. return result;
  243. }
// Construct a patcher for the given instruction set; the providers supply thunk
// code/names and resolved method offsets and must outlive this patcher.
ArmBaseRelativePatcher::ArmBaseRelativePatcher(RelativePatcherThunkProvider* thunk_provider,
                                               RelativePatcherTargetProvider* target_provider,
                                               InstructionSet instruction_set)
    : thunk_provider_(thunk_provider),
      target_provider_(target_provider),
      instruction_set_(instruction_set),
      thunks_(),
      unprocessed_method_call_patches_(),
      method_call_thunk_(nullptr),
      pending_thunks_() {
}
ArmBaseRelativePatcher::~ArmBaseRelativePatcher() {
  // All work done by member destructors.
}
// Reserve space for a method's code at `offset`, first emitting any thunks whose
// placement deadline would be exceeded if the method (plus `max_extra_space`
// requested by the subclass) were placed without them. Returns the updated offset
// and records the method's patches for later thunk planning.
uint32_t ArmBaseRelativePatcher::ReserveSpaceInternal(uint32_t offset,
                                                      const CompiledMethod* compiled_method,
                                                      MethodReference method_ref,
                                                      uint32_t max_extra_space) {
  // Adjust code size for extra space required by the subclass.
  uint32_t max_code_size = compiled_method->GetQuickCode().size() + max_extra_space;
  uint32_t code_offset;
  uint32_t next_aligned_offset;
  while (true) {
    // Where the method's code would start (after its header) and where the next
    // aligned position after it would be, if we placed the method right now.
    code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
    next_aligned_offset = compiled_method->AlignCode(code_offset + max_code_size);
    if (unreserved_thunks_.empty() ||
        unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset) {
      break;
    }
    // The thunk with the earliest deadline cannot wait until after this method.
    ThunkData* thunk = unreserved_thunks_.front();
    if (thunk == method_call_thunk_) {
      ResolveMethodCalls(code_offset, method_ref);
      // This may have changed `method_call_thunk_` data, so re-check if we need to reserve.
      if (unreserved_thunks_.empty() ||
          unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset) {
        break;
      }
      // We need to process the new `front()` whether it's still the `method_call_thunk_` or not.
      thunk = unreserved_thunks_.front();
    }
    unreserved_thunks_.pop_front();
    uint32_t thunk_offset = CompiledCode::AlignCode(offset, instruction_set_);
    offset = thunk->ReserveOffset(thunk_offset);
    if (thunk == method_call_thunk_) {
      // All remaining method call patches will be handled by this thunk.
      DCHECK(!unprocessed_method_call_patches_.empty());
      DCHECK_LE(thunk_offset - unprocessed_method_call_patches_.front().GetPatchOffset(),
                MaxPositiveDisplacement(GetMethodCallKey()));
      unprocessed_method_call_patches_.clear();
    }
  }
  // Process patches and check that adding thunks for the current method did not push any
  // thunks (previously existing or newly added) before `next_aligned_offset`. This is
  // essentially a check that we never compile a method that's too big. The calls or branches
  // from the method should be able to reach beyond the end of the method and over any pending
  // thunks. (The number of different thunks should be relatively low and their code short.)
  ProcessPatches(compiled_method, code_offset);
  CHECK(unreserved_thunks_.empty() ||
        unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset);
  return offset;
}
// Compute the branch displacement for a method call patch at `patch_offset`.
// If the target itself is out of direct range, redirect the call to the method call
// thunk: the pending (not yet written) copy ahead of the patch if it is reachable,
// otherwise the last already-written copy behind it.
uint32_t ArmBaseRelativePatcher::CalculateMethodCallDisplacement(uint32_t patch_offset,
                                                                uint32_t target_offset) {
  DCHECK(method_call_thunk_ != nullptr);
  // Unsigned arithmetic with its well-defined overflow behavior is just fine here.
  uint32_t displacement = target_offset - patch_offset;
  uint32_t max_positive_displacement = MaxPositiveDisplacement(GetMethodCallKey());
  uint32_t max_negative_displacement = MaxNegativeDisplacement(GetMethodCallKey());
  // NOTE: With unsigned arithmetic we do mean to use && rather than || below.
  if (displacement > max_positive_displacement && displacement < -max_negative_displacement) {
    // Unwritten thunks have higher offsets, check if it's within range.
    DCHECK(!method_call_thunk_->HasPendingOffset() ||
           method_call_thunk_->GetPendingOffset() > patch_offset);
    if (method_call_thunk_->HasPendingOffset() &&
        method_call_thunk_->GetPendingOffset() - patch_offset <= max_positive_displacement) {
      displacement = method_call_thunk_->GetPendingOffset() - patch_offset;
    } else {
      // We must have a previous thunk then.
      DCHECK(method_call_thunk_->HasWrittenOffset());
      DCHECK_LT(method_call_thunk_->LastWrittenOffset(), patch_offset);
      displacement = method_call_thunk_->LastWrittenOffset() - patch_offset;
      DCHECK_GE(displacement, -max_negative_displacement);
    }
  }
  return displacement;
}
  330. uint32_t ArmBaseRelativePatcher::GetThunkTargetOffset(const ThunkKey& key, uint32_t patch_offset) {
  331. auto it = thunks_.find(key);
  332. CHECK(it != thunks_.end());
  333. const ThunkData& data = it->second;
  334. if (data.HasWrittenOffset()) {
  335. uint32_t offset = data.LastWrittenOffset();
  336. DCHECK_LT(offset, patch_offset);
  337. if (patch_offset - offset <= MaxNegativeDisplacement(key)) {
  338. return offset;
  339. }
  340. }
  341. DCHECK(data.HasPendingOffset());
  342. uint32_t offset = data.GetPendingOffset();
  343. DCHECK_GT(offset, patch_offset);
  344. DCHECK_LE(offset - patch_offset, MaxPositiveDisplacement(key));
  345. return offset;
  346. }
// Key identifying the (single, shared) method call thunk.
ArmBaseRelativePatcher::ThunkKey ArmBaseRelativePatcher::GetMethodCallKey() {
  return ThunkKey(ThunkType::kMethodCall);
}
// Key identifying the entrypoint call thunk for the patch's entrypoint offset.
ArmBaseRelativePatcher::ThunkKey ArmBaseRelativePatcher::GetEntrypointCallKey(
    const LinkerPatch& patch) {
  DCHECK_EQ(patch.GetType(), LinkerPatch::Type::kCallEntrypoint);
  return ThunkKey(ThunkType::kEntrypointCall, patch.EntrypointOffset());
}
// Key identifying the Baker read barrier thunk for the patch's custom values.
ArmBaseRelativePatcher::ThunkKey ArmBaseRelativePatcher::GetBakerThunkKey(
    const LinkerPatch& patch) {
  DCHECK_EQ(patch.GetType(), LinkerPatch::Type::kBakerReadBarrierBranch);
  return ThunkKey(ThunkType::kBakerReadBarrier,
                  patch.GetBakerCustomValue1(),
                  patch.GetBakerCustomValue2());
}
// Walk all patches of `compiled_method` (placed at `code_offset`) and record thunk
// requirements: method call patches are queued for later resolution against the
// shared method call thunk, while entrypoint call and Baker read barrier patches
// create or refresh the thunk data keyed by their respective ThunkKey.
void ArmBaseRelativePatcher::ProcessPatches(const CompiledMethod* compiled_method,
                                            uint32_t code_offset) {
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    uint32_t patch_offset = code_offset + patch.LiteralOffset();
    ThunkKey key(static_cast<ThunkType>(-1));  // Invalid key; overwritten below when used.
    bool simple_thunk_patch = false;
    ThunkData* old_data = nullptr;
    if (patch.GetType() == LinkerPatch::Type::kCallRelative) {
      key = GetMethodCallKey();
      unprocessed_method_call_patches_.emplace_back(patch_offset, patch.TargetMethod());
      if (method_call_thunk_ == nullptr) {
        // First method call patch ever; create the shared method call thunk.
        uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key);
        auto it = thunks_.Put(key, ThunkDataForPatch(patch, max_next_offset));
        method_call_thunk_ = &it->second;
        AddUnreservedThunk(method_call_thunk_);
      } else {
        old_data = method_call_thunk_;
      }
    } else if (patch.GetType() == LinkerPatch::Type::kCallEntrypoint) {
      key = GetEntrypointCallKey(patch);
      simple_thunk_patch = true;
    } else if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch) {
      key = GetBakerThunkKey(patch);
      simple_thunk_patch = true;
    }
    if (simple_thunk_patch) {
      // Insert new thunk data for `key` if absent, otherwise reuse the existing entry.
      auto lb = thunks_.lower_bound(key);
      if (lb == thunks_.end() || thunks_.key_comp()(key, lb->first)) {
        uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key);
        auto it = thunks_.PutBefore(lb, key, ThunkDataForPatch(patch, max_next_offset));
        AddUnreservedThunk(&it->second);
      } else {
        old_data = &lb->second;
      }
    }
    if (old_data != nullptr) {
      // Shared path where an old thunk may need an update.
      DCHECK(key.GetType() != static_cast<ThunkType>(-1));
      DCHECK(!old_data->HasReservedOffset() || old_data->LastReservedOffset() < patch_offset);
      if (old_data->NeedsNextThunk()) {
        // Patches for a method are ordered by literal offset, so if we still need to place
        // this thunk for a previous patch, that thunk shall be in range for this patch.
        DCHECK_LE(old_data->MaxNextOffset(), CalculateMaxNextOffset(patch_offset, key));
      } else {
        // Request a new copy if the last reserved one is out of backward range.
        if (!old_data->HasReservedOffset() ||
            patch_offset - old_data->LastReservedOffset() > MaxNegativeDisplacement(key)) {
          old_data->SetMaxNextOffset(CalculateMaxNextOffset(patch_offset, key));
          AddUnreservedThunk(old_data);
        }
      }
    }
  }
}
// Insert `data` into `unreserved_thunks_` (kept ordered by MaxNextOffset()) and
// tighten the max next offsets of the new thunk and its predecessors so that each
// thunk's code still fits before the next one's deadline.
void ArmBaseRelativePatcher::AddUnreservedThunk(ThunkData* data) {
  DCHECK(data->NeedsNextThunk());
  // Find the insertion position that keeps the list sorted by MaxNextOffset().
  size_t index = unreserved_thunks_.size();
  while (index != 0u && data->MaxNextOffset() < unreserved_thunks_[index - 1u]->MaxNextOffset()) {
    --index;
  }
  unreserved_thunks_.insert(unreserved_thunks_.begin() + index, data);
  // We may need to update the max next offset(s) if the thunk code would not fit.
  size_t alignment = GetInstructionSetAlignment(instruction_set_);
  if (index + 1u != unreserved_thunks_.size()) {
    // Note: Ignore the return value as we need to process previous thunks regardless.
    data->MakeSpaceBefore(*unreserved_thunks_[index + 1u], alignment);
  }
  // Make space for previous thunks. Once we find a pending thunk that does
  // not need an adjustment, we can stop.
  while (index != 0u && unreserved_thunks_[index - 1u]->MakeSpaceBefore(*data, alignment)) {
    --index;
    data = unreserved_thunks_[index];
  }
}
// Resolve queued method call patches given that the method currently being placed
// will start at `quick_code_offset`. Patches whose target is directly reachable, or
// reachable via the last reserved method call thunk, are dropped. If unresolved
// patches remain, the method call thunk's deadline may be pushed back; if none
// remain, the request for another method call thunk is withdrawn.
void ArmBaseRelativePatcher::ResolveMethodCalls(uint32_t quick_code_offset,
                                                MethodReference method_ref) {
  DCHECK(!unreserved_thunks_.empty());
  DCHECK(!unprocessed_method_call_patches_.empty());
  DCHECK(method_call_thunk_ != nullptr);
  uint32_t max_positive_displacement = MaxPositiveDisplacement(GetMethodCallKey());
  uint32_t max_negative_displacement = MaxNegativeDisplacement(GetMethodCallKey());
  // Process as many patches as possible, stop only on unresolved targets or calls too far back.
  while (!unprocessed_method_call_patches_.empty()) {
    MethodReference target_method = unprocessed_method_call_patches_.front().GetTargetMethod();
    uint32_t patch_offset = unprocessed_method_call_patches_.front().GetPatchOffset();
    DCHECK(!method_call_thunk_->HasReservedOffset() ||
           method_call_thunk_->LastReservedOffset() <= patch_offset);
    if (!method_call_thunk_->HasReservedOffset() ||
        patch_offset - method_call_thunk_->LastReservedOffset() > max_negative_displacement) {
      // No previous thunk in range, check if we can reach the target directly.
      if (target_method == method_ref) {
        // The call targets the method currently being placed.
        DCHECK_GT(quick_code_offset, patch_offset);
        if (quick_code_offset - patch_offset > max_positive_displacement) {
          break;
        }
      } else {
        auto result = target_provider_->FindMethodOffset(target_method);
        if (!result.first) {
          break;  // Target not placed yet; cannot resolve this patch now.
        }
        uint32_t target_offset = result.second - CompiledCode::CodeDelta(instruction_set_);
        if (target_offset >= patch_offset) {
          DCHECK_LE(target_offset - patch_offset, max_positive_displacement);
        } else if (patch_offset - target_offset > max_negative_displacement) {
          break;  // Backward call out of range; it needs a thunk.
        }
      }
    }
    unprocessed_method_call_patches_.pop_front();
  }
  if (!unprocessed_method_call_patches_.empty()) {
    // Try to adjust the max next offset in `method_call_thunk_`. Do this conservatively only if
    // the thunk shall be at the end of the `unreserved_thunks_` to avoid dealing with overlaps.
    uint32_t new_max_next_offset =
        unprocessed_method_call_patches_.front().GetPatchOffset() + max_positive_displacement;
    if (new_max_next_offset >
        unreserved_thunks_.back()->MaxNextOffset() + unreserved_thunks_.back()->CodeSize()) {
      method_call_thunk_->ClearMaxNextOffset();
      method_call_thunk_->SetMaxNextOffset(new_max_next_offset);
      if (method_call_thunk_ != unreserved_thunks_.back()) {
        RemoveElement(unreserved_thunks_, method_call_thunk_);
        unreserved_thunks_.push_back(method_call_thunk_);
      }
    }
  } else {
    // We have resolved all method calls, we do not need a new thunk anymore.
    method_call_thunk_->ClearMaxNextOffset();
    RemoveElement(unreserved_thunks_, method_call_thunk_);
  }
}
  491. inline uint32_t ArmBaseRelativePatcher::CalculateMaxNextOffset(uint32_t patch_offset,
  492. const ThunkKey& key) {
  493. return RoundDown(patch_offset + MaxPositiveDisplacement(key),
  494. GetInstructionSetAlignment(instruction_set_));
  495. }
  496. inline ArmBaseRelativePatcher::ThunkData ArmBaseRelativePatcher::ThunkDataForPatch(
  497. const LinkerPatch& patch, uint32_t max_next_offset) {
  498. ArrayRef<const uint8_t> code;
  499. std::string debug_name;
  500. thunk_provider_->GetThunkCode(patch, &code, &debug_name);
  501. DCHECK(!code.empty());
  502. return ThunkData(code, debug_name, max_next_offset);
  503. }
  504. } // namespace linker
  505. } // namespace art