
/libutils/VectorImpl.cpp

https://gitlab.com/infraredbg/android_system_core-mt6589
C++ | 695 lines | 551 code | 88 blank | 56 comment | 101 complexity | 5b8bde9cd3d906751f96d9ca6fddcce0 MD5
/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Vector"

#include <string.h>
#include <stdlib.h>
#include <stdio.h>

#include <cutils/log.h>
#include <safe_iop.h>

#include <utils/Errors.h>
#include <utils/SharedBuffer.h>
#include <utils/VectorImpl.h>

/*****************************************************************************/

namespace android {

// ----------------------------------------------------------------------------

const size_t kMinVectorCapacity = 4;

static inline size_t max(size_t a, size_t b) {
    return a>b ? a : b;
}

// ----------------------------------------------------------------------------

VectorImpl::VectorImpl(size_t itemSize, uint32_t flags)
    : mStorage(0), mCount(0), mFlags(flags), mItemSize(itemSize)
{
}

VectorImpl::VectorImpl(const VectorImpl& rhs)
    :   mStorage(rhs.mStorage), mCount(rhs.mCount),
        mFlags(rhs.mFlags), mItemSize(rhs.mItemSize)
{
    if (mStorage) {
        SharedBuffer::bufferFromData(mStorage)->acquire();
    }
}

VectorImpl::~VectorImpl()
{
    ALOGW_IF(mCount,
            "[%p] subclasses of VectorImpl must call finish_vector()"
            " in their destructor. Leaking %d bytes.",
            this, (int)(mCount*mItemSize));
    // We can't call _do_destroy() here because the vtable is already gone.
}

VectorImpl& VectorImpl::operator = (const VectorImpl& rhs)
{
    LOG_ALWAYS_FATAL_IF(mItemSize != rhs.mItemSize,
        "Vector<> have different types (this=%p, rhs=%p)", this, &rhs);
    if (this != &rhs) {
        release_storage();
        if (rhs.mCount) {
            mStorage = rhs.mStorage;
            mCount = rhs.mCount;
            SharedBuffer::bufferFromData(mStorage)->acquire();
        } else {
            mStorage = 0;
            mCount = 0;
        }
    }
    return *this;
}
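
// Return a writable pointer to the item array. If the underlying
// SharedBuffer is shared with other vectors, this is where the
// copy-on-write happens: a private copy is allocated and the items are
// copied into it before the reference on the old buffer is dropped.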
void* VectorImpl::editArrayImpl()
{
    if (mStorage) {
        const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
        SharedBuffer* editable = sb->attemptEdit();
        if (editable == 0) {
            // If we're here, we're not the only owner of the buffer.
            // We must make a copy of it.
            editable = SharedBuffer::alloc(sb->size());
            // Fail instead of returning a pointer to storage that's not
            // editable. Otherwise we'd be editing the contents of a buffer
            // for which we're not the only owner, which is undefined behaviour.
            LOG_ALWAYS_FATAL_IF(editable == NULL);
            _do_copy(editable->data(), mStorage, mCount);
            release_storage();
            mStorage = editable->data();
        }
    }
    return mStorage;
}

size_t VectorImpl::capacity() const
{
    if (mStorage) {
        return SharedBuffer::bufferFromData(mStorage)->size() / mItemSize;
    }
    return 0;
}

ssize_t VectorImpl::insertVectorAt(const VectorImpl& vector, size_t index)
{
    return insertArrayAt(vector.arrayImpl(), index, vector.size());
}

ssize_t VectorImpl::appendVector(const VectorImpl& vector)
{
    return insertVectorAt(vector, size());
}

ssize_t VectorImpl::insertArrayAt(const void* array, size_t index, size_t length)
{
    if (index > size())
        return BAD_INDEX;
    void* where = _grow(index, length);
    if (where) {
        _do_copy(where, array, length);
    }
    return where ? index : (ssize_t)NO_MEMORY;
}

ssize_t VectorImpl::appendArray(const void* array, size_t length)
{
    return insertArrayAt(array, size(), length);
}

ssize_t VectorImpl::insertAt(size_t index, size_t numItems)
{
    return insertAt(0, index, numItems);
}

ssize_t VectorImpl::insertAt(const void* item, size_t index, size_t numItems)
{
    if (index > size())
        return BAD_INDEX;
    void* where = _grow(index, numItems);
    if (where) {
        if (item) {
            _do_splat(where, item, numItems);
        } else {
            _do_construct(where, numItems);
        }
    }
    return where ? index : (ssize_t)NO_MEMORY;
}
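
// Trampoline that adapts a stateless compar_t comparator to the
// stateful compar_r_t signature, so both sort() overloads share one
// implementation: the function pointer itself rides along in the
// opaque state argument.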
static int sortProxy(const void* lhs, const void* rhs, void* func)
{
    return (*(VectorImpl::compar_t)func)(lhs, rhs);
}

status_t VectorImpl::sort(VectorImpl::compar_t cmp)
{
    return sort(sortProxy, (void*)cmp);
}

status_t VectorImpl::sort(VectorImpl::compar_r_t cmp, void* state)
{
    // the sort must be stable. we're using insertion sort which
    // is well suited for small and already sorted arrays
    // for big arrays, it could be better to use mergesort
    const ssize_t count = size();
    if (count > 1) {
        void* array = const_cast<void*>(arrayImpl());
        void* temp = 0;
        ssize_t i = 1;
        while (i < count) {
            void* item = reinterpret_cast<char*>(array) + mItemSize*(i);
            void* curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
            if (cmp(curr, item, state) > 0) {

                if (!temp) {
                    // we're going to have to modify the array...
                    array = editArrayImpl();
                    if (!array) return NO_MEMORY;
                    temp = malloc(mItemSize);
                    if (!temp) return NO_MEMORY;
                    item = reinterpret_cast<char*>(array) + mItemSize*(i);
                    curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
                } else {
                    _do_destroy(temp, 1);
                }

                _do_copy(temp, item, 1);

                ssize_t j = i-1;
                void* next = reinterpret_cast<char*>(array) + mItemSize*(i);
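                // Shift the out-of-order elements one slot to the right
                // until the insertion point for the saved item is found.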
                do {
                    _do_destroy(next, 1);
                    _do_copy(next, curr, 1);
                    next = curr;
                    --j;
                    curr = reinterpret_cast<char*>(array) + mItemSize*(j);
                } while (j>=0 && (cmp(curr, temp, state) > 0));

                _do_destroy(next, 1);
                _do_copy(next, temp, 1);
            }
            i++;
        }

        if (temp) {
            _do_destroy(temp, 1);
            free(temp);
        }
    }
    return NO_ERROR;
}

void VectorImpl::pop()
{
    if (size())
        removeItemsAt(size()-1, 1);
}

void VectorImpl::push()
{
    push(0);
}

void VectorImpl::push(const void* item)
{
    insertAt(item, size());
}

ssize_t VectorImpl::add()
{
    return add(0);
}

ssize_t VectorImpl::add(const void* item)
{
    return insertAt(item, size());
}

ssize_t VectorImpl::replaceAt(size_t index)
{
    return replaceAt(0, index);
}

ssize_t VectorImpl::replaceAt(const void* prototype, size_t index)
{
    ALOG_ASSERT(index<size(),
        "[%p] replace: index=%d, size=%d", this, (int)index, (int)size());
    if (index >= size()) {
        return BAD_INDEX;
    }
    void* item = editItemLocation(index);
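    // Guard against replacing an element with itself: destroying it
    // first would leave nothing valid to copy the new value from.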
    if (item != prototype) {
        if (item == 0)
            return NO_MEMORY;
        _do_destroy(item, 1);
        if (prototype == 0) {
            _do_construct(item, 1);
        } else {
            _do_copy(item, prototype, 1);
        }
    }
    return ssize_t(index);
}

ssize_t VectorImpl::removeItemsAt(size_t index, size_t count)
{
    ALOG_ASSERT((index+count)<=size(),
        "[%p] remove: index=%d, count=%d, size=%d",
        this, (int)index, (int)count, (int)size());
    if ((index+count) > size())
        return BAD_VALUE;
    _shrink(index, count);
    return index;
}

void VectorImpl::finish_vector()
{
    release_storage();
    mStorage = 0;
    mCount = 0;
}

void VectorImpl::clear()
{
    _shrink(0, mCount);
}

void* VectorImpl::editItemLocation(size_t index)
{
    ALOG_ASSERT(index<capacity(),
        "[%p] editItemLocation: index=%d, capacity=%d, count=%d",
        this, (int)index, (int)capacity(), (int)mCount);
    if (index < capacity()) {
        void* buffer = editArrayImpl();
        if (buffer) {
            return reinterpret_cast<char*>(buffer) + index*mItemSize;
        }
    }
    return 0;
}

const void* VectorImpl::itemLocation(size_t index) const
{
    ALOG_ASSERT(index<capacity(),
        "[%p] itemLocation: index=%d, capacity=%d, count=%d",
        this, (int)index, (int)capacity(), (int)mCount);
    if (index < capacity()) {
        const void* buffer = arrayImpl();
        if (buffer) {
            return reinterpret_cast<const char*>(buffer) + index*mItemSize;
        }
    }
    return 0;
}

ssize_t VectorImpl::setCapacity(size_t new_capacity)
{
    // The capacity must always be greater than or equal to the size
    // of this vector.
    if (new_capacity <= size()) {
        return capacity();
    }

    size_t new_allocation_size = 0;
    LOG_ALWAYS_FATAL_IF(!safe_mul(&new_allocation_size, new_capacity, mItemSize));
    SharedBuffer* sb = SharedBuffer::alloc(new_allocation_size);
    if (sb) {
        void* array = sb->data();
        _do_copy(array, mStorage, size());
        release_storage();
        mStorage = const_cast<void*>(array);
    } else {
        return NO_MEMORY;
    }

    return new_capacity;
}

ssize_t VectorImpl::resize(size_t size) {
    ssize_t result = NO_ERROR;
    if (size > mCount) {
        result = insertAt(mCount, size - mCount);
    } else if (size < mCount) {
        result = removeItemsAt(size, mCount - size);
    }
    return result < 0 ? result : size;
}
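
// Drop this vector's reference on the SharedBuffer. eKeepStorage makes
// release() decrement the reference count without freeing the memory,
// so when we were the last owner we can still run the item destructors
// before deallocating the buffer explicitly.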
void VectorImpl::release_storage()
{
    if (mStorage) {
        const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
        if (sb->release(SharedBuffer::eKeepStorage) == 1) {
            _do_destroy(mStorage, mCount);
            SharedBuffer::dealloc(sb);
        }
    }
}
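
// Open a gap of 'amount' uninitialized item slots at index 'where',
// growing the allocation if needed, and return a pointer to the gap.
// Callers (insertAt, insertArrayAt) are responsible for constructing
// or copying items into the returned slots.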
void* VectorImpl::_grow(size_t where, size_t amount)
{
//    ALOGV("_grow(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
//        this, (int)where, (int)amount, (int)mCount, (int)capacity());

    ALOG_ASSERT(where <= mCount,
            "[%p] _grow: where=%d, amount=%d, count=%d",
            this, (int)where, (int)amount, (int)mCount); // caller already checked

    size_t new_size;
    LOG_ALWAYS_FATAL_IF(!safe_add(&new_size, mCount, amount), "new_size overflow");

    if (capacity() < new_size) {
        // NOTE: This implementation used to resize vectors as per ((3*x + 1) / 2)
        // (sigh..). Also note, the " + 1" was necessary to handle the special case
        // where x == 1, where the resized_capacity will be equal to the old
        // capacity without the +1. The old calculation wouldn't work properly
        // if x was zero.
        //
        // This approximates the old calculation, using (x + (x/2) + 1) instead.
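        //
        // Worked example: appending one item at a time to an empty vector
        // computes new_size + new_size/2 + 1, clamped below at
        // kMinVectorCapacity, so the capacity steps through
        // 4, 8, 14, 23, 37, ... (roughly 1.5x per growth).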
        size_t new_capacity = 0;
        LOG_ALWAYS_FATAL_IF(!safe_add(&new_capacity, new_size, (new_size / 2)),
                            "new_capacity overflow");
        LOG_ALWAYS_FATAL_IF(!safe_add(&new_capacity, new_capacity, static_cast<size_t>(1u)),
                            "new_capacity overflow");
        new_capacity = max(kMinVectorCapacity, new_capacity);

        size_t new_alloc_size = 0;
        LOG_ALWAYS_FATAL_IF(!safe_mul(&new_alloc_size, new_capacity, mItemSize),
                            "new_alloc_size overflow");

//        ALOGV("grow vector %p, new_capacity=%d", this, (int)new_capacity);
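        // Fast path: appending at the end (where == mCount) with trivially
        // copyable and trivially destructible items lets us resize the
        // existing buffer in place instead of allocating and copying.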
        if ((mStorage) &&
            (mCount==where) &&
            (mFlags & HAS_TRIVIAL_COPY) &&
            (mFlags & HAS_TRIVIAL_DTOR))
        {
            const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
            SharedBuffer* sb = cur_sb->editResize(new_alloc_size);
            if (sb) {
                mStorage = sb->data();
            } else {
                return NULL;
            }
        } else {
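            // Slow path: non-trivial items or a mid-vector insertion.
            // Allocate a fresh buffer and copy the two halves around the gap.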
            SharedBuffer* sb = SharedBuffer::alloc(new_alloc_size);
            if (sb) {
                void* array = sb->data();
                if (where != 0) {
                    _do_copy(array, mStorage, where);
                }
                if (where != mCount) {
                    const void* from = reinterpret_cast<const uint8_t *>(mStorage) + where*mItemSize;
                    void* dest = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
                    _do_copy(dest, from, mCount-where);
                }
                release_storage();
                mStorage = const_cast<void*>(array);
            } else {
                return NULL;
            }
        }
    } else {
        void* array = editArrayImpl();
        if (where != mCount) {
            const void* from = reinterpret_cast<const uint8_t *>(array) + where*mItemSize;
            void* to = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
            _do_move_forward(to, from, mCount - where);
        }
    }
    mCount = new_size;
    void* free_space = const_cast<void*>(itemLocation(where));
    return free_space;
}

void VectorImpl::_shrink(size_t where, size_t amount)
{
    if (!mStorage)
        return;

//    ALOGV("_shrink(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
//        this, (int)where, (int)amount, (int)mCount, (int)capacity());

    ALOG_ASSERT(where + amount <= mCount,
            "[%p] _shrink: where=%d, amount=%d, count=%d",
            this, (int)where, (int)amount, (int)mCount); // caller already checked

    size_t new_size;
    LOG_ALWAYS_FATAL_IF(!safe_sub(&new_size, mCount, amount));
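    // Give memory back once the vector occupies less than half of its
    // capacity; the new capacity keeps 2x headroom so that alternating
    // adds and removes don't cause reallocation thrashing.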
    if (new_size < (capacity() / 2)) {
        // NOTE: (new_size * 2) is safe because capacity didn't overflow and
        // new_size < (capacity / 2)).
        const size_t new_capacity = max(kMinVectorCapacity, new_size * 2);

        // NOTE: (new_capacity * mItemSize), (where * mItemSize) and
        // ((where + amount) * mItemSize) beyond this point are safe because
        // we are always reducing the capacity of the underlying SharedBuffer.
        // In other words, (old_capacity * mItemSize) did not overflow, and
        // where < (where + amount) < new_capacity < old_capacity.
        if ((where == new_size) &&
            (mFlags & HAS_TRIVIAL_COPY) &&
            (mFlags & HAS_TRIVIAL_DTOR))
        {
            const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
            SharedBuffer* sb = cur_sb->editResize(new_capacity * mItemSize);
            if (sb) {
                mStorage = sb->data();
            } else {
                return;
            }
        } else {
            SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
            if (sb) {
                void* array = sb->data();
                if (where != 0) {
                    _do_copy(array, mStorage, where);
                }
                if (where != new_size) {
                    const void* from = reinterpret_cast<const uint8_t *>(mStorage) + (where+amount)*mItemSize;
                    void* dest = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
                    _do_copy(dest, from, new_size - where);
                }
                release_storage();
                mStorage = const_cast<void*>(array);
            } else {
                return;
            }
        }
    } else {
        void* array = editArrayImpl();
        void* to = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
        _do_destroy(to, amount);
        if (where != new_size) {
            const void* from = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
            _do_move_backward(to, from, new_size - where);
        }
    }
    mCount = new_size;
}

size_t VectorImpl::itemSize() const {
    return mItemSize;
}

void VectorImpl::_do_construct(void* storage, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_CTOR)) {
        do_construct(storage, num);
    }
}

void VectorImpl::_do_destroy(void* storage, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_DTOR)) {
        do_destroy(storage, num);
    }
}

void VectorImpl::_do_copy(void* dest, const void* from, size_t num) const
{
    if (!(mFlags & HAS_TRIVIAL_COPY)) {
        do_copy(dest, from, num);
    } else {
        memcpy(dest, from, num*itemSize());
    }
}

void VectorImpl::_do_splat(void* dest, const void* item, size_t num) const {
    do_splat(dest, item, num);
}

void VectorImpl::_do_move_forward(void* dest, const void* from, size_t num) const {
    do_move_forward(dest, from, num);
}

void VectorImpl::_do_move_backward(void* dest, const void* from, size_t num) const {
    do_move_backward(dest, from, num);
}

#ifdef NEEDS_VECTORIMPL_SYMBOLS
void VectorImpl::reservedVectorImpl1() { }
void VectorImpl::reservedVectorImpl2() { }
void VectorImpl::reservedVectorImpl3() { }
void VectorImpl::reservedVectorImpl4() { }
void VectorImpl::reservedVectorImpl5() { }
void VectorImpl::reservedVectorImpl6() { }
void VectorImpl::reservedVectorImpl7() { }
void VectorImpl::reservedVectorImpl8() { }
#endif

/*****************************************************************************/

SortedVectorImpl::SortedVectorImpl(size_t itemSize, uint32_t flags)
    : VectorImpl(itemSize, flags)
{
}

SortedVectorImpl::SortedVectorImpl(const VectorImpl& rhs)
    : VectorImpl(rhs)
{
}

SortedVectorImpl::~SortedVectorImpl()
{
}

SortedVectorImpl& SortedVectorImpl::operator = (const SortedVectorImpl& rhs)
{
    return static_cast<SortedVectorImpl&>( VectorImpl::operator = (static_cast<const VectorImpl&>(rhs)) );
}

ssize_t SortedVectorImpl::indexOf(const void* item) const
{
    return _indexOrderOf(item);
}

size_t SortedVectorImpl::orderOf(const void* item) const
{
    size_t o;
    _indexOrderOf(item, &o);
    return o;
}

ssize_t SortedVectorImpl::_indexOrderOf(const void* item, size_t* order) const
{
    // binary search
    ssize_t err = NAME_NOT_FOUND;
    ssize_t l = 0;
    ssize_t h = size()-1;
    ssize_t mid;
    const void* a = arrayImpl();
    const size_t s = itemSize();
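    // Invariant on exit: 'l' is the number of elements strictly smaller
    // than 'item', i.e. the position where it should be inserted; 'err'
    // is the item's index when found, NAME_NOT_FOUND otherwise.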
    while (l <= h) {
        mid = l + (h - l)/2;
        const void* const curr = reinterpret_cast<const char *>(a) + (mid*s);
        const int c = do_compare(curr, item);
        if (c == 0) {
            err = l = mid;
            break;
        } else if (c < 0) {
            l = mid + 1;
        } else {
            h = mid - 1;
        }
    }
    if (order) *order = l;
    return err;
}
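
// Insert 'item' at its sorted position. If an equal element already
// exists it is overwritten via replaceAt(), giving set semantics
// (no duplicates) rather than multiset semantics.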
ssize_t SortedVectorImpl::add(const void* item)
{
    size_t order;
    ssize_t index = _indexOrderOf(item, &order);
    if (index < 0) {
        index = VectorImpl::insertAt(item, order, 1);
    } else {
        index = VectorImpl::replaceAt(item, index);
    }
    return index;
}

ssize_t SortedVectorImpl::merge(const VectorImpl& vector)
{
    // naive merge...
    if (!vector.isEmpty()) {
        const void* buffer = vector.arrayImpl();
        const size_t is = itemSize();
        size_t s = vector.size();
        for (size_t i=0 ; i<s ; i++) {
            ssize_t err = add( reinterpret_cast<const char*>(buffer) + i*is );
            if (err<0) {
                return err;
            }
        }
    }
    return NO_ERROR;
}

ssize_t SortedVectorImpl::merge(const SortedVectorImpl& vector)
{
    // we're merging a sorted vector... nice!
    ssize_t err = NO_ERROR;
    if (!vector.isEmpty()) {
        // first take care of the case where the vectors are sorted together
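        // i.e. if every incoming element is <= our first element, prepend
        // the whole vector at once; if every incoming element is >= our
        // last element, append it wholesale. Only overlapping ranges fall
        // back to the element-by-element merge above.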
        if (do_compare(vector.itemLocation(vector.size()-1), arrayImpl()) <= 0) {
            err = VectorImpl::insertVectorAt(static_cast<const VectorImpl&>(vector), 0);
        } else if (do_compare(vector.arrayImpl(), itemLocation(size()-1)) >= 0) {
            err = VectorImpl::appendVector(static_cast<const VectorImpl&>(vector));
        } else {
            // this could be made a little better
            err = merge(static_cast<const VectorImpl&>(vector));
        }
    }
    return err;
}

ssize_t SortedVectorImpl::remove(const void* item)
{
    ssize_t i = indexOf(item);
    if (i>=0) {
        VectorImpl::removeItemsAt(i, 1);
    }
    return i;
}

#ifdef NEEDS_VECTORIMPL_SYMBOLS
void SortedVectorImpl::reservedSortedVectorImpl1() { }
void SortedVectorImpl::reservedSortedVectorImpl2() { }
void SortedVectorImpl::reservedSortedVectorImpl3() { }
void SortedVectorImpl::reservedSortedVectorImpl4() { }
void SortedVectorImpl::reservedSortedVectorImpl5() { }
void SortedVectorImpl::reservedSortedVectorImpl6() { }
void SortedVectorImpl::reservedSortedVectorImpl7() { }
void SortedVectorImpl::reservedSortedVectorImpl8() { }
#endif

/*****************************************************************************/

} // namespace android