PageRenderTime 47ms CodeModel.GetById 17ms RepoModel.GetById 0ms app.codeStats 0ms

/frameworks/native/libs/utils/VectorImpl.cpp

https://gitlab.com/brian0218/rk3066_r-box_android4.2.2_sdk
C++ | 634 lines | 515 code | 82 blank | 37 comment | 93 complexity | dd50098207265fd9fd82e2239c817789 MD5 | raw file
  1. /*
  2. * Copyright (C) 2005 The Android Open Source Project
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #define LOG_TAG "Vector"
  17. #include <string.h>
  18. #include <stdlib.h>
  19. #include <stdio.h>
  20. #include <cutils/log.h>
  21. #include <utils/Errors.h>
  22. #include <utils/SharedBuffer.h>
  23. #include <utils/VectorImpl.h>
  24. /*****************************************************************************/
  25. namespace android {
  26. // ----------------------------------------------------------------------------
  27. const size_t kMinVectorCapacity = 4;
  28. static inline size_t max(size_t a, size_t b) {
  29. return a>b ? a : b;
  30. }
  31. // ----------------------------------------------------------------------------
// Construct an empty vector: no storage is allocated until the first
// insertion. 'itemSize' is the byte size of one element; 'flags' are the
// HAS_TRIVIAL_* bits used to skip virtual ctor/dtor/copy calls.
VectorImpl::VectorImpl(size_t itemSize, uint32_t flags)
    : mStorage(0), mCount(0), mFlags(flags), mItemSize(itemSize)
{
}
// Copy-construct by sharing the underlying SharedBuffer (copy-on-write):
// only the reference count is bumped here; no items are copied until one
// side mutates.
VectorImpl::VectorImpl(const VectorImpl& rhs)
    : mStorage(rhs.mStorage), mCount(rhs.mCount),
      mFlags(rhs.mFlags), mItemSize(rhs.mItemSize)
{
    if (mStorage) {
        SharedBuffer::bufferFromData(mStorage)->acquire();
    }
}
// Subclasses must call finish_vector() from their own destructor: item
// destruction goes through virtual do_destroy(), and by the time this
// base destructor runs the derived vtable has been torn down. A non-zero
// mCount here therefore means items (and their resources) were leaked.
VectorImpl::~VectorImpl()
{
    ALOGW_IF(mCount,
            "[%p] subclasses of VectorImpl must call finish_vector()"
            " in their destructor. Leaking %d bytes.",
            this, (int)(mCount*mItemSize));
    // We can't call _do_destroy() here because the vtable is already gone.
}
// Assignment shares rhs's buffer (copy-on-write) rather than copying
// items. Aborts if the two vectors hold different item types.
VectorImpl& VectorImpl::operator = (const VectorImpl& rhs)
{
    LOG_ALWAYS_FATAL_IF(mItemSize != rhs.mItemSize,
        "Vector<> have different types (this=%p, rhs=%p)", this, &rhs);
    if (this != &rhs) {
        // drop our current storage first, then take a reference on rhs's
        release_storage();
        if (rhs.mCount) {
            mStorage = rhs.mStorage;
            mCount = rhs.mCount;
            SharedBuffer::bufferFromData(mStorage)->acquire();
        } else {
            // rhs is empty: don't bother referencing its buffer (if any)
            mStorage = 0;
            mCount = 0;
        }
    }
    return *this;
}
  69. void* VectorImpl::editArrayImpl()
  70. {
  71. if (mStorage) {
  72. SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage)->attemptEdit();
  73. if (sb == 0) {
  74. sb = SharedBuffer::alloc(capacity() * mItemSize);
  75. if (sb) {
  76. _do_copy(sb->data(), mStorage, mCount);
  77. release_storage();
  78. mStorage = sb->data();
  79. }
  80. }
  81. }
  82. return mStorage;
  83. }
  84. size_t VectorImpl::capacity() const
  85. {
  86. if (mStorage) {
  87. return SharedBuffer::bufferFromData(mStorage)->size() / mItemSize;
  88. }
  89. return 0;
  90. }
  91. ssize_t VectorImpl::insertVectorAt(const VectorImpl& vector, size_t index)
  92. {
  93. return insertArrayAt(vector.arrayImpl(), index, vector.size());
  94. }
  95. ssize_t VectorImpl::appendVector(const VectorImpl& vector)
  96. {
  97. return insertVectorAt(vector, size());
  98. }
  99. ssize_t VectorImpl::insertArrayAt(const void* array, size_t index, size_t length)
  100. {
  101. if (index > size())
  102. return BAD_INDEX;
  103. void* where = _grow(index, length);
  104. if (where) {
  105. _do_copy(where, array, length);
  106. }
  107. return where ? index : (ssize_t)NO_MEMORY;
  108. }
  109. ssize_t VectorImpl::appendArray(const void* array, size_t length)
  110. {
  111. return insertArrayAt(array, size(), length);
  112. }
  113. ssize_t VectorImpl::insertAt(size_t index, size_t numItems)
  114. {
  115. return insertAt(0, index, numItems);
  116. }
  117. ssize_t VectorImpl::insertAt(const void* item, size_t index, size_t numItems)
  118. {
  119. if (index > size())
  120. return BAD_INDEX;
  121. void* where = _grow(index, numItems);
  122. if (where) {
  123. if (item) {
  124. _do_splat(where, item, numItems);
  125. } else {
  126. _do_construct(where, numItems);
  127. }
  128. }
  129. return where ? index : (ssize_t)NO_MEMORY;
  130. }
  131. static int sortProxy(const void* lhs, const void* rhs, void* func)
  132. {
  133. return (*(VectorImpl::compar_t)func)(lhs, rhs);
  134. }
  135. status_t VectorImpl::sort(VectorImpl::compar_t cmp)
  136. {
  137. return sort(sortProxy, (void*)cmp);
  138. }
// Stable in-place sort using the state-aware comparator 'cmp'.
// Returns NO_ERROR, or NO_MEMORY if copy-on-write or the scratch
// allocation fails.
status_t VectorImpl::sort(VectorImpl::compar_r_t cmp, void* state)
{
    // the sort must be stable. we're using insertion sort which
    // is well suited for small and already sorted arrays
    // for big arrays, it could be better to use mergesort
    const ssize_t count = size();
    if (count > 1) {
        void* array = const_cast<void*>(arrayImpl());
        void* temp = 0;  // lazily-allocated scratch slot for the item being moved
        ssize_t i = 1;
        while (i < count) {
            void* item = reinterpret_cast<char*>(array) + mItemSize*(i);
            void* curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
            if (cmp(curr, item, state) > 0) {
                if (!temp) {
                    // we're going to have to modify the array...
                    // Trigger copy-on-write once, then recompute item/curr
                    // against the (possibly relocated) buffer.
                    array = editArrayImpl();
                    if (!array) return NO_MEMORY;
                    temp = malloc(mItemSize);
                    if (!temp) return NO_MEMORY;
                    item = reinterpret_cast<char*>(array) + mItemSize*(i);
                    curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
                } else {
                    // reuse the scratch slot: destroy its previous occupant
                    _do_destroy(temp, 1);
                }
                // stash the out-of-place item, then shift larger items up
                _do_copy(temp, item, 1);
                ssize_t j = i-1;
                void* next = reinterpret_cast<char*>(array) + mItemSize*(i);
                do {
                    _do_destroy(next, 1);
                    _do_copy(next, curr, 1);
                    next = curr;
                    --j;
                    curr = reinterpret_cast<char*>(array) + mItemSize*(j);
                } while (j>=0 && (cmp(curr, temp, state) > 0));
                // insertion point found: drop the stashed item there
                _do_destroy(next, 1);
                _do_copy(next, temp, 1);
            }
            i++;
        }
        if (temp) {
            _do_destroy(temp, 1);
            free(temp);
        }
    }
    return NO_ERROR;
}
  186. void VectorImpl::pop()
  187. {
  188. if (size())
  189. removeItemsAt(size()-1, 1);
  190. }
  191. void VectorImpl::push()
  192. {
  193. push(0);
  194. }
  195. void VectorImpl::push(const void* item)
  196. {
  197. insertAt(item, size());
  198. }
  199. ssize_t VectorImpl::add()
  200. {
  201. return add(0);
  202. }
  203. ssize_t VectorImpl::add(const void* item)
  204. {
  205. return insertAt(item, size());
  206. }
  207. ssize_t VectorImpl::replaceAt(size_t index)
  208. {
  209. return replaceAt(0, index);
  210. }
  211. ssize_t VectorImpl::replaceAt(const void* prototype, size_t index)
  212. {
  213. ALOG_ASSERT(index<size(),
  214. "[%p] replace: index=%d, size=%d", this, (int)index, (int)size());
  215. if (index >= size()) {
  216. return BAD_INDEX;
  217. }
  218. void* item = editItemLocation(index);
  219. if (item != prototype) {
  220. if (item == 0)
  221. return NO_MEMORY;
  222. _do_destroy(item, 1);
  223. if (prototype == 0) {
  224. _do_construct(item, 1);
  225. } else {
  226. _do_copy(item, prototype, 1);
  227. }
  228. }
  229. return ssize_t(index);
  230. }
  231. ssize_t VectorImpl::removeItemsAt(size_t index, size_t count)
  232. {
  233. ALOG_ASSERT((index+count)<=size(),
  234. "[%p] remove: index=%d, count=%d, size=%d",
  235. this, (int)index, (int)count, (int)size());
  236. if ((index+count) > size())
  237. return BAD_VALUE;
  238. _shrink(index, count);
  239. return index;
  240. }
  241. void VectorImpl::finish_vector()
  242. {
  243. release_storage();
  244. mStorage = 0;
  245. mCount = 0;
  246. }
  247. void VectorImpl::clear()
  248. {
  249. _shrink(0, mCount);
  250. }
  251. void* VectorImpl::editItemLocation(size_t index)
  252. {
  253. ALOG_ASSERT(index<capacity(),
  254. "[%p] editItemLocation: index=%d, capacity=%d, count=%d",
  255. this, (int)index, (int)capacity(), (int)mCount);
  256. if (index < capacity()) {
  257. void* buffer = editArrayImpl();
  258. if (buffer) {
  259. return reinterpret_cast<char*>(buffer) + index*mItemSize;
  260. }
  261. }
  262. return 0;
  263. }
  264. const void* VectorImpl::itemLocation(size_t index) const
  265. {
  266. ALOG_ASSERT(index<capacity(),
  267. "[%p] itemLocation: index=%d, capacity=%d, count=%d",
  268. this, (int)index, (int)capacity(), (int)mCount);
  269. if (index < capacity()) {
  270. const void* buffer = arrayImpl();
  271. if (buffer) {
  272. return reinterpret_cast<const char*>(buffer) + index*mItemSize;
  273. }
  274. }
  275. return 0;
  276. }
  277. ssize_t VectorImpl::setCapacity(size_t new_capacity)
  278. {
  279. size_t current_capacity = capacity();
  280. ssize_t amount = new_capacity - size();
  281. if (amount <= 0) {
  282. // we can't reduce the capacity
  283. return current_capacity;
  284. }
  285. SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
  286. if (sb) {
  287. void* array = sb->data();
  288. _do_copy(array, mStorage, size());
  289. release_storage();
  290. mStorage = const_cast<void*>(array);
  291. } else {
  292. return NO_MEMORY;
  293. }
  294. return new_capacity;
  295. }
// Drop one reference on the SharedBuffer. When this was the last
// reference, destroy the items and free the buffer. eKeepStorage keeps
// the raw memory alive through release() so _do_destroy() can still run
// the item destructors over it; the memory is then freed explicitly with
// dealloc(). Note: mStorage/mCount are deliberately left untouched —
// callers (finish_vector, operator=, _grow/_shrink) reset them.
void VectorImpl::release_storage()
{
    if (mStorage) {
        const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
        if (sb->release(SharedBuffer::eKeepStorage) == 1) {
            _do_destroy(mStorage, mCount);
            SharedBuffer::dealloc(sb);
        }
    }
}
  306. void* VectorImpl::_grow(size_t where, size_t amount)
  307. {
  308. // ALOGV("_grow(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
  309. // this, (int)where, (int)amount, (int)mCount, (int)capacity());
  310. ALOG_ASSERT(where <= mCount,
  311. "[%p] _grow: where=%d, amount=%d, count=%d",
  312. this, (int)where, (int)amount, (int)mCount); // caller already checked
  313. const size_t new_size = mCount + amount;
  314. if (capacity() < new_size) {
  315. const size_t new_capacity = max(kMinVectorCapacity, ((new_size*3)+1)/2);
  316. // ALOGV("grow vector %p, new_capacity=%d", this, (int)new_capacity);
  317. if ((mStorage) &&
  318. (mCount==where) &&
  319. (mFlags & HAS_TRIVIAL_COPY) &&
  320. (mFlags & HAS_TRIVIAL_DTOR))
  321. {
  322. const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
  323. SharedBuffer* sb = cur_sb->editResize(new_capacity * mItemSize);
  324. mStorage = sb->data();
  325. } else {
  326. SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
  327. if (sb) {
  328. void* array = sb->data();
  329. if (where != 0) {
  330. _do_copy(array, mStorage, where);
  331. }
  332. if (where != mCount) {
  333. const void* from = reinterpret_cast<const uint8_t *>(mStorage) + where*mItemSize;
  334. void* dest = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
  335. _do_copy(dest, from, mCount-where);
  336. }
  337. release_storage();
  338. mStorage = const_cast<void*>(array);
  339. }
  340. }
  341. } else {
  342. void* array = editArrayImpl();
  343. if (where != mCount) {
  344. const void* from = reinterpret_cast<const uint8_t *>(array) + where*mItemSize;
  345. void* to = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
  346. _do_move_forward(to, from, mCount - where);
  347. }
  348. }
  349. mCount = new_size;
  350. void* free_space = const_cast<void*>(itemLocation(where));
  351. return free_space;
  352. }
  353. void VectorImpl::_shrink(size_t where, size_t amount)
  354. {
  355. if (!mStorage)
  356. return;
  357. // ALOGV("_shrink(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
  358. // this, (int)where, (int)amount, (int)mCount, (int)capacity());
  359. ALOG_ASSERT(where + amount <= mCount,
  360. "[%p] _shrink: where=%d, amount=%d, count=%d",
  361. this, (int)where, (int)amount, (int)mCount); // caller already checked
  362. const size_t new_size = mCount - amount;
  363. if (new_size*3 < capacity()) {
  364. const size_t new_capacity = max(kMinVectorCapacity, new_size*2);
  365. // ALOGV("shrink vector %p, new_capacity=%d", this, (int)new_capacity);
  366. if ((where == new_size) &&
  367. (mFlags & HAS_TRIVIAL_COPY) &&
  368. (mFlags & HAS_TRIVIAL_DTOR))
  369. {
  370. const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
  371. SharedBuffer* sb = cur_sb->editResize(new_capacity * mItemSize);
  372. mStorage = sb->data();
  373. } else {
  374. SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
  375. if (sb) {
  376. void* array = sb->data();
  377. if (where != 0) {
  378. _do_copy(array, mStorage, where);
  379. }
  380. if (where != new_size) {
  381. const void* from = reinterpret_cast<const uint8_t *>(mStorage) + (where+amount)*mItemSize;
  382. void* dest = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
  383. _do_copy(dest, from, new_size - where);
  384. }
  385. release_storage();
  386. mStorage = const_cast<void*>(array);
  387. }
  388. }
  389. } else {
  390. void* array = editArrayImpl();
  391. void* to = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
  392. _do_destroy(to, amount);
  393. if (where != new_size) {
  394. const void* from = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
  395. _do_move_backward(to, from, new_size - where);
  396. }
  397. }
  398. mCount = new_size;
  399. }
// Byte size of one element, as supplied at construction time.
size_t VectorImpl::itemSize() const {
    return mItemSize;
}
  403. void VectorImpl::_do_construct(void* storage, size_t num) const
  404. {
  405. if (!(mFlags & HAS_TRIVIAL_CTOR)) {
  406. do_construct(storage, num);
  407. }
  408. }
  409. void VectorImpl::_do_destroy(void* storage, size_t num) const
  410. {
  411. if (!(mFlags & HAS_TRIVIAL_DTOR)) {
  412. do_destroy(storage, num);
  413. }
  414. }
  415. void VectorImpl::_do_copy(void* dest, const void* from, size_t num) const
  416. {
  417. if (!(mFlags & HAS_TRIVIAL_COPY)) {
  418. do_copy(dest, from, num);
  419. } else {
  420. memcpy(dest, from, num*itemSize());
  421. }
  422. }
// Thin forwarders to the virtual hooks. NOTE(review): unlike _do_copy()
// there is no trivial fast path here — presumably the do_* template
// implementations handle the trivial cases themselves; confirm against
// the header before adding one.
void VectorImpl::_do_splat(void* dest, const void* item, size_t num) const {
    do_splat(dest, item, num);
}
void VectorImpl::_do_move_forward(void* dest, const void* from, size_t num) const {
    do_move_forward(dest, from, num);
}
void VectorImpl::_do_move_backward(void* dest, const void* from, size_t num) const {
    do_move_backward(dest, from, num);
}
// Reserved virtual slots — presumably placeholders so future additions
// don't change the vtable layout (binary compatibility); confirm against
// the header before repurposing any of them.
void VectorImpl::reservedVectorImpl1() { }
void VectorImpl::reservedVectorImpl2() { }
void VectorImpl::reservedVectorImpl3() { }
void VectorImpl::reservedVectorImpl4() { }
void VectorImpl::reservedVectorImpl5() { }
void VectorImpl::reservedVectorImpl6() { }
void VectorImpl::reservedVectorImpl7() { }
void VectorImpl::reservedVectorImpl8() { }
  440. /*****************************************************************************/
// Sorted variant: same storage machinery as VectorImpl; ordering is
// maintained by add()/merge() via the subclass's do_compare().
SortedVectorImpl::SortedVectorImpl(size_t itemSize, uint32_t flags)
    : VectorImpl(itemSize, flags)
{
}
// NOTE(review): assumes 'rhs' is already sorted — the caller must
// guarantee it, nothing here re-sorts.
SortedVectorImpl::SortedVectorImpl(const VectorImpl& rhs)
    : VectorImpl(rhs)
{
}
SortedVectorImpl::~SortedVectorImpl()
{
}
// Assignment reuses VectorImpl's shared-buffer assignment; both sides
// are sorted, so the result stays sorted.
SortedVectorImpl& SortedVectorImpl::operator = (const SortedVectorImpl& rhs)
{
    return static_cast<SortedVectorImpl&>( VectorImpl::operator = (static_cast<const VectorImpl&>(rhs)) );
}
  456. ssize_t SortedVectorImpl::indexOf(const void* item) const
  457. {
  458. return _indexOrderOf(item);
  459. }
  460. size_t SortedVectorImpl::orderOf(const void* item) const
  461. {
  462. size_t o;
  463. _indexOrderOf(item, &o);
  464. return o;
  465. }
// Binary-search for 'item'. Returns its index when found, NAME_NOT_FOUND
// otherwise. When 'order' is non-null it receives the position where the
// item is / should be inserted to keep the vector sorted.
ssize_t SortedVectorImpl::_indexOrderOf(const void* item, size_t* order) const
{
    // binary search
    ssize_t err = NAME_NOT_FOUND;
    ssize_t l = 0;
    ssize_t h = size()-1;
    ssize_t mid;
    const void* a = arrayImpl();
    const size_t s = itemSize();
    while (l <= h) {
        mid = l + (h - l)/2;  // overflow-safe midpoint
        const void* const curr = reinterpret_cast<const char *>(a) + (mid*s);
        const int c = do_compare(curr, item);
        if (c == 0) {
            // found: also update l so '*order' reports this slot
            err = l = mid;
            break;
        } else if (c < 0) {
            l = mid + 1;
        } else {
            h = mid - 1;
        }
    }
    if (order) *order = l;
    return err;
}
  491. ssize_t SortedVectorImpl::add(const void* item)
  492. {
  493. size_t order;
  494. ssize_t index = _indexOrderOf(item, &order);
  495. if (index < 0) {
  496. index = VectorImpl::insertAt(item, order, 1);
  497. } else {
  498. index = VectorImpl::replaceAt(item, index);
  499. }
  500. return index;
  501. }
  502. ssize_t SortedVectorImpl::merge(const VectorImpl& vector)
  503. {
  504. // naive merge...
  505. if (!vector.isEmpty()) {
  506. const void* buffer = vector.arrayImpl();
  507. const size_t is = itemSize();
  508. size_t s = vector.size();
  509. for (size_t i=0 ; i<s ; i++) {
  510. ssize_t err = add( reinterpret_cast<const char*>(buffer) + i*is );
  511. if (err<0) {
  512. return err;
  513. }
  514. }
  515. }
  516. return NO_ERROR;
  517. }
  518. ssize_t SortedVectorImpl::merge(const SortedVectorImpl& vector)
  519. {
  520. // we've merging a sorted vector... nice!
  521. ssize_t err = NO_ERROR;
  522. if (!vector.isEmpty()) {
  523. // first take care of the case where the vectors are sorted together
  524. if (do_compare(vector.itemLocation(vector.size()-1), arrayImpl()) <= 0) {
  525. err = VectorImpl::insertVectorAt(static_cast<const VectorImpl&>(vector), 0);
  526. } else if (do_compare(vector.arrayImpl(), itemLocation(size()-1)) >= 0) {
  527. err = VectorImpl::appendVector(static_cast<const VectorImpl&>(vector));
  528. } else {
  529. // this could be made a little better
  530. err = merge(static_cast<const VectorImpl&>(vector));
  531. }
  532. }
  533. return err;
  534. }
  535. ssize_t SortedVectorImpl::remove(const void* item)
  536. {
  537. ssize_t i = indexOf(item);
  538. if (i>=0) {
  539. VectorImpl::removeItemsAt(i, 1);
  540. }
  541. return i;
  542. }
  543. void SortedVectorImpl::reservedSortedVectorImpl1() { };
  544. void SortedVectorImpl::reservedSortedVectorImpl2() { };
  545. void SortedVectorImpl::reservedSortedVectorImpl3() { };
  546. void SortedVectorImpl::reservedSortedVectorImpl4() { };
  547. void SortedVectorImpl::reservedSortedVectorImpl5() { };
  548. void SortedVectorImpl::reservedSortedVectorImpl6() { };
  549. void SortedVectorImpl::reservedSortedVectorImpl7() { };
  550. void SortedVectorImpl::reservedSortedVectorImpl8() { };
  551. /*****************************************************************************/
  552. }; // namespace android