
/libutils/BasicHashtable.cpp

https://gitlab.com/infraredbg/android_system_core-mt6589
C++ | 342 lines | 290 code | 35 blank | 17 comment | 57 complexity | 8f68e3880742865b02d9b70fcac5d767 MD5
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "BasicHashtable"

#include <math.h>

#include <utils/Log.h>
#include <utils/BasicHashtable.h>
#include <utils/misc.h>

namespace android {

BasicHashtableImpl::BasicHashtableImpl(size_t entrySize, bool hasTrivialDestructor,
        size_t minimumInitialCapacity, float loadFactor) :
        mBucketSize(entrySize + sizeof(Bucket)), mHasTrivialDestructor(hasTrivialDestructor),
        mLoadFactor(loadFactor), mSize(0),
        mFilledBuckets(0), mBuckets(NULL) {
    determineCapacity(minimumInitialCapacity, mLoadFactor, &mBucketCount, &mCapacity);
}

BasicHashtableImpl::BasicHashtableImpl(const BasicHashtableImpl& other) :
        mBucketSize(other.mBucketSize), mHasTrivialDestructor(other.mHasTrivialDestructor),
        mCapacity(other.mCapacity), mLoadFactor(other.mLoadFactor),
        mSize(other.mSize), mFilledBuckets(other.mFilledBuckets),
        mBucketCount(other.mBucketCount), mBuckets(other.mBuckets) {
    if (mBuckets) {
        SharedBuffer::bufferFromData(mBuckets)->acquire();
    }
}

BasicHashtableImpl::~BasicHashtableImpl()
{
}

void BasicHashtableImpl::dispose() {
    if (mBuckets) {
        releaseBuckets(mBuckets, mBucketCount);
    }
}

void BasicHashtableImpl::clone() {
    if (mBuckets) {
        void* newBuckets = allocateBuckets(mBucketCount);
        copyBuckets(mBuckets, newBuckets, mBucketCount);
        releaseBuckets(mBuckets, mBucketCount);
        mBuckets = newBuckets;
    }
}

void BasicHashtableImpl::setTo(const BasicHashtableImpl& other) {
    if (mBuckets) {
        releaseBuckets(mBuckets, mBucketCount);
    }

    mCapacity = other.mCapacity;
    mLoadFactor = other.mLoadFactor;
    mSize = other.mSize;
    mFilledBuckets = other.mFilledBuckets;
    mBucketCount = other.mBucketCount;
    mBuckets = other.mBuckets;

    if (mBuckets) {
        SharedBuffer::bufferFromData(mBuckets)->acquire();
    }
}

void BasicHashtableImpl::clear() {
    if (mBuckets) {
        if (mFilledBuckets) {
            SharedBuffer* sb = SharedBuffer::bufferFromData(mBuckets);
            if (sb->onlyOwner()) {
                destroyBuckets(mBuckets, mBucketCount);
                for (size_t i = 0; i < mBucketCount; i++) {
                    Bucket& bucket = bucketAt(mBuckets, i);
                    bucket.cookie = 0;
                }
            } else {
                releaseBuckets(mBuckets, mBucketCount);
                mBuckets = NULL;
            }
            mFilledBuckets = 0;
        }
        mSize = 0;
    }
}

ssize_t BasicHashtableImpl::next(ssize_t index) const {
    if (mSize) {
        while (size_t(++index) < mBucketCount) {
            const Bucket& bucket = bucketAt(mBuckets, index);
            if (bucket.cookie & Bucket::PRESENT) {
                return index;
            }
        }
    }
    return -1;
}

ssize_t BasicHashtableImpl::find(ssize_t index, hash_t hash,
        const void* __restrict__ key) const {
    if (!mSize) {
        return -1;
    }

    hash = trimHash(hash);
    if (index < 0) {
        index = chainStart(hash, mBucketCount);

        const Bucket& bucket = bucketAt(mBuckets, size_t(index));
        if (bucket.cookie & Bucket::PRESENT) {
            if (compareBucketKey(bucket, key)) {
                return index;
            }
        } else {
            if (!(bucket.cookie & Bucket::COLLISION)) {
                return -1;
            }
        }
    }

    size_t inc = chainIncrement(hash, mBucketCount);
    for (;;) {
        index = chainSeek(index, inc, mBucketCount);

        const Bucket& bucket = bucketAt(mBuckets, size_t(index));
        if (bucket.cookie & Bucket::PRESENT) {
            if ((bucket.cookie & Bucket::HASH_MASK) == hash
                    && compareBucketKey(bucket, key)) {
                return index;
            }
        }
        if (!(bucket.cookie & Bucket::COLLISION)) {
            return -1;
        }
    }
}

size_t BasicHashtableImpl::add(hash_t hash, const void* entry) {
    if (!mBuckets) {
        mBuckets = allocateBuckets(mBucketCount);
    } else {
        edit();
    }

    hash = trimHash(hash);
    for (;;) {
        size_t index = chainStart(hash, mBucketCount);
        Bucket* bucket = &bucketAt(mBuckets, size_t(index));
        if (bucket->cookie & Bucket::PRESENT) {
            size_t inc = chainIncrement(hash, mBucketCount);
            do {
                bucket->cookie |= Bucket::COLLISION;
                index = chainSeek(index, inc, mBucketCount);
                bucket = &bucketAt(mBuckets, size_t(index));
            } while (bucket->cookie & Bucket::PRESENT);
        }

        uint32_t collision = bucket->cookie & Bucket::COLLISION;
        if (!collision) {
            if (mFilledBuckets >= mCapacity) {
                rehash(mCapacity * 2, mLoadFactor);
                continue;
            }
            mFilledBuckets += 1;
        }

        bucket->cookie = collision | Bucket::PRESENT | hash;
        mSize += 1;
        initializeBucketEntry(*bucket, entry);
        return index;
    }
}

void BasicHashtableImpl::removeAt(size_t index) {
    edit();

    Bucket& bucket = bucketAt(mBuckets, index);
    bucket.cookie &= ~Bucket::PRESENT;
    if (!(bucket.cookie & Bucket::COLLISION)) {
        mFilledBuckets -= 1;
    }
    mSize -= 1;
    if (!mHasTrivialDestructor) {
        destroyBucketEntry(bucket);
    }
}

void BasicHashtableImpl::rehash(size_t minimumCapacity, float loadFactor) {
    if (minimumCapacity < mSize) {
        minimumCapacity = mSize;
    }
    size_t newBucketCount, newCapacity;
    determineCapacity(minimumCapacity, loadFactor, &newBucketCount, &newCapacity);

    if (newBucketCount != mBucketCount || newCapacity != mCapacity) {
        if (mBuckets) {
            void* newBuckets;
            if (mSize) {
                newBuckets = allocateBuckets(newBucketCount);
                for (size_t i = 0; i < mBucketCount; i++) {
                    const Bucket& fromBucket = bucketAt(mBuckets, i);
                    if (fromBucket.cookie & Bucket::PRESENT) {
                        hash_t hash = fromBucket.cookie & Bucket::HASH_MASK;
                        size_t index = chainStart(hash, newBucketCount);
                        Bucket* toBucket = &bucketAt(newBuckets, size_t(index));
                        if (toBucket->cookie & Bucket::PRESENT) {
                            size_t inc = chainIncrement(hash, newBucketCount);
                            do {
                                toBucket->cookie |= Bucket::COLLISION;
                                index = chainSeek(index, inc, newBucketCount);
                                toBucket = &bucketAt(newBuckets, size_t(index));
                            } while (toBucket->cookie & Bucket::PRESENT);
                        }
                        toBucket->cookie = Bucket::PRESENT | hash;
                        initializeBucketEntry(*toBucket, fromBucket.entry);
                    }
                }
            } else {
                newBuckets = NULL;
            }
            releaseBuckets(mBuckets, mBucketCount);
            mBuckets = newBuckets;
            mFilledBuckets = mSize;
        }
        mBucketCount = newBucketCount;
        mCapacity = newCapacity;
    }
    mLoadFactor = loadFactor;
}

void* BasicHashtableImpl::allocateBuckets(size_t count) const {
    size_t bytes = count * mBucketSize;
    SharedBuffer* sb = SharedBuffer::alloc(bytes);
    LOG_ALWAYS_FATAL_IF(!sb, "Could not allocate %u bytes for hashtable with %u buckets.",
            uint32_t(bytes), uint32_t(count));
    void* buckets = sb->data();
    for (size_t i = 0; i < count; i++) {
        Bucket& bucket = bucketAt(buckets, i);
        bucket.cookie = 0;
    }
    return buckets;
}

void BasicHashtableImpl::releaseBuckets(void* __restrict__ buckets, size_t count) const {
    SharedBuffer* sb = SharedBuffer::bufferFromData(buckets);
    if (sb->release(SharedBuffer::eKeepStorage) == 1) {
        destroyBuckets(buckets, count);
        SharedBuffer::dealloc(sb);
    }
}

void BasicHashtableImpl::destroyBuckets(void* __restrict__ buckets, size_t count) const {
    if (!mHasTrivialDestructor) {
        for (size_t i = 0; i < count; i++) {
            Bucket& bucket = bucketAt(buckets, i);
            if (bucket.cookie & Bucket::PRESENT) {
                destroyBucketEntry(bucket);
            }
        }
    }
}

void BasicHashtableImpl::copyBuckets(const void* __restrict__ fromBuckets,
        void* __restrict__ toBuckets, size_t count) const {
    for (size_t i = 0; i < count; i++) {
        const Bucket& fromBucket = bucketAt(fromBuckets, i);
        Bucket& toBucket = bucketAt(toBuckets, i);
        toBucket.cookie = fromBucket.cookie;
        if (fromBucket.cookie & Bucket::PRESENT) {
            initializeBucketEntry(toBucket, fromBucket.entry);
        }
    }
}

// Table of 31-bit primes where each prime is no less than twice as large
// as the previous one.  Generated by "primes.py".
static size_t PRIMES[] = {
    5,
    11,
    23,
    47,
    97,
    197,
    397,
    797,
    1597,
    3203,
    6421,
    12853,
    25717,
    51437,
    102877,
    205759,
    411527,
    823117,
    1646237,
    3292489,
    6584983,
    13169977,
    26339969,
    52679969,
    105359939,
    210719881,
    421439783,
    842879579,
    1685759167,
    0,
};

void BasicHashtableImpl::determineCapacity(size_t minimumCapacity, float loadFactor,
        size_t* __restrict__ outBucketCount, size_t* __restrict__ outCapacity) {
    LOG_ALWAYS_FATAL_IF(loadFactor <= 0.0f || loadFactor > 1.0f,
            "Invalid load factor %0.3f.  Must be in the range (0, 1].", loadFactor);

    size_t count = ceilf(minimumCapacity / loadFactor) + 1;
    size_t i = 0;
    while (count > PRIMES[i] && i < NELEM(PRIMES)) {
        i++;
    }
    count = PRIMES[i];
    LOG_ALWAYS_FATAL_IF(!count, "Could not determine required number of buckets for "
            "hashtable with minimum capacity %u and load factor %0.3f.",
            uint32_t(minimumCapacity), loadFactor);
    *outBucketCount = count;
    *outCapacity = ceilf((count - 1) * loadFactor);
}

}; // namespace android
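
The find() and add() paths above resolve collisions with open addressing and double hashing: chainStart() picks the home bucket for a hash, chainIncrement() derives a nonzero step from the same hash, and chainSeek() walks the probe chain, while the COLLISION cookie bit marks buckets whose chains continue past them so a lookup knows when it may stop. Those three helpers are defined in the companion header utils/BasicHashtable.h, not in this file. The standalone sketch below illustrates the same probing scheme with commonly used formulas; the exact expressions in the header may differ.

// Standalone illustration of the double-hashing probe pattern used by
// find() and add() above.  The chainStart/chainIncrement formulas here are
// common textbook choices, not necessarily the exact expressions defined
// in utils/BasicHashtable.h.
#include <cstdint>
#include <cstdio>

static size_t chainStart(uint32_t hash, size_t count) {
    return hash % count;                   // home bucket for this hash
}

static size_t chainIncrement(uint32_t hash, size_t count) {
    return (hash % (count - 1)) + 1;       // step in [1, count-1], never 0
}

static size_t chainSeek(size_t index, size_t inc, size_t count) {
    return (index + inc) % count;          // next bucket along the chain
}

int main() {
    const size_t bucketCount = 11;         // a prime, as in the PRIMES table above
    const uint32_t hash = 0x9e3779b9u;     // any trimmed hash value

    size_t index = chainStart(hash, bucketCount);
    const size_t inc = chainIncrement(hash, bucketCount);
    // Because bucketCount is prime and inc is never 0, the chain visits
    // every bucket exactly once before repeating.
    for (int probe = 0; probe < 5; probe++) {
        printf("probe %d -> bucket %zu\n", probe, index);
        index = chainSeek(index, inc, bucketCount);
    }
    return 0;
}

Keeping the bucket count prime is what makes the full-cycle property hold for any nonzero increment, which is why the table sizes are drawn from the PRIMES array rather than rounded to powers of two.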