PageRenderTime 65ms CodeModel.GetById 23ms RepoModel.GetById 0ms app.codeStats 1ms

/Zend/zend_hash.c

http://github.com/php/php-src
C | 2853 lines | 2426 code | 357 blank | 70 comment | 765 complexity | 926f7b62b6b0a6c1b157476975d538fe MD5 | raw file
Possible License(s): BSD-2-Clause, BSD-3-Clause, MPL-2.0-no-copyleft-exception, LGPL-2.1

Large files are truncated, but you can click here to view the full file

  1. /*
  2. +----------------------------------------------------------------------+
  3. | Zend Engine |
  4. +----------------------------------------------------------------------+
  5. | Copyright (c) Zend Technologies Ltd. (http://www.zend.com) |
  6. +----------------------------------------------------------------------+
  7. | This source file is subject to version 2.00 of the Zend license, |
  8. | that is bundled with this package in the file LICENSE, and is |
  9. | available through the world-wide-web at the following url: |
  10. | http://www.zend.com/license/2_00.txt. |
  11. | If you did not receive a copy of the Zend license and are unable to |
  12. | obtain it through the world-wide-web, please send a note to |
  13. | license@zend.com so we can mail you a copy immediately. |
  14. +----------------------------------------------------------------------+
  15. | Authors: Andi Gutmans <andi@php.net> |
  16. | Zeev Suraski <zeev@php.net> |
  17. | Dmitry Stogov <dmitry@php.net> |
  18. +----------------------------------------------------------------------+
  19. */
  20. #include "zend.h"
  21. #include "zend_globals.h"
  22. #include "zend_variables.h"
  23. #if defined(__aarch64__)
  24. # include <arm_neon.h>
  25. #endif
  26. #ifdef __SSE2__
  27. # include <mmintrin.h>
  28. # include <emmintrin.h>
  29. #endif
  30. #if ZEND_DEBUG
  31. # define HT_ASSERT(ht, expr) \
  32. ZEND_ASSERT((expr) || (HT_FLAGS(ht) & HASH_FLAG_ALLOW_COW_VIOLATION))
  33. #else
  34. # define HT_ASSERT(ht, expr)
  35. #endif
  36. #define HT_ASSERT_RC1(ht) HT_ASSERT(ht, GC_REFCOUNT(ht) == 1)
  37. #define HT_POISONED_PTR ((HashTable *) (intptr_t) -1)
  38. #if ZEND_DEBUG
  39. #define HT_OK 0x00
  40. #define HT_IS_DESTROYING 0x01
  41. #define HT_DESTROYED 0x02
  42. #define HT_CLEANING 0x03
/* Debug-only consistency check (compiled under ZEND_DEBUG, invoked via the
 * IS_CONSISTENT() macro, which supplies the call site's __FILE__/__LINE__).
 * If the table's consistency state is not HT_OK, emit a diagnostic naming
 * the offending state and abort via ZEND_ASSERT(0). */
static void _zend_is_inconsistent(const HashTable *ht, const char *file, int line)
{
    /* Fast path: table is in the normal, usable state. */
    if ((HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) == HT_OK) {
        return;
    }
    switch (HT_FLAGS(ht) & HASH_FLAG_CONSISTENCY) {
        case HT_IS_DESTROYING:
            zend_output_debug_string(1, "%s(%d) : ht=%p is being destroyed", file, line, ht);
            break;
        case HT_DESTROYED:
            zend_output_debug_string(1, "%s(%d) : ht=%p is already destroyed", file, line, ht);
            break;
        case HT_CLEANING:
            zend_output_debug_string(1, "%s(%d) : ht=%p is being cleaned", file, line, ht);
            break;
        default:
            zend_output_debug_string(1, "%s(%d) : ht=%p is inconsistent", file, line, ht);
            break;
    }
    /* Any non-HT_OK state is a hard programming error in debug builds. */
    ZEND_ASSERT(0);
}
  64. #define IS_CONSISTENT(a) _zend_is_inconsistent(a, __FILE__, __LINE__);
  65. #define SET_INCONSISTENT(n) do { \
  66. HT_FLAGS(ht) = (HT_FLAGS(ht) & ~HASH_FLAG_CONSISTENCY) | (n); \
  67. } while (0)
  68. #else
  69. #define IS_CONSISTENT(a)
  70. #define SET_INCONSISTENT(n)
  71. #endif
  72. #define ZEND_HASH_IF_FULL_DO_RESIZE(ht) \
  73. if ((ht)->nNumUsed >= (ht)->nTableSize) { \
  74. zend_hash_do_resize(ht); \
  75. }
  76. ZEND_API void *zend_hash_str_find_ptr_lc(const HashTable *ht, const char *str, size_t len) {
  77. void *result;
  78. char *lc_str;
  79. /* Stack allocate small strings to improve performance */
  80. ALLOCA_FLAG(use_heap)
  81. lc_str = zend_str_tolower_copy(do_alloca(len + 1, use_heap), str, len);
  82. result = zend_hash_str_find_ptr(ht, lc_str, len);
  83. free_alloca(lc_str, use_heap);
  84. return result;
  85. }
  86. ZEND_API void *zend_hash_find_ptr_lc(const HashTable *ht, zend_string *key) {
  87. void *result;
  88. zend_string *lc_key = zend_string_tolower(key);
  89. result = zend_hash_find_ptr(ht, lc_key);
  90. zend_string_release(lc_key);
  91. return result;
  92. }
  93. static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht);
/* Round a requested element count up to the table size actually allocated:
 * a power of two clamped to [HT_MIN_SIZE, HT_MAX_SIZE). Requests at or above
 * HT_MAX_SIZE abort with a fatal error (allocation would overflow). */
static zend_always_inline uint32_t zend_hash_check_size(uint32_t nSize)
{
#if defined(ZEND_WIN32)
    unsigned long index;
#endif
    /* Use big enough power of 2 */
    /* size should be between HT_MIN_SIZE and HT_MAX_SIZE */
    if (nSize <= HT_MIN_SIZE) {
        return HT_MIN_SIZE;
    } else if (UNEXPECTED(nSize >= HT_MAX_SIZE)) {
        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", nSize, sizeof(Bucket), sizeof(Bucket));
    }
#if defined(ZEND_WIN32)
    /* BitScanReverse finds the highest set bit of nSize - 1; the next power
     * of two is therefore 2 << that bit position. */
    if (BitScanReverse(&index, nSize - 1)) {
        return 0x2u << ((31 - index) ^ 0x1f);
    } else {
        /* nSize is ensured to be in the valid range, fall back to it
           rather than using an undefined bit scan result. */
        return nSize;
    }
#elif (defined(__GNUC__) || __has_builtin(__builtin_clz)) && defined(PHP_HAVE_BUILTIN_CLZ)
    /* __builtin_clz(x) ^ 0x1f == 31 - clz(x) == index of the highest set bit. */
    return 0x2u << (__builtin_clz(nSize - 1) ^ 0x1f);
#else
    /* Portable fallback: smear the highest set bit of nSize - 1 downward so
     * all lower bits become 1, then add 1 to reach the next power of two. */
    nSize -= 1;
    nSize |= (nSize >> 1);
    nSize |= (nSize >> 2);
    nSize |= (nSize >> 4);
    nSize |= (nSize >> 8);
    nSize |= (nSize >> 16);
    return nSize + 1;
#endif
}
/* Allocate storage for a packed (vector-like) table of ht->nTableSize
 * buckets. Packed tables use the minimal hash part (HT_MIN_MASK), since
 * numeric keys index arData directly. Persistent arrays allocate with
 * pemalloc; the HT_MIN_SIZE case is split out as the common fast path. */
static zend_always_inline void zend_hash_real_init_packed_ex(HashTable *ht)
{
    void *data;
    if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
        data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), 1);
    } else if (EXPECTED(ht->nTableSize == HT_MIN_SIZE)) {
        data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_MIN_MASK));
    } else {
        data = emalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK));
    }
    HT_SET_DATA_ADDR(ht, data);
    /* Don't overwrite iterator count. */
    ht->u.v.flags = HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
    HT_HASH_RESET_PACKED(ht);
}
/* Allocate storage for a mixed (real hash) table of ht->nTableSize buckets
 * and reset every hash slot to HT_INVALID_IDX (-1). The common HT_MIN_SIZE
 * case is fully inlined, filling the 16 hash slots with SSE2/NEON stores
 * where available and plain assignments otherwise. */
static zend_always_inline void zend_hash_real_init_mixed_ex(HashTable *ht)
{
    void *data;
    uint32_t nSize = ht->nTableSize;
    if (UNEXPECTED(GC_FLAGS(ht) & IS_ARRAY_PERSISTENT)) {
        data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), 1);
    } else if (EXPECTED(nSize == HT_MIN_SIZE)) {
        /* Fast path: minimal table, 16 hash slots set to -1 inline. */
        data = emalloc(HT_SIZE_EX(HT_MIN_SIZE, HT_SIZE_TO_MASK(HT_MIN_SIZE)));
        ht->nTableMask = HT_SIZE_TO_MASK(HT_MIN_SIZE);
        HT_SET_DATA_ADDR(ht, data);
        /* Don't overwrite iterator count. */
        ht->u.v.flags = HASH_FLAG_STATIC_KEYS;
#ifdef __SSE2__
        do {
            /* cmpeq of a register with itself yields all-ones, i.e. four
             * 32-bit -1 values per 128-bit store. */
            __m128i xmm0 = _mm_setzero_si128();
            xmm0 = _mm_cmpeq_epi8(xmm0, xmm0);
            _mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 0), xmm0);
            _mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 4), xmm0);
            _mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 8), xmm0);
            _mm_storeu_si128((__m128i*)&HT_HASH_EX(data, 12), xmm0);
        } while (0);
#elif defined(__aarch64__)
        do {
            int32x4_t t = vdupq_n_s32(-1);
            vst1q_s32((int32_t*)&HT_HASH_EX(data, 0), t);
            vst1q_s32((int32_t*)&HT_HASH_EX(data, 4), t);
            vst1q_s32((int32_t*)&HT_HASH_EX(data, 8), t);
            vst1q_s32((int32_t*)&HT_HASH_EX(data, 12), t);
        } while (0);
#else
        HT_HASH_EX(data, 0) = -1;
        HT_HASH_EX(data, 1) = -1;
        HT_HASH_EX(data, 2) = -1;
        HT_HASH_EX(data, 3) = -1;
        HT_HASH_EX(data, 4) = -1;
        HT_HASH_EX(data, 5) = -1;
        HT_HASH_EX(data, 6) = -1;
        HT_HASH_EX(data, 7) = -1;
        HT_HASH_EX(data, 8) = -1;
        HT_HASH_EX(data, 9) = -1;
        HT_HASH_EX(data, 10) = -1;
        HT_HASH_EX(data, 11) = -1;
        HT_HASH_EX(data, 12) = -1;
        HT_HASH_EX(data, 13) = -1;
        HT_HASH_EX(data, 14) = -1;
        HT_HASH_EX(data, 15) = -1;
#endif
        return;
    } else {
        data = emalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)));
    }
    ht->nTableMask = HT_SIZE_TO_MASK(nSize);
    HT_SET_DATA_ADDR(ht, data);
    HT_FLAGS(ht) = HASH_FLAG_STATIC_KEYS;
    HT_HASH_RESET(ht);
}
  197. static zend_always_inline void zend_hash_real_init_ex(HashTable *ht, int packed)
  198. {
  199. HT_ASSERT_RC1(ht);
  200. ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED);
  201. if (packed) {
  202. zend_hash_real_init_packed_ex(ht);
  203. } else {
  204. zend_hash_real_init_mixed_ex(ht);
  205. }
  206. }
/* Shared read-only hash part for every uninitialized table: two slots, both
 * HT_INVALID_IDX, sized by the minimal mask. arData of an uninitialized
 * table points just past these slots. */
static const uint32_t uninitialized_bucket[-HT_MIN_MASK] =
    {HT_INVALID_IDX, HT_INVALID_IDX};

/* The immutable, shared empty array. Refcount 2 and GC_IMMUTABLE keep it
 * from ever being freed or modified in place; code that would mutate it
 * must separate/copy first. */
ZEND_API const HashTable zend_empty_array = {
    .gc.refcount = 2,
    .gc.u.type_info = IS_ARRAY | (GC_IMMUTABLE << GC_FLAGS_SHIFT),
    .u.flags = HASH_FLAG_UNINITIALIZED,
    .nTableMask = HT_MIN_MASK,
    .arData = (Bucket*)&uninitialized_bucket[2],
    .nNumUsed = 0,
    .nNumOfElements = 0,
    .nTableSize = HT_MIN_SIZE,
    .nInternalPointer = 0,
    .nNextFreeElement = 0,
    .pDestructor = ZVAL_PTR_DTOR
};
/* Common lazy initializer: set up all header fields but allocate no bucket
 * storage yet (arData points at the shared uninitialized_bucket). Real
 * allocation happens on first insertion via zend_hash_real_init_*().
 * nNextFreeElement starts at ZEND_LONG_MIN as a sentinel meaning "no numeric
 * key appended yet". */
static zend_always_inline void _zend_hash_init_int(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent)
{
    GC_SET_REFCOUNT(ht, 1);
    GC_TYPE_INFO(ht) = IS_ARRAY | (persistent ? (GC_PERSISTENT << GC_FLAGS_SHIFT) : (GC_COLLECTABLE << GC_FLAGS_SHIFT));
    HT_FLAGS(ht) = HASH_FLAG_UNINITIALIZED;
    ht->nTableMask = HT_MIN_MASK;
    HT_SET_DATA_ADDR(ht, &uninitialized_bucket);
    ht->nNumUsed = 0;
    ht->nNumOfElements = 0;
    ht->nInternalPointer = 0;
    ht->nNextFreeElement = ZEND_LONG_MIN;
    ht->pDestructor = pDestructor;
    /* Remember the rounded size now; it is used when storage is allocated. */
    ht->nTableSize = zend_hash_check_size(nSize);
}
/* Public lazy initializer for a caller-provided HashTable. */
ZEND_API void ZEND_FASTCALL _zend_hash_init(HashTable *ht, uint32_t nSize, dtor_func_t pDestructor, zend_bool persistent)
{
    _zend_hash_init_int(ht, nSize, pDestructor, persistent);
}

/* Allocate and lazily initialize a minimal non-persistent array. */
ZEND_API HashTable* ZEND_FASTCALL _zend_new_array_0(void)
{
    HashTable *ht = emalloc(sizeof(HashTable));
    _zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
    return ht;
}

/* Allocate and lazily initialize a non-persistent array sized for nSize
 * elements (rounded up by _zend_hash_init_int). */
ZEND_API HashTable* ZEND_FASTCALL _zend_new_array(uint32_t nSize)
{
    HashTable *ht = emalloc(sizeof(HashTable));
    _zend_hash_init_int(ht, nSize, ZVAL_PTR_DTOR, 0);
    return ht;
}
  252. ZEND_API HashTable* ZEND_FASTCALL zend_new_pair(zval *val1, zval *val2)
  253. {
  254. Bucket *p;
  255. HashTable *ht = emalloc(sizeof(HashTable));
  256. _zend_hash_init_int(ht, HT_MIN_SIZE, ZVAL_PTR_DTOR, 0);
  257. ht->nNumUsed = ht->nNumOfElements = ht->nNextFreeElement = 2;
  258. zend_hash_real_init_packed_ex(ht);
  259. p = ht->arData;
  260. ZVAL_COPY_VALUE(&p->val, val1);
  261. p->h = 0;
  262. p->key = NULL;
  263. p++;
  264. ZVAL_COPY_VALUE(&p->val, val2);
  265. p->h = 1;
  266. p->key = NULL;
  267. return ht;
  268. }
/* Double the bucket storage of a packed table in place. Aborts with a fatal
 * error when doubling would exceed HT_MAX_SIZE. Only the used prefix
 * (HT_USED_SIZE) needs to be preserved across the realloc. */
static void ZEND_FASTCALL zend_hash_packed_grow(HashTable *ht)
{
    HT_ASSERT_RC1(ht);
    if (ht->nTableSize >= HT_MAX_SIZE) {
        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket), sizeof(Bucket));
    }
    ht->nTableSize += ht->nTableSize;
    HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
}
/* Public entry points performing the first real storage allocation for a
 * lazily-initialized table; thin checked wrappers over the *_ex inlines. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init(HashTable *ht, zend_bool packed)
{
    IS_CONSISTENT(ht);
    HT_ASSERT_RC1(ht);
    zend_hash_real_init_ex(ht, packed);
}

/* Allocate packed (vector) storage. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_packed(HashTable *ht)
{
    IS_CONSISTENT(ht);
    HT_ASSERT_RC1(ht);
    zend_hash_real_init_packed_ex(ht);
}

/* Allocate mixed (real hash) storage. */
ZEND_API void ZEND_FASTCALL zend_hash_real_init_mixed(HashTable *ht)
{
    IS_CONSISTENT(ht);
    HT_ASSERT_RC1(ht);
    zend_hash_real_init_mixed_ex(ht);
}
/* Convert a packed table to a real hash: allocate storage with a full hash
 * part, copy the used buckets over, free the old storage, and rebuild the
 * collision chains with zend_hash_rehash(). */
ZEND_API void ZEND_FASTCALL zend_hash_packed_to_hash(HashTable *ht)
{
    void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
    Bucket *old_buckets = ht->arData;
    uint32_t nSize = ht->nTableSize;
    HT_ASSERT_RC1(ht);
    HT_FLAGS(ht) &= ~HASH_FLAG_PACKED;
    new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
    ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
    HT_SET_DATA_ADDR(ht, new_data);
    memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
    pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
    /* Collision chains are stale after the copy; rebuild them. */
    zend_hash_rehash(ht);
}
/* Convert a real hash to packed layout. The caller must have verified that
 * the keys form a dense 0..n-1 numeric sequence; only the bucket vector is
 * copied and the hash part shrinks to the minimal mask. */
ZEND_API void ZEND_FASTCALL zend_hash_to_packed(HashTable *ht)
{
    void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
    Bucket *old_buckets = ht->arData;
    HT_ASSERT_RC1(ht);
    new_data = pemalloc(HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
    HT_FLAGS(ht) |= HASH_FLAG_PACKED | HASH_FLAG_STATIC_KEYS;
    ht->nTableMask = HT_MIN_MASK;
    HT_SET_DATA_ADDR(ht, new_data);
    HT_HASH_RESET_PACKED(ht);
    memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
    pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
/* Pre-size the table so it can hold at least nSize elements without further
 * reallocation. Handles three cases: still uninitialized (allocate now in
 * the requested layout), already packed (realloc the vector), or already a
 * real hash (allocate, copy, free, rehash). The packed flag must match the
 * table's current layout once initialized. */
ZEND_API void ZEND_FASTCALL zend_hash_extend(HashTable *ht, uint32_t nSize, zend_bool packed)
{
    HT_ASSERT_RC1(ht);
    if (nSize == 0) return;
    if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
        if (nSize > ht->nTableSize) {
            ht->nTableSize = zend_hash_check_size(nSize);
        }
        zend_hash_real_init(ht, packed);
    } else {
        if (packed) {
            ZEND_ASSERT(HT_FLAGS(ht) & HASH_FLAG_PACKED);
            if (nSize > ht->nTableSize) {
                ht->nTableSize = zend_hash_check_size(nSize);
                HT_SET_DATA_ADDR(ht, perealloc2(HT_GET_DATA_ADDR(ht), HT_SIZE_EX(ht->nTableSize, HT_MIN_MASK), HT_USED_SIZE(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT));
            }
        } else {
            ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_PACKED));
            if (nSize > ht->nTableSize) {
                void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
                Bucket *old_buckets = ht->arData;
                nSize = zend_hash_check_size(nSize);
                ht->nTableSize = nSize;
                new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
                ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
                HT_SET_DATA_ADDR(ht, new_data);
                memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
                pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
                /* Hash part moved and resized: rebuild collision chains. */
                zend_hash_rehash(ht);
            }
        }
    }
}
/* Roll the table back to its first nNumUsed buckets, dropping everything
 * appended after that point. Walks the discarded tail backwards, unlinking
 * each live bucket from the head of its collision chain — valid because
 * chains always point from higher to lower bucket indices, so the newest
 * bucket in a chain is its head. Values are NOT destructed here. */
ZEND_API void ZEND_FASTCALL zend_hash_discard(HashTable *ht, uint32_t nNumUsed)
{
    Bucket *p, *end, *arData;
    uint32_t nIndex;
    arData = ht->arData;
    p = arData + ht->nNumUsed;
    end = arData + nNumUsed;
    ht->nNumUsed = nNumUsed;
    while (p != end) {
        p--;
        if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
        ht->nNumOfElements--;
        /* Collision pointers always directed from higher to lower buckets */
#if 0
        if (!(Z_NEXT(p->val) == HT_INVALID_IDX || HT_HASH_TO_BUCKET_EX(arData, Z_NEXT(p->val)) < p)) {
            abort();
        }
#endif
        nIndex = p->h | ht->nTableMask;
        /* Pop this bucket off the front of its chain. */
        HT_HASH_EX(arData, nIndex) = Z_NEXT(p->val);
    }
}
/* Count the logically-present elements of a table that may contain INDIRECT
 * slots (e.g. symbol tables): start from nNumOfElements and subtract every
 * INDIRECT entry whose target is IS_UNDEF. */
static uint32_t zend_array_recalc_elements(HashTable *ht)
{
    zval *val;
    uint32_t num = ht->nNumOfElements;
    ZEND_HASH_FOREACH_VAL(ht, val) {
        if (Z_TYPE_P(val) == IS_INDIRECT) {
            if (UNEXPECTED(Z_TYPE_P(Z_INDIRECT_P(val)) == IS_UNDEF)) {
                num--;
            }
        }
    } ZEND_HASH_FOREACH_END();
    return num;
}
  391. /* }}} */
/* Element count as seen by userland count(). Tables flagged as possibly
 * containing empty INDIRECT slots — and the global symbol table, which is
 * never flagged — need a full recount; the flag is cleared once a recount
 * shows no empty indirections remain. */
ZEND_API uint32_t zend_array_count(HashTable *ht)
{
    uint32_t num;
    if (UNEXPECTED(HT_FLAGS(ht) & HASH_FLAG_HAS_EMPTY_IND)) {
        num = zend_array_recalc_elements(ht);
        /* All indirections resolved: drop the flag to restore O(1) counts. */
        if (UNEXPECTED(ht->nNumOfElements == num)) {
            HT_FLAGS(ht) &= ~HASH_FLAG_HAS_EMPTY_IND;
        }
    } else if (UNEXPECTED(ht == &EG(symbol_table))) {
        num = zend_array_recalc_elements(ht);
    } else {
        num = zend_hash_num_elements(ht);
    }
    return num;
}
  407. /* }}} */
  408. static zend_always_inline HashPosition _zend_hash_get_valid_pos(const HashTable *ht, HashPosition pos)
  409. {
  410. while (pos < ht->nNumUsed && Z_ISUNDEF(ht->arData[pos].val)) {
  411. pos++;
  412. }
  413. return pos;
  414. }
/* Internal pointer, normalized to the next live bucket. */
static zend_always_inline HashPosition _zend_hash_get_current_pos(const HashTable *ht)
{
    return _zend_hash_get_valid_pos(ht, ht->nInternalPointer);
}

/* Public wrapper around _zend_hash_get_current_pos(). */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_get_current_pos(const HashTable *ht)
{
    return _zend_hash_get_current_pos(ht);
}
/* Register a new external iterator over ht at position pos and return its
 * slot index in EG(ht_iterators). Reuses the first free slot; when the
 * array is full it grows by 8 slots (moving off the fixed
 * EG(ht_iterators_slots) buffer on first growth). The table's iterator
 * count is bumped unless it has already saturated (overflowed). */
ZEND_API uint32_t ZEND_FASTCALL zend_hash_iterator_add(HashTable *ht, HashPosition pos)
{
    HashTableIterator *iter = EG(ht_iterators);
    HashTableIterator *end = iter + EG(ht_iterators_count);
    uint32_t idx;
    if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
        HT_INC_ITERATORS_COUNT(ht);
    }
    /* Reuse a free slot if one exists. */
    while (iter != end) {
        if (iter->ht == NULL) {
            iter->ht = ht;
            iter->pos = pos;
            idx = iter - EG(ht_iterators);
            if (idx + 1 > EG(ht_iterators_used)) {
                EG(ht_iterators_used) = idx + 1;
            }
            return idx;
        }
        iter++;
    }
    /* No free slot: grow the iterator array by 8 entries. The first growth
     * must copy out of the statically-sized slots buffer. */
    if (EG(ht_iterators) == EG(ht_iterators_slots)) {
        EG(ht_iterators) = emalloc(sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
        memcpy(EG(ht_iterators), EG(ht_iterators_slots), sizeof(HashTableIterator) * EG(ht_iterators_count));
    } else {
        EG(ht_iterators) = erealloc(EG(ht_iterators), sizeof(HashTableIterator) * (EG(ht_iterators_count) + 8));
    }
    iter = EG(ht_iterators) + EG(ht_iterators_count);
    EG(ht_iterators_count) += 8;
    iter->ht = ht;
    iter->pos = pos;
    /* Mark the remaining 7 new slots free. */
    memset(iter + 1, 0, sizeof(HashTableIterator) * 7);
    idx = iter - EG(ht_iterators);
    EG(ht_iterators_used) = idx + 1;
    return idx;
}
/* Return iterator idx's position within ht. If the iterator currently
 * tracks a different table (e.g. the array was reallocated/replaced),
 * re-attach it: move the iterator count from the old table (skipping
 * poisoned/NULL/overflowed ones) to the new table and reset the position
 * to the table's normalized internal pointer. */
ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos(uint32_t idx, HashTable *ht)
{
    HashTableIterator *iter = EG(ht_iterators) + idx;
    ZEND_ASSERT(idx != (uint32_t)-1);
    if (UNEXPECTED(iter->ht != ht)) {
        if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
            && EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
            HT_DEC_ITERATORS_COUNT(iter->ht);
        }
        if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
            HT_INC_ITERATORS_COUNT(ht);
        }
        iter->ht = ht;
        iter->pos = _zend_hash_get_current_pos(ht);
    }
    return iter->pos;
}
  475. ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterator_pos_ex(uint32_t idx, zval *array)
  476. {
  477. HashTable *ht = Z_ARRVAL_P(array);
  478. HashTableIterator *iter = EG(ht_iterators) + idx;
  479. ZEND_ASSERT(idx != (uint32_t)-1);
  480. if (UNEXPECTED(iter->ht != ht)) {
  481. if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
  482. && EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
  483. HT_DEC_ITERATORS_COUNT(iter->ht);
  484. }
  485. SEPARATE_ARRAY(array);
  486. ht = Z_ARRVAL_P(array);
  487. if (EXPECTED(!HT_ITERATORS_OVERFLOW(ht))) {
  488. HT_INC_ITERATORS_COUNT(ht);
  489. }
  490. iter->ht = ht;
  491. iter->pos = _zend_hash_get_current_pos(ht);
  492. }
  493. return iter->pos;
  494. }
/* Release iterator slot idx: drop the tracked table's iterator count
 * (unless the table is gone, poisoned, or its count saturated), free the
 * slot, and shrink ht_iterators_used past any trailing free slots. */
ZEND_API void ZEND_FASTCALL zend_hash_iterator_del(uint32_t idx)
{
    HashTableIterator *iter = EG(ht_iterators) + idx;
    ZEND_ASSERT(idx != (uint32_t)-1);
    if (EXPECTED(iter->ht) && EXPECTED(iter->ht != HT_POISONED_PTR)
        && EXPECTED(!HT_ITERATORS_OVERFLOW(iter->ht))) {
        ZEND_ASSERT(HT_ITERATORS_COUNT(iter->ht) != 0);
        HT_DEC_ITERATORS_COUNT(iter->ht);
    }
    iter->ht = NULL;
    /* If this was the highest used slot, trim the used watermark. */
    if (idx == EG(ht_iterators_used) - 1) {
        while (idx > 0 && EG(ht_iterators)[idx - 1].ht == NULL) {
            idx--;
        }
        EG(ht_iterators_used) = idx;
    }
}
  512. static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht)
  513. {
  514. HashTableIterator *iter = EG(ht_iterators);
  515. HashTableIterator *end = iter + EG(ht_iterators_used);
  516. while (iter != end) {
  517. if (iter->ht == ht) {
  518. iter->ht = HT_POISONED_PTR;
  519. }
  520. iter++;
  521. }
  522. }
/* Fast-path wrapper: only scan the iterator array when the table actually
 * has registered iterators. */
static zend_always_inline void zend_hash_iterators_remove(HashTable *ht)
{
    if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
        _zend_hash_iterators_remove(ht);
    }
}
  529. ZEND_API HashPosition ZEND_FASTCALL zend_hash_iterators_lower_pos(HashTable *ht, HashPosition start)
  530. {
  531. HashTableIterator *iter = EG(ht_iterators);
  532. HashTableIterator *end = iter + EG(ht_iterators_used);
  533. HashPosition res = ht->nNumUsed;
  534. while (iter != end) {
  535. if (iter->ht == ht) {
  536. if (iter->pos >= start && iter->pos < res) {
  537. res = iter->pos;
  538. }
  539. }
  540. iter++;
  541. }
  542. return res;
  543. }
  544. ZEND_API void ZEND_FASTCALL _zend_hash_iterators_update(HashTable *ht, HashPosition from, HashPosition to)
  545. {
  546. HashTableIterator *iter = EG(ht_iterators);
  547. HashTableIterator *end = iter + EG(ht_iterators_used);
  548. while (iter != end) {
  549. if (iter->ht == ht && iter->pos == from) {
  550. iter->pos = to;
  551. }
  552. iter++;
  553. }
  554. }
  555. ZEND_API void ZEND_FASTCALL zend_hash_iterators_advance(HashTable *ht, HashPosition step)
  556. {
  557. HashTableIterator *iter = EG(ht_iterators);
  558. HashTableIterator *end = iter + EG(ht_iterators_used);
  559. while (iter != end) {
  560. if (iter->ht == ht) {
  561. iter->pos += step;
  562. }
  563. iter++;
  564. }
  565. }
/* Look up a string key's bucket, or NULL when absent. known_hash tells us
 * the caller guarantees ZSTR_H(key) is already computed; otherwise the hash
 * is computed (and cached) here. Each chain step first tries the cheap
 * pointer-identity test, which succeeds for interned strings, before the
 * full hash + content comparison. */
static zend_always_inline Bucket *zend_hash_find_bucket(const HashTable *ht, zend_string *key, zend_bool known_hash)
{
    zend_ulong h;
    uint32_t nIndex;
    uint32_t idx;
    Bucket *p, *arData;
    if (known_hash) {
        h = ZSTR_H(key);
    } else {
        h = zend_string_hash_val(key);
    }
    arData = ht->arData;
    nIndex = h | ht->nTableMask;
    idx = HT_HASH_EX(arData, nIndex);
    if (UNEXPECTED(idx == HT_INVALID_IDX)) {
        return NULL;
    }
    p = HT_HASH_TO_BUCKET_EX(arData, idx);
    if (EXPECTED(p->key == key)) { /* check for the same interned string */
        return p;
    }
    /* Walk the collision chain. */
    while (1) {
        if (p->h == ZSTR_H(key) &&
            EXPECTED(p->key) &&
            zend_string_equal_content(p->key, key)) {
            return p;
        }
        idx = Z_NEXT(p->val);
        if (idx == HT_INVALID_IDX) {
            return NULL;
        }
        p = HT_HASH_TO_BUCKET_EX(arData, idx);
        if (p->key == key) { /* check for the same interned string */
            return p;
        }
    }
}
/* Look up a (char*, len) key with precomputed hash h; return its bucket or
 * NULL. A bucket matches when it has a string key (p->key != NULL) with the
 * same hash, length, and bytes. */
static zend_always_inline Bucket *zend_hash_str_find_bucket(const HashTable *ht, const char *str, size_t len, zend_ulong h)
{
    uint32_t nIndex;
    uint32_t idx;
    Bucket *p, *arData;
    arData = ht->arData;
    nIndex = h | ht->nTableMask;
    idx = HT_HASH_EX(arData, nIndex);
    while (idx != HT_INVALID_IDX) {
        ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
        p = HT_HASH_TO_BUCKET_EX(arData, idx);
        if ((p->h == h)
            && p->key
            && (ZSTR_LEN(p->key) == len)
            && !memcmp(ZSTR_VAL(p->key), str, len)) {
            return p;
        }
        idx = Z_NEXT(p->val);
    }
    return NULL;
}
  624. static zend_always_inline Bucket *zend_hash_index_find_bucket(const HashTable *ht, zend_ulong h)
  625. {
  626. uint32_t nIndex;
  627. uint32_t idx;
  628. Bucket *p, *arData;
  629. arData = ht->arData;
  630. nIndex = h | ht->nTableMask;
  631. idx = HT_HASH_EX(arData, nIndex);
  632. while (idx != HT_INVALID_IDX) {
  633. ZEND_ASSERT(idx < HT_IDX_TO_HASH(ht->nTableSize));
  634. p = HT_HASH_TO_BUCKET_EX(arData, idx);
  635. if (p->h == h && !p->key) {
  636. return p;
  637. }
  638. idx = Z_NEXT(p->val);
  639. }
  640. return NULL;
  641. }
/* Core insert/update for zend_string keys. flag combines HASH_ADD,
 * HASH_ADD_NEW (caller guarantees the key is absent), HASH_UPDATE and
 * HASH_UPDATE_INDIRECT. Returns the stored zval, or NULL when HASH_ADD
 * finds an existing (non-empty) entry. Takes a reference on non-interned
 * keys; copies *pData by value (ownership transfers to the table). */
static zend_always_inline zval *_zend_hash_add_or_update_i(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
{
    zend_ulong h;
    uint32_t nIndex;
    uint32_t idx;
    Bucket *p, *arData;
    IS_CONSISTENT(ht);
    HT_ASSERT_RC1(ht);
    if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
        if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
            /* First insertion: allocate mixed storage; table is empty, so
             * the key cannot already exist — insert directly. */
            zend_hash_real_init_mixed(ht);
            if (!ZSTR_IS_INTERNED(key)) {
                zend_string_addref(key);
                HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
                zend_string_hash_val(key);
            }
            goto add_to_hash;
        } else {
            /* A string key forces conversion from packed to real hash. */
            zend_hash_packed_to_hash(ht);
            if (!ZSTR_IS_INTERNED(key)) {
                zend_string_addref(key);
                HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
                zend_string_hash_val(key);
            }
        }
    } else if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
        /* Look for an existing entry (in debug builds even for ADD_NEW, to
         * assert the caller's "key is new" guarantee). */
        p = zend_hash_find_bucket(ht, key, 0);
        if (p) {
            zval *data;
            ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
            if (flag & HASH_ADD) {
                if (!(flag & HASH_UPDATE_INDIRECT)) {
                    return NULL;
                }
                /* ADD + UPDATE_INDIRECT: only an INDIRECT slot whose target
                 * is UNDEF may be overwritten. */
                ZEND_ASSERT(&p->val != pData);
                data = &p->val;
                if (Z_TYPE_P(data) == IS_INDIRECT) {
                    data = Z_INDIRECT_P(data);
                    if (Z_TYPE_P(data) != IS_UNDEF) {
                        return NULL;
                    }
                } else {
                    return NULL;
                }
            } else {
                ZEND_ASSERT(&p->val != pData);
                data = &p->val;
                if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
                    data = Z_INDIRECT_P(data);
                }
            }
            /* Destroy the old value before overwriting it. */
            if (ht->pDestructor) {
                ht->pDestructor(data);
            }
            ZVAL_COPY_VALUE(data, pData);
            return data;
        }
        if (!ZSTR_IS_INTERNED(key)) {
            zend_string_addref(key);
            HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
        }
    } else if (!ZSTR_IS_INTERNED(key)) {
        zend_string_addref(key);
        HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
        zend_string_hash_val(key);
    }
    ZEND_HASH_IF_FULL_DO_RESIZE(ht); /* If the Hash table is full, resize it */
add_to_hash:
    /* Append a new bucket and link it at the head of its collision chain. */
    idx = ht->nNumUsed++;
    ht->nNumOfElements++;
    arData = ht->arData;
    p = arData + idx;
    p->key = key;
    p->h = h = ZSTR_H(key);
    nIndex = h | ht->nTableMask;
    Z_NEXT(p->val) = HT_HASH_EX(arData, nIndex);
    HT_HASH_EX(arData, nIndex) = HT_IDX_TO_HASH(idx);
    ZVAL_COPY_VALUE(&p->val, pData);
    return &p->val;
}
/* Core insert/update for (char*, len) keys with precomputed hash h. Same
 * flag semantics as _zend_hash_add_or_update_i(). On insertion a fresh
 * zend_string key is allocated (persistent when the array is persistent),
 * so STATIC_KEYS is always cleared. Returns the stored zval, or NULL when
 * HASH_ADD finds an existing (non-empty) entry. */
static zend_always_inline zval *_zend_hash_str_add_or_update_i(HashTable *ht, const char *str, size_t len, zend_ulong h, zval *pData, uint32_t flag)
{
    zend_string *key;
    uint32_t nIndex;
    uint32_t idx;
    Bucket *p;
    IS_CONSISTENT(ht);
    HT_ASSERT_RC1(ht);
    if (UNEXPECTED(HT_FLAGS(ht) & (HASH_FLAG_UNINITIALIZED|HASH_FLAG_PACKED))) {
        if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
            /* First insertion into an empty table: no lookup needed. */
            zend_hash_real_init_mixed(ht);
            goto add_to_hash;
        } else {
            /* A string key forces conversion from packed to real hash. */
            zend_hash_packed_to_hash(ht);
        }
    } else if ((flag & HASH_ADD_NEW) == 0) {
        p = zend_hash_str_find_bucket(ht, str, len, h);
        if (p) {
            zval *data;
            if (flag & HASH_ADD) {
                if (!(flag & HASH_UPDATE_INDIRECT)) {
                    return NULL;
                }
                /* ADD + UPDATE_INDIRECT: only an INDIRECT slot whose target
                 * is UNDEF may be overwritten. */
                ZEND_ASSERT(&p->val != pData);
                data = &p->val;
                if (Z_TYPE_P(data) == IS_INDIRECT) {
                    data = Z_INDIRECT_P(data);
                    if (Z_TYPE_P(data) != IS_UNDEF) {
                        return NULL;
                    }
                } else {
                    return NULL;
                }
            } else {
                ZEND_ASSERT(&p->val != pData);
                data = &p->val;
                if ((flag & HASH_UPDATE_INDIRECT) && Z_TYPE_P(data) == IS_INDIRECT) {
                    data = Z_INDIRECT_P(data);
                }
            }
            /* Destroy the old value before overwriting it. */
            if (ht->pDestructor) {
                ht->pDestructor(data);
            }
            ZVAL_COPY_VALUE(data, pData);
            return data;
        }
    }
    ZEND_HASH_IF_FULL_DO_RESIZE(ht); /* If the Hash table is full, resize it */
add_to_hash:
    /* Append a new bucket with a freshly allocated key and link it at the
     * head of its collision chain. */
    idx = ht->nNumUsed++;
    ht->nNumOfElements++;
    p = ht->arData + idx;
    p->key = key = zend_string_init(str, len, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
    p->h = ZSTR_H(key) = h; /* seed the new string's cached hash */
    HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
    ZVAL_COPY_VALUE(&p->val, pData);
    nIndex = h | ht->nTableMask;
    Z_NEXT(p->val) = HT_HASH(ht, nIndex);
    HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
    return &p->val;
}
  783. ZEND_API zval* ZEND_FASTCALL zend_hash_add_or_update(HashTable *ht, zend_string *key, zval *pData, uint32_t flag)
  784. {
  785. if (flag == HASH_ADD) {
  786. return zend_hash_add(ht, key, pData);
  787. } else if (flag == HASH_ADD_NEW) {
  788. return zend_hash_add_new(ht, key, pData);
  789. } else if (flag == HASH_UPDATE) {
  790. return zend_hash_update(ht, key, pData);
  791. } else {
  792. ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
  793. return zend_hash_update_ind(ht, key, pData);
  794. }
  795. }
/* Insert only; returns NULL if the key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_add(HashTable *ht, zend_string *key, zval *pData)
{
    return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD);
}

/* Insert or overwrite. */
ZEND_API zval* ZEND_FASTCALL zend_hash_update(HashTable *ht, zend_string *key, zval *pData)
{
    return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE);
}

/* Insert or overwrite, following IS_INDIRECT slots to their target. */
ZEND_API zval* ZEND_FASTCALL zend_hash_update_ind(HashTable *ht, zend_string *key, zval *pData)
{
    return _zend_hash_add_or_update_i(ht, key, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
}

/* Insert; the caller guarantees the key is not present (skips the lookup). */
ZEND_API zval* ZEND_FASTCALL zend_hash_add_new(HashTable *ht, zend_string *key, zval *pData)
{
    return _zend_hash_add_or_update_i(ht, key, pData, HASH_ADD_NEW);
}
  812. ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_or_update(HashTable *ht, const char *str, size_t len, zval *pData, uint32_t flag)
  813. {
  814. if (flag == HASH_ADD) {
  815. return zend_hash_str_add(ht, str, len, pData);
  816. } else if (flag == HASH_ADD_NEW) {
  817. return zend_hash_str_add_new(ht, str, len, pData);
  818. } else if (flag == HASH_UPDATE) {
  819. return zend_hash_str_update(ht, str, len, pData);
  820. } else {
  821. ZEND_ASSERT(flag == (HASH_UPDATE|HASH_UPDATE_INDIRECT));
  822. return zend_hash_str_update_ind(ht, str, len, pData);
  823. }
  824. }
/* (char*, len) wrappers: hash the key once, then delegate to the core
 * insert/update routine with the matching flag. */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_update(HashTable *ht, const char *str, size_t len, zval *pData)
{
    zend_ulong h = zend_hash_func(str, len);
    return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE);
}

/* Insert or overwrite, following IS_INDIRECT slots to their target. */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_update_ind(HashTable *ht, const char *str, size_t len, zval *pData)
{
    zend_ulong h = zend_hash_func(str, len);
    return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_UPDATE | HASH_UPDATE_INDIRECT);
}

/* Insert only; returns NULL if the key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_add(HashTable *ht, const char *str, size_t len, zval *pData)
{
    zend_ulong h = zend_hash_func(str, len);
    return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD);
}

/* Insert; the caller guarantees the key is not present (skips the lookup). */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_new(HashTable *ht, const char *str, size_t len, zval *pData)
{
    zend_ulong h = zend_hash_func(str, len);
    return _zend_hash_str_add_or_update_i(ht, str, len, h, pData, HASH_ADD_NEW);
}
/* Insert a NULL placeholder value under integer key `h`.
 * Returns the stored zval, or NULL if the key already exists (HASH_ADD). */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_empty_element(HashTable *ht, zend_ulong h)
{
	zval dummy;

	ZVAL_NULL(&dummy);
	return zend_hash_index_add(ht, h, &dummy);
}
/* Insert a NULL placeholder value under string key `key`.
 * Returns the stored zval, or NULL if the key already exists (HASH_ADD). */
ZEND_API zval* ZEND_FASTCALL zend_hash_add_empty_element(HashTable *ht, zend_string *key)
{
	zval dummy;

	ZVAL_NULL(&dummy);
	return zend_hash_add(ht, key, &dummy);
}
/* Insert a NULL placeholder value under string key `str`/`len`.
 * Returns the stored zval, or NULL if the key already exists (HASH_ADD). */
ZEND_API zval* ZEND_FASTCALL zend_hash_str_add_empty_element(HashTable *ht, const char *str, size_t len)
{
	zval dummy;

	ZVAL_NULL(&dummy);
	return zend_hash_str_add(ht, str, len, &dummy);
}
/* Core worker behind all integer-key insert/update entry points.
 *
 * `flag` is a bitmask of HASH_ADD / HASH_ADD_NEW / HASH_ADD_NEXT /
 * HASH_UPDATE selecting the behavior on an existing key (NULL return for
 * pure ADD, overwrite for UPDATE) and which checks may be skipped
 * (ADD_NEW: caller guarantees the key is absent).
 * Returns a pointer to the stored zval, or NULL when HASH_ADD found an
 * existing entry. *pData is copied by value; ownership of its contents
 * transfers to the table. */
static zend_always_inline zval *_zend_hash_index_add_or_update_i(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	/* nNextFreeElement is ZEND_LONG_MIN on a freshly cleaned table (see
	 * zend_hash_clean()), so the first appended element gets index 0. */
	if ((flag & HASH_ADD_NEXT) && h == ZEND_LONG_MIN) {
		h = 0;
	}
	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		if (h < ht->nNumUsed) {
			p = ht->arData + h;
			if (Z_TYPE(p->val) != IS_UNDEF) {
replace:
				if (flag & HASH_ADD) {
					/* pure ADD must not overwrite an existing entry */
					return NULL;
				}
				if (ht->pDestructor) {
					ht->pDestructor(&p->val);
				}
				ZVAL_COPY_VALUE(&p->val, pData);
				return &p->val;
			} else { /* we have to keep the order :( */
				goto convert_to_hash;
			}
		} else if (EXPECTED(h < ht->nTableSize)) {
add_to_packed:
			p = ht->arData + h;
			/* incremental initialization of empty Buckets */
			if ((flag & (HASH_ADD_NEW|HASH_ADD_NEXT)) != (HASH_ADD_NEW|HASH_ADD_NEXT)) {
				if (h > ht->nNumUsed) {
					Bucket *q = ht->arData + ht->nNumUsed;
					while (q != p) {
						ZVAL_UNDEF(&q->val);
						q++;
					}
				}
			}
			ht->nNextFreeElement = ht->nNumUsed = h + 1;
			goto add;
		} else if ((h >> 1) < ht->nTableSize &&
		           (ht->nTableSize >> 1) < ht->nNumOfElements) {
			/* index is at most one doubling away and the table is more than
			 * half full: growing keeps the packed layout worthwhile */
			zend_hash_packed_grow(ht);
			goto add_to_packed;
		} else {
			/* sparse index: give up on the packed layout */
			if (ht->nNumUsed >= ht->nTableSize) {
				ht->nTableSize += ht->nTableSize;
			}
convert_to_hash:
			zend_hash_packed_to_hash(ht);
		}
	} else if (HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED) {
		/* lazy allocation: small integer keys start out packed */
		if (h < ht->nTableSize) {
			zend_hash_real_init_packed_ex(ht);
			goto add_to_packed;
		}
		zend_hash_real_init_mixed(ht);
	} else {
		/* With HASH_ADD_NEW the lookup is skipped in release builds; in
		 * debug builds it still runs to assert the key is really absent. */
		if ((flag & HASH_ADD_NEW) == 0 || ZEND_DEBUG) {
			p = zend_hash_index_find_bucket(ht, h);
			if (p) {
				ZEND_ASSERT((flag & HASH_ADD_NEW) == 0);
				goto replace;
			}
		}
		ZEND_HASH_IF_FULL_DO_RESIZE(ht);		/* If the Hash table is full, resize it */
	}

	/* append a new bucket and link it at the head of its collision chain */
	idx = ht->nNumUsed++;
	nIndex = h | ht->nTableMask;
	p = ht->arData + idx;
	Z_NEXT(p->val) = HT_HASH(ht, nIndex);
	HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(idx);
	if ((zend_long)h >= ht->nNextFreeElement) {
		/* saturate at ZEND_LONG_MAX instead of wrapping around */
		ht->nNextFreeElement = (zend_long)h < ZEND_LONG_MAX ? h + 1 : ZEND_LONG_MAX;
	}
add:
	ht->nNumOfElements++;
	p->h = h;
	p->key = NULL;
	ZVAL_COPY_VALUE(&p->val, pData);

	return &p->val;
}
  946. ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_or_update(HashTable *ht, zend_ulong h, zval *pData, uint32_t flag)
  947. {
  948. if (flag == HASH_ADD) {
  949. return zend_hash_index_add(ht, h, pData);
  950. } else if (flag == (HASH_ADD|HASH_ADD_NEW)) {
  951. return zend_hash_index_add_new(ht, h, pData);
  952. } else if (flag == (HASH_ADD|HASH_ADD_NEXT)) {
  953. ZEND_ASSERT(h == ht->nNextFreeElement);
  954. return zend_hash_next_index_insert(ht, pData);
  955. } else if (flag == (HASH_ADD|HASH_ADD_NEW|HASH_ADD_NEXT)) {
  956. ZEND_ASSERT(h == ht->nNextFreeElement);
  957. return zend_hash_next_index_insert_new(ht, pData);
  958. } else {
  959. ZEND_ASSERT(flag == HASH_UPDATE);
  960. return zend_hash_index_update(ht, h, pData);
  961. }
  962. }
/* Insert `h` => `pData` only if the key is absent; returns NULL when the
 * key already exists. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD);
}
/* Insert `h` => `pData`; the caller guarantees the key is not yet present
 * (HASH_ADD_NEW skips the duplicate lookup in release builds). */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_add_new(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_ADD | HASH_ADD_NEW);
}
/* Insert or overwrite the entry for integer key `h`. */
ZEND_API zval* ZEND_FASTCALL zend_hash_index_update(HashTable *ht, zend_ulong h, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, h, pData, HASH_UPDATE);
}
/* Append `pData` under the next free integer key (PHP's `$a[] = ...`).
 * Returns NULL if that slot is unexpectedly occupied (HASH_ADD). */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEXT);
}
/* Append `pData` under the next free integer key; the caller guarantees
 * the slot is free, so the duplicate check may be skipped (HASH_ADD_NEW). */
ZEND_API zval* ZEND_FASTCALL zend_hash_next_index_insert_new(HashTable *ht, zval *pData)
{
	return _zend_hash_index_add_or_update_i(ht, ht->nNextFreeElement, pData, HASH_ADD | HASH_ADD_NEW | HASH_ADD_NEXT);
}
/* Change the string key of existing bucket `b` in place, keeping its
 * position in the bucket array (and therefore the iteration order).
 * Returns NULL when a different entry already uses `key`; returns
 * &b->val on success (including when `b` itself already has that key).
 * Must not be called on packed tables. */
ZEND_API zval* ZEND_FASTCALL zend_hash_set_bucket_key(HashTable *ht, Bucket *b, zend_string *key)
{
	uint32_t nIndex;
	uint32_t idx, i;
	Bucket *p, *arData;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	ZEND_ASSERT(!(HT_FLAGS(ht) & HASH_FLAG_PACKED));

	p = zend_hash_find_bucket(ht, key, 0);
	if (UNEXPECTED(p)) {
		return (p == b) ? &p->val : NULL;
	}

	if (!ZSTR_IS_INTERNED(key)) {
		zend_string_addref(key);
		HT_FLAGS(ht) &= ~HASH_FLAG_STATIC_KEYS;
	}

	arData = ht->arData;

	/* del from hash: unlink b from its current collision chain */
	idx = HT_IDX_TO_HASH(b - arData);
	nIndex = b->h | ht->nTableMask;
	i = HT_HASH_EX(arData, nIndex);
	if (i == idx) {
		HT_HASH_EX(arData, nIndex) = Z_NEXT(b->val);
	} else {
		p = HT_HASH_TO_BUCKET_EX(arData, i);
		while (Z_NEXT(p->val) != idx) {
			i = Z_NEXT(p->val);
			p = HT_HASH_TO_BUCKET_EX(arData, i);
		}
		Z_NEXT(p->val) = Z_NEXT(b->val);
	}

	zend_string_release(b->key);

	/* add to hash: re-link under the new key's chain, keeping the chain
	 * ordered by descending bucket index (newest first), as produced by
	 * normal head insertion */
	idx = b - arData;
	b->key = key;
	b->h = ZSTR_H(key);
	nIndex = b->h | ht->nTableMask;
	idx = HT_IDX_TO_HASH(idx);
	i = HT_HASH_EX(arData, nIndex);
	if (i == HT_INVALID_IDX || i < idx) {
		Z_NEXT(b->val) = i;
		HT_HASH_EX(arData, nIndex) = idx;
	} else {
		p = HT_HASH_TO_BUCKET_EX(arData, i);
		while (Z_NEXT(p->val) != HT_INVALID_IDX && Z_NEXT(p->val) > idx) {
			i = Z_NEXT(p->val);
			p = HT_HASH_TO_BUCKET_EX(arData, i);
		}
		Z_NEXT(b->val) = Z_NEXT(p->val);
		Z_NEXT(p->val) = idx;
	}

	return &b->val;
}
/* Grow or compact a full (non-packed) table. If enough deleted holes have
 * accumulated, a rehash compacts them away without reallocating; otherwise
 * the allocation is doubled and everything rehashed into it. Aborts when
 * HT_MAX_SIZE would be exceeded. */
static void ZEND_FASTCALL zend_hash_do_resize(HashTable *ht)
{

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed > ht->nNumOfElements + (ht->nNumOfElements >> 5)) { /* additional term is there to amortize the cost of compaction */
		zend_hash_rehash(ht);
	} else if (ht->nTableSize < HT_MAX_SIZE) {	/* Let's double the table size */
		void *new_data, *old_data = HT_GET_DATA_ADDR(ht);
		uint32_t nSize = ht->nTableSize + ht->nTableSize;
		Bucket *old_buckets = ht->arData;

		ht->nTableSize = nSize;
		new_data = pemalloc(HT_SIZE_EX(nSize, HT_SIZE_TO_MASK(nSize)), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		ht->nTableMask = HT_SIZE_TO_MASK(ht->nTableSize);
		HT_SET_DATA_ADDR(ht, new_data);
		/* only the bucket array is copied; the hash part is rebuilt below */
		memcpy(ht->arData, old_buckets, sizeof(Bucket) * ht->nNumUsed);
		pefree(old_data, GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
		zend_hash_rehash(ht);
	} else {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%u * %zu + %zu)", ht->nTableSize * 2, sizeof(Bucket) + sizeof(uint32_t), sizeof(Bucket));
	}
}
/* Rebuild all collision chains from the bucket array. If the array contains
 * IS_UNDEF holes, live buckets are simultaneously compacted downwards and
 * the internal pointer plus any active external iterators are retargeted
 * to the moved positions. */
ZEND_API void ZEND_FASTCALL zend_hash_rehash(HashTable *ht)
{
	Bucket *p;
	uint32_t nIndex, i;

	IS_CONSISTENT(ht);

	if (UNEXPECTED(ht->nNumOfElements == 0)) {
		if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
			ht->nNumUsed = 0;
			HT_HASH_RESET(ht);
		}
		return;
	}

	HT_HASH_RESET(ht);
	i = 0;
	p = ht->arData;
	if (HT_IS_WITHOUT_HOLES(ht)) {
		/* fast path: no holes, just relink every bucket */
		do {
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);
	} else {
		uint32_t old_num_used = ht->nNumUsed;
		do {
			if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) {
				/* first hole found: from here on, compact live buckets down
				 * (q/j trail behind p/i) while relinking them */
				uint32_t j = i;
				Bucket *q = p;

				if (EXPECTED(!HT_HAS_ITERATORS(ht))) {
					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							q++;
							j++;
						}
					}
				} else {
					/* same as above, but also retarget external iterators
					 * positioned at or past each moved bucket */
					uint32_t iter_pos = zend_hash_iterators_lower_pos(ht, 0);

					while (++i < ht->nNumUsed) {
						p++;
						if (EXPECTED(Z_TYPE_INFO(p->val) != IS_UNDEF)) {
							ZVAL_COPY_VALUE(&q->val, &p->val);
							q->h = p->h;
							nIndex = q->h | ht->nTableMask;
							q->key = p->key;
							Z_NEXT(q->val) = HT_HASH(ht, nIndex);
							HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(j);
							if (UNEXPECTED(ht->nInternalPointer == i)) {
								ht->nInternalPointer = j;
							}
							if (UNEXPECTED(i >= iter_pos)) {
								do {
									zend_hash_iterators_update(ht, iter_pos, j);
									iter_pos = zend_hash_iterators_lower_pos(ht, iter_pos + 1);
								} while (iter_pos < i);
							}
							q++;
							j++;
						}
					}
				}
				ht->nNumUsed = j;
				break;
			}
			nIndex = p->h | ht->nTableMask;
			Z_NEXT(p->val) = HT_HASH(ht, nIndex);
			HT_HASH(ht, nIndex) = HT_IDX_TO_HASH(i);
			p++;
		} while (++i < ht->nNumUsed);

		/* Migrate pointer to one past the end of the array to the new one past the end, so that
		 * newly inserted elements are picked up correctly. */
		if (UNEXPECTED(HT_HAS_ITERATORS(ht))) {
			_zend_hash_iterators_update(ht, old_num_used, ht->nNumUsed);
		}
	}
}
/* Low-level element removal. `idx` is in HT_IDX_TO_HASH form, `p` is the
 * bucket, `prev` its predecessor in the collision chain (NULL when `p` is
 * the chain head, or when the table is packed and has no chains).
 * Unlinks the bucket, moves the internal pointer and any external
 * iterators that pointed at it to the next live element, trims trailing
 * holes from nNumUsed, releases the key string, and destroys the value.
 * The slot is set to UNDEF before the destructor runs, so reentrant
 * access sees an already-deleted slot. */
static zend_always_inline void _zend_hash_del_el_ex(HashTable *ht, uint32_t idx, Bucket *p, Bucket *prev)
{
	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
		if (prev) {
			Z_NEXT(prev->val) = Z_NEXT(p->val);
		} else {
			HT_HASH(ht, p->h | ht->nTableMask) = Z_NEXT(p->val);
		}
	}
	idx = HT_HASH_TO_IDX(idx);
	ht->nNumOfElements--;
	if (ht->nInternalPointer == idx || UNEXPECTED(HT_HAS_ITERATORS(ht))) {
		/* advance to the next non-hole position (or nNumUsed) */
		uint32_t new_idx;

		new_idx = idx;
		while (1) {
			new_idx++;
			if (new_idx >= ht->nNumUsed) {
				break;
			} else if (Z_TYPE(ht->arData[new_idx].val) != IS_UNDEF) {
				break;
			}
		}
		if (ht->nInternalPointer == idx) {
			ht->nInternalPointer = new_idx;
		}
		zend_hash_iterators_update(ht, idx, new_idx);
	}
	if (ht->nNumUsed - 1 == idx) {
		/* deleting the last used slot: also trim any preceding holes */
		do {
			ht->nNumUsed--;
		} while (ht->nNumUsed > 0 && (UNEXPECTED(Z_TYPE(ht->arData[ht->nNumUsed-1].val) == IS_UNDEF)));
		ht->nInternalPointer = MIN(ht->nInternalPointer, ht->nNumUsed);
	}
	if (p->key) {
		zend_string_release(p->key);
	}
	if (ht->pDestructor) {
		/* clear the slot before invoking the destructor (reentrancy) */
		zval tmp;
		ZVAL_COPY_VALUE(&tmp, &p->val);
		ZVAL_UNDEF(&p->val);
		ht->pDestructor(&tmp);
	} else {
		ZVAL_UNDEF(&p->val);
	}
}
/* Remove bucket `p` at position `idx` (HT_IDX_TO_HASH form) when its chain
 * predecessor is not known: walk the collision chain to find it (only
 * needed for non-packed tables), then delegate to _zend_hash_del_el_ex(). */
static zend_always_inline void _zend_hash_del_el(HashTable *ht, uint32_t idx, Bucket *p)
{
	Bucket *prev = NULL;

	if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
		uint32_t nIndex = p->h | ht->nTableMask;
		uint32_t i = HT_HASH(ht, nIndex);

		if (i != idx) {
			prev = HT_HASH_TO_BUCKET(ht, i);
			while (Z_NEXT(prev->val) != idx) {
				i = Z_NEXT(prev->val);
				prev = HT_HASH_TO_BUCKET(ht, i);
			}
		}
	}

	_zend_hash_del_el_ex(ht, idx, p, prev);
}
/* Remove a bucket given its pointer; the position is recovered from the
 * pointer offset into arData. */
ZEND_API void ZEND_FASTCALL zend_hash_del_bucket(HashTable *ht, Bucket *p)
{
	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);
	_zend_hash_del_el(ht, HT_IDX_TO_HASH(p - ht->arData), p);
}
/* Remove the entry with string key `key`. Returns SUCCESS if an entry was
 * deleted, FAILURE if the key was not found. Keys are matched by pointer
 * identity first (interned strings), then by hash and content. */
ZEND_API int ZEND_FASTCALL zend_hash_del(HashTable *ht, zend_string *key)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_string_hash_val(key);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->key == key) ||
			(p->h == h &&
		     p->key &&
		     zend_string_equal_content(p->key, key))) {
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
/* Remove the entry with string key `key`, writing through IS_INDIRECT
 * slots: for an indirect slot the referenced value is destroyed and set
 * to UNDEF while the slot itself stays in the table, and
 * HASH_FLAG_HAS_EMPTY_IND is raised so lookups know empty indirect slots
 * may exist. Returns SUCCESS if a value was deleted, FAILURE otherwise
 * (including an indirect slot that is already UNDEF). */
ZEND_API int ZEND_FASTCALL zend_hash_del_ind(HashTable *ht, zend_string *key)
{
	zend_ulong h;
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	h = zend_string_hash_val(key);
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->key == key) ||
			(p->h == h &&
		     p->key &&
		     zend_string_equal_content(p->key, key))) {
			if (Z_TYPE(p->val) == IS_INDIRECT) {
				zval *data = Z_INDIRECT(p->val);

				if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
					return FAILURE;
				} else {
					if (ht->pDestructor) {
						/* clear the slot before invoking the destructor,
						 * so reentrant access sees it as deleted */
						zval tmp;
						ZVAL_COPY_VALUE(&tmp, data);
						ZVAL_UNDEF(data);
						ht->pDestructor(&tmp);
					} else {
						ZVAL_UNDEF(data);
					}
					HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
				}
			} else {
				_zend_hash_del_el_ex(ht, idx, p, prev);
			}
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
  1278. ZEND_API int ZEND_FASTCALL zend_hash_str_del_ind(HashTable *ht, const char *str, size_t len)
  1279. {
  1280. zend_ulong h;
  1281. uint32_t nIndex;
  1282. uint32_t idx;
  1283. Bucket *p;
  1284. Bucket *prev = NULL;
  1285. IS_CONSISTENT(ht);
  1286. HT_ASSERT_RC1(ht);
  1287. h = zend_inline_hash_func(str, len);
  1288. nIndex = h | ht->nTableMask;
  1289. idx = HT_HASH(ht, nIndex);
  1290. while (idx != HT_INVALID_IDX) {
  1291. p = HT_HASH_TO_BUCKET(ht, idx);
  1292. if ((p->h == h)
  1293. && p->key
  1294. && (ZSTR_LEN(p->key) == len)
  1295. && !memcmp(ZSTR_VAL(p->key), str, len)) {
  1296. if (Z_TYPE(p->val) == IS_INDIRECT) {
  1297. zval *data = Z_INDIRECT(p->val);
  1298. if (UNEXPECTED(Z_TYPE_P(data) == IS_UNDEF)) {
  1299. return FAILURE;
  1300. } else {
  1301. if (ht->pDestructor) {
  1302. ht->pDestructor(data);
  1303. }
  1304. ZVAL_UNDEF(data);
  1305. HT_FLAGS(ht) |= HASH_FLAG_HAS_EMPTY_IND;
  1306. }
  1307. } else {
  1308. _zend_hash_del_el_ex(ht, idx, p, prev);
  1309. }
  1310. return SUCCESS;
  1311. }
  1312. prev = p;
  1313. idx = Z_NEXT(p->val);
  1314. }
  1315. return FAILURE;
  1316. }
  1317. ZEND_API int ZEND_FASTCALL zend_hash_str_del(HashTable *ht, const char *str, size_t len)
  1318. {
  1319. zend_ulong h;
  1320. uint32_t nIndex;
  1321. uint32_t idx;
  1322. Bucket *p;
  1323. Bucket *prev = NULL;
  1324. IS_CONSISTENT(ht);
  1325. HT_ASSERT_RC1(ht);
  1326. h = zend_inline_hash_func(str, len);
  1327. nIndex = h | ht->nTableMask;
  1328. idx = HT_HASH(ht, nIndex);
  1329. while (idx != HT_INVALID_IDX) {
  1330. p = HT_HASH_TO_BUCKET(ht, idx);
  1331. if ((p->h == h)
  1332. && p->key
  1333. && (ZSTR_LEN(p->key) == len)
  1334. && !memcmp(ZSTR_VAL(p->key), str, len)) {
  1335. _zend_hash_del_el_ex(ht, idx, p, prev);
  1336. return SUCCESS;
  1337. }
  1338. prev = p;
  1339. idx = Z_NEXT(p->val);
  1340. }
  1341. return FAILURE;
  1342. }
/* Remove the entry with integer key `h`. Returns SUCCESS if an entry was
 * deleted, FAILURE if the key was not found. Packed tables are indexed
 * directly; otherwise the collision chain is searched. */
ZEND_API int ZEND_FASTCALL zend_hash_index_del(HashTable *ht, zend_ulong h)
{
	uint32_t nIndex;
	uint32_t idx;
	Bucket *p;
	Bucket *prev = NULL;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (HT_FLAGS(ht) & HASH_FLAG_PACKED) {
		if (h < ht->nNumUsed) {
			p = ht->arData + h;
			if (Z_TYPE(p->val) != IS_UNDEF) {
				_zend_hash_del_el_ex(ht, HT_IDX_TO_HASH(h), p, NULL);
				return SUCCESS;
			}
		}
		return FAILURE;
	}
	nIndex = h | ht->nTableMask;

	idx = HT_HASH(ht, nIndex);
	while (idx != HT_INVALID_IDX) {
		p = HT_HASH_TO_BUCKET(ht, idx);
		if ((p->h == h) && (p->key == NULL)) {
			_zend_hash_del_el_ex(ht, idx, p, prev);
			return SUCCESS;
		}
		prev = p;
		idx = Z_NEXT(p->val);
	}
	return FAILURE;
}
/* Destroy all elements and release the table's storage (the HashTable
 * struct itself is not freed). The nested branches are loop-unswitching:
 * each combination of (has destructor) x (static keys only) x (no holes)
 * gets its own tight loop. Consistency markers bracket the destructor
 * phase because destructors may observe the table. */
ZEND_API void ZEND_FASTCALL zend_hash_destroy(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	if (ht->nNumUsed) {
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (ht->pDestructor) {
			SET_INCONSISTENT(HT_IS_DESTROYING);

			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
						}
					} while (++p != end);
				}
			} else if (HT_IS_WITHOUT_HOLES(ht)) {
				do {
					ht->pDestructor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				} while (++p != end);
			} else {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					}
				} while (++p != end);
			}

			SET_INCONSISTENT(HT_DESTROYED);
		} else {
			/* no destructor: only string keys need releasing */
			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					}
				} while (++p != end);
			}
		}
		zend_hash_iterators_remove(ht);
	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		/* lazy tables never allocated data; nothing to free */
		return;
	}
	pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
}
/* Destroy a regular PHP array (zval destructor fast path) and free the
 * HashTable struct itself. Assumes pDestructor == ZVAL_PTR_DTOR so
 * i_zval_ptr_dtor() can be inlined; falls back to zend_hash_destroy()
 * in the rare case the destructor was changed. */
ZEND_API void ZEND_FASTCALL zend_array_destroy(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT(ht, GC_REFCOUNT(ht) <= 1);

	/* break possible cycles */
	GC_REMOVE_FROM_BUFFER(ht);
	GC_TYPE_INFO(ht) = IS_NULL /*???| (GC_WHITE << 16)*/;

	if (ht->nNumUsed) {
		/* In some rare cases destructors of regular arrays may be changed */
		if (UNEXPECTED(ht->pDestructor != ZVAL_PTR_DTOR)) {
			zend_hash_destroy(ht);
			goto free_ht;
		}

		p = ht->arData;
		end = p + ht->nNumUsed;
		SET_INCONSISTENT(HT_IS_DESTROYING);

		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
			} while (++p != end);
		} else if (HT_IS_WITHOUT_HOLES(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
				if (EXPECTED(p->key)) {
					zend_string_release_ex(p->key, 0);
				}
			} while (++p != end);
		} else {
			do {
				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
					i_zval_ptr_dtor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release_ex(p->key, 0);
					}
				}
			} while (++p != end);
		}
		zend_hash_iterators_remove(ht);
		SET_INCONSISTENT(HT_DESTROYED);
	} else if (EXPECTED(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		/* lazy tables never allocated data; nothing to free */
		goto free_ht;
	}
	efree(HT_GET_DATA_ADDR(ht));
free_ht:
	FREE_HASHTABLE(ht);
}
/* Remove all elements but keep the table and its allocation usable.
 * Like zend_hash_destroy(), the nested branches are loop-unswitching over
 * (has destructor) x (static keys only) x (no holes). Resets counters and
 * nNextFreeElement (ZEND_LONG_MIN marks "no index appended yet"). */
ZEND_API void ZEND_FASTCALL zend_hash_clean(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (ht->pDestructor) {
			if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						ht->pDestructor(&p->val);
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							ht->pDestructor(&p->val);
						}
					} while (++p != end);
				}
			} else if (HT_IS_WITHOUT_HOLES(ht)) {
				do {
					ht->pDestructor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				} while (++p != end);
			} else {
				do {
					if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
						ht->pDestructor(&p->val);
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					}
				} while (++p != end);
			}
		} else {
			/* no destructor: only string keys need releasing */
			if (!HT_HAS_STATIC_KEYS_ONLY(ht)) {
				if (HT_IS_WITHOUT_HOLES(ht)) {
					do {
						if (EXPECTED(p->key)) {
							zend_string_release(p->key);
						}
					} while (++p != end);
				} else {
					do {
						if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
							if (EXPECTED(p->key)) {
								zend_string_release(p->key);
							}
						}
					} while (++p != end);
				}
			}
		}
		if (!(HT_FLAGS(ht) & HASH_FLAG_PACKED)) {
			HT_HASH_RESET(ht);
		}
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->nInternalPointer = 0;
}
/* Clean a symbol table: like zend_hash_clean(), but hard-wired to the
 * zval destructor fast path (i_zval_ptr_dtor) instead of calling through
 * ht->pDestructor. */
ZEND_API void ZEND_FASTCALL zend_symtable_clean(HashTable *ht)
{
	Bucket *p, *end;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	if (ht->nNumUsed) {
		p = ht->arData;
		end = p + ht->nNumUsed;
		if (HT_HAS_STATIC_KEYS_ONLY(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
			} while (++p != end);
		} else if (HT_IS_WITHOUT_HOLES(ht)) {
			do {
				i_zval_ptr_dtor(&p->val);
				if (EXPECTED(p->key)) {
					zend_string_release(p->key);
				}
			} while (++p != end);
		} else {
			do {
				if (EXPECTED(Z_TYPE(p->val) != IS_UNDEF)) {
					i_zval_ptr_dtor(&p->val);
					if (EXPECTED(p->key)) {
						zend_string_release(p->key);
					}
				}
			} while (++p != end);
		}
		HT_HASH_RESET(ht);
	}
	ht->nNumUsed = 0;
	ht->nNumOfElements = 0;
	ht->nNextFreeElement = ZEND_LONG_MIN;
	ht->nInternalPointer = 0;
}
  1580. ZEND_API void ZEND_FASTCALL zend_hash_graceful_destroy(HashTable *ht)
  1581. {
  1582. uint32_t idx;
  1583. Bucket *p;
  1584. IS_CONSISTENT(ht);
  1585. HT_ASSERT_RC1(ht);
  1586. p = ht->arData;
  1587. for (idx = 0; idx < ht->nNumUsed; idx++, p++) {
  1588. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  1589. _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
  1590. }
  1591. if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
  1592. pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
  1593. }
  1594. SET_INCONSISTENT(HT_DESTROYED);
  1595. }
/* Like zend_hash_graceful_destroy(), but delete elements in reverse
 * insertion order (last bucket first). */
ZEND_API void ZEND_FASTCALL zend_hash_graceful_reverse_destroy(HashTable *ht)
{
	uint32_t idx;
	Bucket *p;

	IS_CONSISTENT(ht);
	HT_ASSERT_RC1(ht);

	idx = ht->nNumUsed;
	p = ht->arData + ht->nNumUsed;
	while (idx > 0) {
		idx--;
		p--;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
	}

	if (!(HT_FLAGS(ht) & HASH_FLAG_UNINITIALIZED)) {
		pefree(HT_GET_DATA_ADDR(ht), GC_FLAGS(ht) & IS_ARRAY_PERSISTENT);
	}

	SET_INCONSISTENT(HT_DESTROYED);
}
/* This is used to iterate over the elements of a hashtable and selectively
 * delete certain entries. apply_func() receives the data and decides if the
 * entry should be deleted or iteration should be stopped. The following
 * return codes are possible:
 * ZEND_HASH_APPLY_KEEP   - continue
 * ZEND_HASH_APPLY_STOP   - stop iteration
 * ZEND_HASH_APPLY_REMOVE - delete the element, combinable with the former
 */
/* Call apply_func() on every element; honor the ZEND_HASH_APPLY_REMOVE
 * and ZEND_HASH_APPLY_STOP bits in its result (see comment above).
 * The RC1 assertion is only needed when a removal actually happens. */
ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
{
	uint32_t idx;
	Bucket *p;
	int result;

	IS_CONSISTENT(ht);

	for (idx = 0; idx < ht->nNumUsed; idx++) {
		p = ht->arData + idx;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		result = apply_func(&p->val);

		if (result & ZEND_HASH_APPLY_REMOVE) {
			HT_ASSERT_RC1(ht);
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
		if (result & ZEND_HASH_APPLY_STOP) {
			break;
		}
	}
}
/* Like zend_hash_apply(), but pass an extra opaque `argument` through to
 * the callback. */
ZEND_API void ZEND_FASTCALL zend_hash_apply_with_argument(HashTable *ht, apply_func_arg_t apply_func, void *argument)
{
	uint32_t idx;
	Bucket *p;
	int result;

	IS_CONSISTENT(ht);

	for (idx = 0; idx < ht->nNumUsed; idx++) {
		p = ht->arData + idx;
		if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
		result = apply_func(&p->val, argument);

		if (result & ZEND_HASH_APPLY_REMOVE) {
			HT_ASSERT_RC1(ht);
			_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
		}
		if (result & ZEND_HASH_APPLY_STOP) {
			break;
		}
	}
}
  1661. ZEND_API void zend_hash_apply_with_arguments(HashTable *ht, apply_func_args_t apply_func, int num_args, ...)
  1662. {
  1663. uint32_t idx;
  1664. Bucket *p;
  1665. va_list args;
  1666. zend_hash_key hash_key;
  1667. int result;
  1668. IS_CONSISTENT(ht);
  1669. for (idx = 0; idx < ht->nNumUsed; idx++) {
  1670. p = ht->arData + idx;
  1671. if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
  1672. va_start(args, num_args);
  1673. hash_key.h = p->h;
  1674. hash_key.key = p->key;
  1675. result = apply_func(&p->val, num_args, args, &hash_key);
  1676. if (result & ZEND_HASH_APPLY_REMOVE) {
  1677. HT_ASSERT_RC1(ht);
  1678. _zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
  1679. }
  1680. if

Large files files are truncated, but you can click here to view the full file