uthash.h (73912B)
/*
Copyright (c) 2003-2022, Troy D. Hanson  https://troydhanson.github.io/uthash/
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef UTHASH_H
#define UTHASH_H

#define UTHASH_VERSION 2.3.0

#include <string.h>   /* memcmp, memset, strlen */
#include <stddef.h>   /* ptrdiff_t */
#include <stdlib.h>   /* exit */

#if defined(HASH_DEFINE_OWN_STDINT) && HASH_DEFINE_OWN_STDINT
/* This codepath is provided for backward compatibility, but I plan to remove it. */
#warning "HASH_DEFINE_OWN_STDINT is deprecated; please use HASH_NO_STDINT instead"
typedef unsigned int uint32_t;
typedef unsigned char uint8_t;
#elif defined(HASH_NO_STDINT) && HASH_NO_STDINT
#else
#include <stdint.h>   /* uint8_t, uint32_t */
#endif

/* These macros use decltype or the earlier __typeof GNU extension.
   As decltype is only available in newer compilers (VS2010 or gcc 4.3+
   when compiling c++ source) this code uses whatever method is needed
   or, for VS2008 where neither is available, uses casting workarounds. */
#if !defined(DECLTYPE) && !defined(NO_DECLTYPE)
#if defined(_MSC_VER)   /* MS compiler */
#if _MSC_VER >= 1600 && defined(__cplusplus)  /* VS2010 or newer in C++ mode */
#define DECLTYPE(x) (decltype(x))
#else                   /* VS2008 or older (or VS2010 in C mode) */
#define NO_DECLTYPE
#endif
#elif defined(__MCST__)  /* Elbrus C Compiler */
#define DECLTYPE(x) (__typeof(x))
#elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__)
#define NO_DECLTYPE
#else                   /* GNU, Sun and other compilers */
#define DECLTYPE(x) (__typeof(x))
#endif
#endif

#ifdef NO_DECLTYPE
#define DECLTYPE(x)
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  char **_da_dst = (char**)(&(dst)); \
  *_da_dst = (char*)(src); \
} while (0)
#else
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  (dst) = DECLTYPE(dst)(src); \
} while (0)
#endif

#ifndef uthash_malloc
#define uthash_malloc(sz) malloc(sz)      /* malloc fcn */
#endif
#ifndef uthash_free
#define uthash_free(ptr,sz) free(ptr)     /* free fcn */
#endif
#ifndef uthash_bzero
#define uthash_bzero(a,n) memset(a,'\0',n)
#endif
#ifndef uthash_strlen
#define uthash_strlen(s) strlen(s)
#endif

#ifndef HASH_FUNCTION
#define HASH_FUNCTION(keyptr,keylen,hashv) HASH_JEN(keyptr, keylen, hashv)
#endif

#ifndef HASH_KEYCMP
#define HASH_KEYCMP(a,b,n) memcmp(a,b,n)
#endif

#ifndef uthash_noexpand_fyi
#define uthash_noexpand_fyi(tbl)          /* can be defined to log noexpand */
#endif
#ifndef uthash_expand_fyi
#define uthash_expand_fyi(tbl)            /* can be defined to log expands */
#endif

#ifndef HASH_NONFATAL_OOM
#define HASH_NONFATAL_OOM 0
#endif

#if HASH_NONFATAL_OOM
/* malloc failures can be recovered from */

#ifndef uthash_nonfatal_oom
#define uthash_nonfatal_oom(obj) do {} while (0)    /* non-fatal OOM error */
#endif

#define HASH_RECORD_OOM(oomed) do { (oomed) = 1; } while (0)
#define IF_HASH_NONFATAL_OOM(x) x

#else
/* malloc failures result in lost memory, hash tables are unusable */

#ifndef uthash_fatal
#define uthash_fatal(msg) exit(-1)        /* fatal OOM error */
#endif

#define HASH_RECORD_OOM(oomed) uthash_fatal("out of memory")
#define IF_HASH_NONFATAL_OOM(x)

#endif

/* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS 32U      /* initial number of buckets        */
#define HASH_INITIAL_NUM_BUCKETS_LOG2 5U  /* lg2 of initial number of buckets */
#define HASH_BKT_CAPACITY_THRESH 10U      /* expand when bucket count reaches */

/* calculate the element whose hash handle address is hhp */
#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
/* calculate the hash handle from element address elp */
#define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle*)(void*)(((char*)(elp)) + ((tbl)->hho)))

#define HASH_ROLLBACK_BKT(hh, head, itemptrhh) \
do { \
  struct UT_hash_handle *_hd_hh_item = (itemptrhh); \
  unsigned _hd_bkt; \
  HASH_TO_BKT(_hd_hh_item->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
  (head)->hh.tbl->buckets[_hd_bkt].count++; \
  _hd_hh_item->hh_next = NULL; \
  _hd_hh_item->hh_prev = NULL; \
} while (0)

#define HASH_VALUE(keyptr,keylen,hashv) \
do { \
  HASH_FUNCTION(keyptr, keylen, hashv); \
} while (0)

#define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \
do { \
  (out) = NULL; \
  if (head) { \
    unsigned _hf_bkt; \
    HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \
    if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \
      HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \
    } \
  } \
} while (0)

#define HASH_FIND(hh,head,keyptr,keylen,out) \
do { \
  (out) = NULL; \
  if (head) { \
    unsigned _hf_hashv; \
    HASH_VALUE(keyptr, keylen, _hf_hashv); \
    HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \
  } \
} while (0)
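/* Illustrative sketch (not part of uthash itself): when the same key is
 * looked up against several hash tables, HASH_VALUE can compute the hash
 * once and HASH_FIND_BYHASHVALUE can reuse it, instead of rehashing in
 * each HASH_FIND. The struct and variable names below are hypothetical.
 *
 *   struct item { int id; UT_hash_handle hh; };
 *   struct item *cache = NULL, *archive = NULL, *found = NULL;
 *   int key = 42;
 *   unsigned hashv;
 *
 *   HASH_VALUE(&key, sizeof(int), hashv);
 *   HASH_FIND_BYHASHVALUE(hh, cache, &key, sizeof(int), hashv, found);
 *   if (found == NULL) {
 *     HASH_FIND_BYHASHVALUE(hh, archive, &key, sizeof(int), hashv, found);
 *   }
 */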
#ifdef HASH_BLOOM
#define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM)
#define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL)
#define HASH_BLOOM_MAKE(tbl,oomed) \
do { \
  (tbl)->bloom_nbits = HASH_BLOOM; \
  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
  if (!(tbl)->bloom_bv) { \
    HASH_RECORD_OOM(oomed); \
  } else { \
    uthash_bzero((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
    (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
  } \
} while (0)

#define HASH_BLOOM_FREE(tbl) \
do { \
  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
} while (0)

#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U)))
#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U)))

#define HASH_BLOOM_ADD(tbl,hashv) \
  HASH_BLOOM_BITSET((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))

#define HASH_BLOOM_TEST(tbl,hashv) \
  HASH_BLOOM_BITTEST((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))

#else
#define HASH_BLOOM_MAKE(tbl,oomed)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl,hashv)
#define HASH_BLOOM_TEST(tbl,hashv) (1)
#define HASH_BLOOM_BYTELEN 0U
#endif

#define HASH_MAKE_TABLE(hh,head,oomed) \
do { \
  (head)->hh.tbl = (UT_hash_table*)uthash_malloc(sizeof(UT_hash_table)); \
  if (!(head)->hh.tbl) { \
    HASH_RECORD_OOM(oomed); \
  } else { \
    uthash_bzero((head)->hh.tbl, sizeof(UT_hash_table)); \
    (head)->hh.tbl->tail = &((head)->hh); \
    (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
    (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
    (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
    (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
        HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
    (head)->hh.tbl->signature = HASH_SIGNATURE; \
    if (!(head)->hh.tbl->buckets) { \
      HASH_RECORD_OOM(oomed); \
      uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    } else { \
      uthash_bzero((head)->hh.tbl->buckets, \
          HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
      HASH_BLOOM_MAKE((head)->hh.tbl, oomed); \
      IF_HASH_NONFATAL_OOM( \
        if (oomed) { \
          uthash_free((head)->hh.tbl->buckets, \
              HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
          uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
        } \
      ) \
    } \
  } \
} while (0)

#define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \
do { \
  (replaced) = NULL; \
  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
  if (replaced) { \
    HASH_DELETE(hh, head, replaced); \
  } \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \
} while (0)

#define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \
do { \
  (replaced) = NULL; \
  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
  if (replaced) { \
    HASH_DELETE(hh, head, replaced); \
  } \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \
} while (0)

#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \
do { \
  unsigned _hr_hashv; \
  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
  HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \
} while (0)

#define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \
do { \
  unsigned _hr_hashv; \
  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
  HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \
} while (0)

#define HASH_APPEND_LIST(hh, head, add) \
do { \
  (add)->hh.next = NULL; \
  (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
  (head)->hh.tbl->tail->next = (add); \
  (head)->hh.tbl->tail = &((add)->hh); \
} while (0)

#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
do { \
  do { \
    if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) { \
      break; \
    } \
  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
} while (0)

#ifdef NO_DECLTYPE
#undef HASH_AKBI_INNER_LOOP
#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
do { \
  char *_hs_saved_head = (char*)(head); \
  do { \
    DECLTYPE_ASSIGN(head, _hs_iter); \
    if (cmpfcn(head, add) > 0) { \
      DECLTYPE_ASSIGN(head, _hs_saved_head); \
      break; \
    } \
    DECLTYPE_ASSIGN(head, _hs_saved_head); \
  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
} while (0)
#endif

#if HASH_NONFATAL_OOM

#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \
do { \
  if (!(oomed)) { \
    unsigned _ha_bkt; \
    (head)->hh.tbl->num_items++; \
    HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
    HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \
    if (oomed) { \
      HASH_ROLLBACK_BKT(hh, head, &(add)->hh); \
      HASH_DELETE_HH(hh, head, &(add)->hh); \
      (add)->hh.tbl = NULL; \
      uthash_nonfatal_oom(add); \
    } else { \
      HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
      HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
    } \
  } else { \
    (add)->hh.tbl = NULL; \
    uthash_nonfatal_oom(add); \
  } \
} while (0)

#else

#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \
do { \
  unsigned _ha_bkt; \
  (head)->hh.tbl->num_items++; \
  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \
  HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
  HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
} while (0)

#endif


#define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \
do { \
  IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \
  (add)->hh.hashv = (hashval); \
  (add)->hh.key = (char*) (keyptr); \
  (add)->hh.keylen = (unsigned) (keylen_in); \
  if (!(head)) { \
    (add)->hh.next = NULL; \
    (add)->hh.prev = NULL; \
    HASH_MAKE_TABLE(hh, add, _ha_oomed); \
    IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \
    (head) = (add); \
    IF_HASH_NONFATAL_OOM( } ) \
  } else { \
    void *_hs_iter = (head); \
    (add)->hh.tbl = (head)->hh.tbl; \
    HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn); \
    if (_hs_iter) { \
      (add)->hh.next = _hs_iter; \
      if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) { \
        HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add); \
      } else { \
        (head) = (add); \
      } \
      HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add); \
    } else { \
      HASH_APPEND_LIST(hh, head, add); \
    } \
  } \
  HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \
  HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE_INORDER"); \
} while (0)

#define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \
do { \
  unsigned _hs_hashv; \
  HASH_VALUE(keyptr, keylen_in, _hs_hashv); \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \
} while (0)

#define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn)

#define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \
  HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn)

#define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \
do { \
  IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \
  (add)->hh.hashv = (hashval); \
  (add)->hh.key = (const void*) (keyptr); \
  (add)->hh.keylen = (unsigned) (keylen_in); \
  if (!(head)) { \
    (add)->hh.next = NULL; \
    (add)->hh.prev = NULL; \
    HASH_MAKE_TABLE(hh, add, _ha_oomed); \
    IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \
    (head) = (add); \
    IF_HASH_NONFATAL_OOM( } ) \
  } else { \
    (add)->hh.tbl = (head)->hh.tbl; \
    HASH_APPEND_LIST(hh, head, add); \
  } \
  HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \
  HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE"); \
} while (0)

#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
do { \
  unsigned _ha_hashv; \
  HASH_VALUE(keyptr, keylen_in, _ha_hashv); \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \
} while (0)

#define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add)

#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
  HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add)

#define HASH_TO_BKT(hashv,num_bkts,bkt) \
do { \
  bkt = ((hashv) & ((num_bkts) - 1U)); \
} while (0)

/* delete "delptr" from the hash table.
 * "the usual" patch-up process for the app-order doubly-linked-list.
 * The use of _hd_hh_del below deserves special explanation.
 * These used to be expressed using (delptr) but that led to a bug
 * if someone used the same symbol for the head and deletee, like
 *  HASH_DELETE(hh,users,users);
 * We want that to work, but by changing the head (users) below
 * we were forfeiting our ability to further refer to the deletee (users)
 * in the patch-up process. Solution: use scratch space to
 * copy the deletee pointer, then the latter references are via that
 * scratch pointer rather than through the repointed (users) symbol.
 */
#define HASH_DELETE(hh,head,delptr) \
    HASH_DELETE_HH(hh, head, &(delptr)->hh)

#define HASH_DELETE_HH(hh,head,delptrhh) \
do { \
  struct UT_hash_handle *_hd_hh_del = (delptrhh); \
  if ((_hd_hh_del->prev == NULL) && (_hd_hh_del->next == NULL)) { \
    HASH_BLOOM_FREE((head)->hh.tbl); \
    uthash_free((head)->hh.tbl->buckets, \
        (head)->hh.tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    (head) = NULL; \
  } else { \
    unsigned _hd_bkt; \
    if (_hd_hh_del == (head)->hh.tbl->tail) { \
      (head)->hh.tbl->tail = HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev); \
    } \
    if (_hd_hh_del->prev != NULL) { \
      HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev)->next = _hd_hh_del->next; \
    } else { \
      DECLTYPE_ASSIGN(head, _hd_hh_del->next); \
    } \
    if (_hd_hh_del->next != NULL) { \
      HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->next)->prev = _hd_hh_del->prev; \
    } \
    HASH_TO_BKT(_hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
    HASH_DEL_IN_BKT((head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
    (head)->hh.tbl->num_items--; \
  } \
  HASH_FSCK(hh, head, "HASH_DELETE_HH"); \
} while (0)

/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */
#define HASH_FIND_STR(head,findstr,out) \
do { \
  unsigned _uthash_hfstr_keylen = (unsigned)uthash_strlen(findstr); \
  HASH_FIND(hh, head, findstr, _uthash_hfstr_keylen, out); \
} while (0)
#define HASH_ADD_STR(head,strfield,add) \
do { \
  unsigned _uthash_hastr_keylen = (unsigned)uthash_strlen((add)->strfield); \
  HASH_ADD(hh, head, strfield[0], _uthash_hastr_keylen, add); \
} while (0)
#define HASH_REPLACE_STR(head,strfield,add,replaced) \
do { \
  unsigned _uthash_hrstr_keylen = (unsigned)uthash_strlen((add)->strfield); \
  HASH_REPLACE(hh, head, strfield[0], _uthash_hrstr_keylen, add, replaced); \
} while (0)
#define HASH_FIND_INT(head,findint,out) \
    HASH_FIND(hh,head,findint,sizeof(int),out)
#define HASH_ADD_INT(head,intfield,add) \
    HASH_ADD(hh,head,intfield,sizeof(int),add)
#define HASH_REPLACE_INT(head,intfield,add,replaced) \
    HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced)
#define HASH_FIND_PTR(head,findptr,out) \
    HASH_FIND(hh,head,findptr,sizeof(void *),out)
#define HASH_ADD_PTR(head,ptrfield,add) \
    HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
#define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \
    HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced)
#define HASH_DEL(head,delptr) \
    HASH_DELETE(hh,head,delptr)
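/* Illustrative usage sketch (not part of uthash itself): a minimal add/find/
 * delete cycle with an int key, using the convenience forms above. The
 * struct and variable names are hypothetical; the struct only needs a
 * UT_hash_handle member named hh for these hh-based convenience macros.
 *
 *   struct user { int id; char name[16]; UT_hash_handle hh; };
 *   struct user *users = NULL;          // head pointer, must start NULL
 *
 *   struct user *u = (struct user *)malloc(sizeof *u);
 *   u->id = 7;
 *   HASH_ADD_INT(users, id, u);         // "id" names the key field
 *
 *   struct user *found = NULL;
 *   int lookup = 7;
 *   HASH_FIND_INT(users, &lookup, found);
 *   if (found != NULL) {
 *     HASH_DEL(users, found);           // removes it from the hash only
 *     free(found);                      // caller still owns the memory
 *   }
 */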
/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
 * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
 */
#ifdef HASH_DEBUG
#include <stdio.h>   /* fprintf, stderr */
#define HASH_OOPS(...) do { fprintf(stderr, __VA_ARGS__); exit(-1); } while (0)
#define HASH_FSCK(hh,head,where) \
do { \
  struct UT_hash_handle *_thh; \
  if (head) { \
    unsigned _bkt_i; \
    unsigned _count = 0; \
    char *_prev; \
    for (_bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; ++_bkt_i) { \
      unsigned _bkt_count = 0; \
      _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
      _prev = NULL; \
      while (_thh) { \
        if (_prev != (char*)(_thh->hh_prev)) { \
          HASH_OOPS("%s: invalid hh_prev %p, actual %p\n", \
              (where), (void*)_thh->hh_prev, (void*)_prev); \
        } \
        _bkt_count++; \
        _prev = (char*)(_thh); \
        _thh = _thh->hh_next; \
      } \
      _count += _bkt_count; \
      if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
        HASH_OOPS("%s: invalid bucket count %u, actual %u\n", \
            (where), (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
      } \
    } \
    if (_count != (head)->hh.tbl->num_items) { \
      HASH_OOPS("%s: invalid hh item count %u, actual %u\n", \
          (where), (head)->hh.tbl->num_items, _count); \
    } \
    _count = 0; \
    _prev = NULL; \
    _thh = &(head)->hh; \
    while (_thh) { \
      _count++; \
      if (_prev != (char*)_thh->prev) { \
        HASH_OOPS("%s: invalid prev %p, actual %p\n", \
            (where), (void*)_thh->prev, (void*)_prev); \
      } \
      _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
      _thh = (_thh->next ? HH_FROM_ELMT((head)->hh.tbl, _thh->next) : NULL); \
    } \
    if (_count != (head)->hh.tbl->num_items) { \
      HASH_OOPS("%s: invalid app item count %u, actual %u\n", \
          (where), (head)->hh.tbl->num_items, _count); \
    } \
  } \
} while (0)
#else
#define HASH_FSCK(hh,head,where)
#endif

/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
 * the descriptor to which this macro is defined for tuning the hash function.
 * The app can #include <unistd.h> to get the prototype for write(2). */
#ifdef HASH_EMIT_KEYS
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
do { \
  unsigned _klen = fieldlen; \
  write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
  write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \
} while (0)
#else
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
#endif

/* The Bernstein hash function, used in Perl prior to v5.6. Note ((x<<5)+x) = x*33.
 */
#define HASH_BER(key,keylen,hashv) \
do { \
  unsigned _hb_keylen = (unsigned)keylen; \
  const unsigned char *_hb_key = (const unsigned char*)(key); \
  (hashv) = 0; \
  while (_hb_keylen-- != 0U) { \
    (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++; \
  } \
} while (0)


/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
 * (archive link: https://archive.is/Ivcan )
 */
#define HASH_SAX(key,keylen,hashv) \
do { \
  unsigned _sx_i; \
  const unsigned char *_hs_key = (const unsigned char*)(key); \
  hashv = 0; \
  for (_sx_i=0; _sx_i < keylen; _sx_i++) { \
    hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \
  } \
} while (0)
/* FNV-1a variation */
#define HASH_FNV(key,keylen,hashv) \
do { \
  unsigned _fn_i; \
  const unsigned char *_hf_key = (const unsigned char*)(key); \
  (hashv) = 2166136261U; \
  for (_fn_i=0; _fn_i < keylen; _fn_i++) { \
    hashv = hashv ^ _hf_key[_fn_i]; \
    hashv = hashv * 16777619U; \
  } \
} while (0)

#define HASH_OAT(key,keylen,hashv) \
do { \
  unsigned _ho_i; \
  const unsigned char *_ho_key=(const unsigned char*)(key); \
  hashv = 0; \
  for(_ho_i=0; _ho_i < keylen; _ho_i++) { \
    hashv += _ho_key[_ho_i]; \
    hashv += (hashv << 10); \
    hashv ^= (hashv >> 6); \
  } \
  hashv += (hashv << 3); \
  hashv ^= (hashv >> 11); \
  hashv += (hashv << 15); \
} while (0)

#define HASH_JEN_MIX(a,b,c) \
do { \
  a -= b; a -= c; a ^= ( c >> 13 ); \
  b -= c; b -= a; b ^= ( a << 8 ); \
  c -= a; c -= b; c ^= ( b >> 13 ); \
  a -= b; a -= c; a ^= ( c >> 12 ); \
  b -= c; b -= a; b ^= ( a << 16 ); \
  c -= a; c -= b; c ^= ( b >> 5 ); \
  a -= b; a -= c; a ^= ( c >> 3 ); \
  b -= c; b -= a; b ^= ( a << 10 ); \
  c -= a; c -= b; c ^= ( b >> 15 ); \
} while (0)

#define HASH_JEN(key,keylen,hashv) \
do { \
  unsigned _hj_i,_hj_j,_hj_k; \
  unsigned const char *_hj_key=(unsigned const char*)(key); \
  hashv = 0xfeedbeefu; \
  _hj_i = _hj_j = 0x9e3779b9u; \
  _hj_k = (unsigned)(keylen); \
  while (_hj_k >= 12U) { \
    _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
        + ( (unsigned)_hj_key[2] << 16 ) \
        + ( (unsigned)_hj_key[3] << 24 ) ); \
    _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
        + ( (unsigned)_hj_key[6] << 16 ) \
        + ( (unsigned)_hj_key[7] << 24 ) ); \
    hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
        + ( (unsigned)_hj_key[10] << 16 ) \
        + ( (unsigned)_hj_key[11] << 24 ) ); \
    \
    HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
    \
    _hj_key += 12; \
    _hj_k -= 12U; \
  } \
  hashv += (unsigned)(keylen); \
  switch ( _hj_k ) { \
    case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \
    case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \
    case 9:  hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \
    case 8:  _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \
    case 7:  _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \
    case 6:  _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \
    case 5:  _hj_j += _hj_key[4]; /* FALLTHROUGH */ \
    case 4:  _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \
    case 3:  _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \
    case 2:  _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \
    case 1:  _hj_i += _hj_key[0]; /* FALLTHROUGH */ \
    default: ; \
  } \
  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
} while (0)

/* The Paul Hsieh hash function */
#undef get16bits
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
  || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
#define get16bits(d) (*((const uint16_t *) (d)))
#endif

#if !defined (get16bits)
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
                      +(uint32_t)(((const uint8_t *)(d))[0]) )
#endif
#define HASH_SFH(key,keylen,hashv) \
do { \
  unsigned const char *_sfh_key=(unsigned const char*)(key); \
  uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen; \
  \
  unsigned _sfh_rem = _sfh_len & 3U; \
  _sfh_len >>= 2; \
  hashv = 0xcafebabeu; \
  \
  /* Main loop */ \
  for (;_sfh_len > 0U; _sfh_len--) { \
    hashv += get16bits (_sfh_key); \
    _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \
    hashv = (hashv << 16) ^ _sfh_tmp; \
    _sfh_key += 2U*sizeof (uint16_t); \
    hashv += hashv >> 11; \
  } \
  \
  /* Handle end cases */ \
  switch (_sfh_rem) { \
    case 3: hashv += get16bits (_sfh_key); \
            hashv ^= hashv << 16; \
            hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \
            hashv += hashv >> 11; \
            break; \
    case 2: hashv += get16bits (_sfh_key); \
            hashv ^= hashv << 11; \
            hashv += hashv >> 17; \
            break; \
    case 1: hashv += *_sfh_key; \
            hashv ^= hashv << 10; \
            hashv += hashv >> 1; \
            break; \
    default: ; \
  } \
  \
  /* Force "avalanching" of final 127 bits */ \
  hashv ^= hashv << 3; \
  hashv += hashv >> 5; \
  hashv ^= hashv << 4; \
  hashv += hashv >> 17; \
  hashv ^= hashv << 25; \
  hashv += hashv >> 6; \
} while (0)
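/* Illustrative note (not part of uthash itself): HASH_JEN above is the
 * default because HASH_FUNCTION defaults to it near the top of this file.
 * Any of the other built-in functions can be selected by defining
 * HASH_FUNCTION before this header is included, for example:
 *
 *   #define HASH_FUNCTION(keyptr,keylen,hashv) HASH_BER(keyptr, keylen, hashv)
 *   #include "uthash.h"
 *
 * All of the built-ins take the same (keyptr, keylen, hashv) arguments,
 * so a user-supplied hash macro with that shape works the same way.
 */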
/* iterate over items in a known bucket to find desired item */
#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \
do { \
  if ((head).hh_head != NULL) { \
    DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \
  } else { \
    (out) = NULL; \
  } \
  while ((out) != NULL) { \
    if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) { \
      if (HASH_KEYCMP((out)->hh.key, keyptr, keylen_in) == 0) { \
        break; \
      } \
    } \
    if ((out)->hh.hh_next != NULL) { \
      DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next)); \
    } else { \
      (out) = NULL; \
    } \
  } \
} while (0)

/* add an item to a bucket */
#define HASH_ADD_TO_BKT(head,hh,addhh,oomed) \
do { \
  UT_hash_bucket *_ha_head = &(head); \
  _ha_head->count++; \
  (addhh)->hh_next = _ha_head->hh_head; \
  (addhh)->hh_prev = NULL; \
  if (_ha_head->hh_head != NULL) { \
    _ha_head->hh_head->hh_prev = (addhh); \
  } \
  _ha_head->hh_head = (addhh); \
  if ((_ha_head->count >= ((_ha_head->expand_mult + 1U) * HASH_BKT_CAPACITY_THRESH)) \
      && !(addhh)->tbl->noexpand) { \
    HASH_EXPAND_BUCKETS(addhh,(addhh)->tbl, oomed); \
    IF_HASH_NONFATAL_OOM( \
      if (oomed) { \
        HASH_DEL_IN_BKT(head,addhh); \
      } \
    ) \
  } \
} while (0)

/* remove an item from a given bucket */
#define HASH_DEL_IN_BKT(head,delhh) \
do { \
  UT_hash_bucket *_hd_head = &(head); \
  _hd_head->count--; \
  if (_hd_head->hh_head == (delhh)) { \
    _hd_head->hh_head = (delhh)->hh_next; \
  } \
  if ((delhh)->hh_prev) { \
    (delhh)->hh_prev->hh_next = (delhh)->hh_next; \
  } \
  if ((delhh)->hh_next) { \
    (delhh)->hh_next->hh_prev = (delhh)->hh_prev; \
  } \
} while (0)

/* Bucket expansion has the effect of doubling the number of buckets
 * and redistributing the items into the new buckets. Ideally the
 * items will distribute more or less evenly into the new buckets
 * (the extent to which this is true is a measure of the quality of
 * the hash function as it applies to the key domain).
 *
 * With the items distributed into more buckets, the chain length
 * (item count) in each bucket is reduced. Thus by expanding buckets
 * the hash keeps a bound on the chain length. This bounded chain
 * length is the essence of how a hash provides constant time lookup.
 *
 * The calculation of tbl->ideal_chain_maxlen below deserves some
 * explanation. First, keep in mind that we're calculating the ideal
 * maximum chain length based on the *new* (doubled) bucket count.
 * In fractions this is just n/b (n=number of items,b=new num buckets).
 * Since the ideal chain length is an integer, we want to calculate
 * ceil(n/b). We don't depend on floating point arithmetic in this
 * hash, so to calculate ceil(n/b) with integers we could write
 *
 *      ceil(n/b) = (n/b) + ((n%b)?1:0)
 *
 * and in fact a previous version of this hash did just that.
 * But now we have improved things a bit by recognizing that b is
 * always a power of two. We keep its base 2 log handy (call it lb),
 * so now we can write this with a bit shift and logical AND:
 *
 *      ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
 *
 */
#define HASH_EXPAND_BUCKETS(hh,tbl,oomed) \
do { \
  unsigned _he_bkt; \
  unsigned _he_bkt_i; \
  struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
  UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
  _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
      sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U); \
  if (!_he_new_buckets) { \
    HASH_RECORD_OOM(oomed); \
  } else { \
    uthash_bzero(_he_new_buckets, \
        sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U); \
    (tbl)->ideal_chain_maxlen = \
        ((tbl)->num_items >> ((tbl)->log2_num_buckets+1U)) + \
        ((((tbl)->num_items & (((tbl)->num_buckets*2U)-1U)) != 0U) ? 1U : 0U); \
    (tbl)->nonideal_items = 0; \
    for (_he_bkt_i = 0; _he_bkt_i < (tbl)->num_buckets; _he_bkt_i++) { \
      _he_thh = (tbl)->buckets[ _he_bkt_i ].hh_head; \
      while (_he_thh != NULL) { \
        _he_hh_nxt = _he_thh->hh_next; \
        HASH_TO_BKT(_he_thh->hashv, (tbl)->num_buckets * 2U, _he_bkt); \
        _he_newbkt = &(_he_new_buckets[_he_bkt]); \
        if (++(_he_newbkt->count) > (tbl)->ideal_chain_maxlen) { \
          (tbl)->nonideal_items++; \
          if (_he_newbkt->count > _he_newbkt->expand_mult * (tbl)->ideal_chain_maxlen) { \
            _he_newbkt->expand_mult++; \
          } \
        } \
        _he_thh->hh_prev = NULL; \
        _he_thh->hh_next = _he_newbkt->hh_head; \
        if (_he_newbkt->hh_head != NULL) { \
          _he_newbkt->hh_head->hh_prev = _he_thh; \
        } \
        _he_newbkt->hh_head = _he_thh; \
        _he_thh = _he_hh_nxt; \
      } \
    } \
    uthash_free((tbl)->buckets, (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \
    (tbl)->num_buckets *= 2U; \
    (tbl)->log2_num_buckets++; \
    (tbl)->buckets = _he_new_buckets; \
    (tbl)->ineff_expands = ((tbl)->nonideal_items > ((tbl)->num_items >> 1)) ? \
        ((tbl)->ineff_expands+1U) : 0U; \
    if ((tbl)->ineff_expands > 1U) { \
      (tbl)->noexpand = 1; \
      uthash_noexpand_fyi(tbl); \
    } \
    uthash_expand_fyi(tbl); \
  } \
} while (0)


/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
/* Note that HASH_SORT assumes the hash handle name to be hh.
 * HASH_SRT was added to allow the hash handle name to be passed in. */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
#define HASH_SRT(hh,head,cmpfcn) \
do { \
  unsigned _hs_i; \
  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
  if (head != NULL) { \
    _hs_insize = 1; \
    _hs_looping = 1; \
    _hs_list = &((head)->hh); \
    while (_hs_looping != 0U) { \
      _hs_p = _hs_list; \
      _hs_list = NULL; \
      _hs_tail = NULL; \
      _hs_nmerges = 0; \
      while (_hs_p != NULL) { \
        _hs_nmerges++; \
        _hs_q = _hs_p; \
        _hs_psize = 0; \
        for (_hs_i = 0; _hs_i < _hs_insize; ++_hs_i) { \
          _hs_psize++; \
          _hs_q = ((_hs_q->next != NULL) ? \
              HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \
          if (_hs_q == NULL) { \
            break; \
          } \
        } \
        _hs_qsize = _hs_insize; \
        while ((_hs_psize != 0U) || ((_hs_qsize != 0U) && (_hs_q != NULL))) { \
          if (_hs_psize == 0U) { \
            _hs_e = _hs_q; \
            _hs_q = ((_hs_q->next != NULL) ? \
                HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \
            _hs_qsize--; \
          } else if ((_hs_qsize == 0U) || (_hs_q == NULL)) { \
            _hs_e = _hs_p; \
            if (_hs_p != NULL) { \
              _hs_p = ((_hs_p->next != NULL) ? \
                  HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \
            } \
            _hs_psize--; \
          } else if ((cmpfcn( \
              DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_p)), \
              DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_q)) \
              )) <= 0) { \
            _hs_e = _hs_p; \
            if (_hs_p != NULL) { \
              _hs_p = ((_hs_p->next != NULL) ? \
                  HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \
            } \
            _hs_psize--; \
          } else { \
            _hs_e = _hs_q; \
            _hs_q = ((_hs_q->next != NULL) ? \
                HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \
            _hs_qsize--; \
          } \
          if ( _hs_tail != NULL ) { \
            _hs_tail->next = ((_hs_e != NULL) ? \
                ELMT_FROM_HH((head)->hh.tbl, _hs_e) : NULL); \
          } else { \
            _hs_list = _hs_e; \
          } \
          if (_hs_e != NULL) { \
            _hs_e->prev = ((_hs_tail != NULL) ? \
                ELMT_FROM_HH((head)->hh.tbl, _hs_tail) : NULL); \
          } \
          _hs_tail = _hs_e; \
        } \
        _hs_p = _hs_q; \
      } \
      if (_hs_tail != NULL) { \
        _hs_tail->next = NULL; \
      } \
      if (_hs_nmerges <= 1U) { \
        _hs_looping = 0; \
        (head)->hh.tbl->tail = _hs_tail; \
        DECLTYPE_ASSIGN(head, ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
      } \
      _hs_insize *= 2U; \
    } \
    HASH_FSCK(hh, head, "HASH_SRT"); \
  } \
} while (0)
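/* Illustrative sketch (not part of uthash itself): HASH_SORT reorders only
 * the application-order list (prev/next), so iteration order changes but
 * lookups are unaffected. The comparison function follows strcmp semantics:
 * negative, zero, or positive. Names below are hypothetical and reuse the
 * struct user example from an earlier comment in this file.
 *
 *   int by_id(struct user *a, struct user *b) {
 *     return (a->id > b->id) - (a->id < b->id);
 *   }
 *   ...
 *   HASH_SORT(users, by_id);        // or HASH_SRT(hh, users, by_id)
 */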
/* This function selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * handle that must be present in the structure.
 */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
do { \
  unsigned _src_bkt, _dst_bkt; \
  void *_last_elt = NULL, *_elt; \
  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
  if ((src) != NULL) { \
    for (_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
      for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
           _src_hh != NULL; \
           _src_hh = _src_hh->hh_next) { \
        _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
        if (cond(_elt)) { \
          IF_HASH_NONFATAL_OOM( int _hs_oomed = 0; ) \
          _dst_hh = (UT_hash_handle*)(void*)(((char*)_elt) + _dst_hho); \
          _dst_hh->key = _src_hh->key; \
          _dst_hh->keylen = _src_hh->keylen; \
          _dst_hh->hashv = _src_hh->hashv; \
          _dst_hh->prev = _last_elt; \
          _dst_hh->next = NULL; \
          if (_last_elt_hh != NULL) { \
            _last_elt_hh->next = _elt; \
          } \
          if ((dst) == NULL) { \
            DECLTYPE_ASSIGN(dst, _elt); \
            HASH_MAKE_TABLE(hh_dst, dst, _hs_oomed); \
            IF_HASH_NONFATAL_OOM( \
              if (_hs_oomed) { \
                uthash_nonfatal_oom(_elt); \
                (dst) = NULL; \
                continue; \
              } \
            ) \
          } else { \
            _dst_hh->tbl = (dst)->hh_dst.tbl; \
          } \
          HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
          HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], hh_dst, _dst_hh, _hs_oomed); \
          (dst)->hh_dst.tbl->num_items++; \
          IF_HASH_NONFATAL_OOM( \
            if (_hs_oomed) { \
              HASH_ROLLBACK_BKT(hh_dst, dst, _dst_hh); \
              HASH_DELETE_HH(hh_dst, dst, _dst_hh); \
              _dst_hh->tbl = NULL; \
              uthash_nonfatal_oom(_elt); \
              continue; \
            } \
          ) \
          HASH_BLOOM_ADD(_dst_hh->tbl, _dst_hh->hashv); \
          _last_elt = _elt; \
          _last_elt_hh = _dst_hh; \
        } \
      } \
    } \
  } \
  HASH_FSCK(hh_dst, dst, "HASH_SELECT"); \
} while (0)

#define HASH_CLEAR(hh,head) \
do { \
  if ((head) != NULL) { \
    HASH_BLOOM_FREE((head)->hh.tbl); \
    uthash_free((head)->hh.tbl->buckets, \
        (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    (head) = NULL; \
  } \
} while (0)

#define HASH_OVERHEAD(hh,head) \
  (((head) != NULL) ? ( \
  (size_t)(((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) + \
           ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \
           sizeof(UT_hash_table) + \
           (HASH_BLOOM_BYTELEN))) : 0U)

#ifdef NO_DECLTYPE
#define HASH_ITER(hh,head,el,tmp) \
  for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \
      (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL)))
#else
#define HASH_ITER(hh,head,el,tmp) \
  for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL)); \
      (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL)))
#endif

/* obtain a count of items in the hash */
#define HASH_COUNT(head) HASH_CNT(hh,head)
#define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U)
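/* Illustrative sketch (not part of uthash itself): HASH_ITER is deletion-safe
 * because it saves the next element in tmp before the loop body runs. Names
 * are hypothetical and reuse the struct user example from earlier comments.
 *
 *   struct user *el, *tmp;
 *   unsigned n = HASH_COUNT(users);    // number of items currently stored
 *   HASH_ITER(hh, users, el, tmp) {
 *     HASH_DEL(users, el);             // safe: tmp already points past el
 *     free(el);
 *   }
 */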
typedef struct UT_hash_bucket {
   struct UT_hash_handle *hh_head;
   unsigned count;

   /* expand_mult is normally set to 0. In this situation, the max chain length
    * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
    * the bucket's chain exceeds this length, bucket expansion is triggered).
    * However, setting expand_mult to a non-zero value delays bucket expansion
    * (that would be triggered by additions to this particular bucket)
    * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
    * (The multiplier is simply expand_mult+1). The whole idea of this
    * multiplier is to reduce bucket expansions, since they are expensive, in
    * situations where we know that a particular bucket tends to be overused.
    * It is better to let its chain length grow to a longer yet-still-bounded
    * value, than to do an O(n) bucket expansion too often.
    */
   unsigned expand_mult;

} UT_hash_bucket;

/* random signature used only to find hash tables in external analysis */
#define HASH_SIGNATURE 0xa0111fe1u
#define HASH_BLOOM_SIGNATURE 0xb12220f2u

typedef struct UT_hash_table {
   UT_hash_bucket *buckets;
   unsigned num_buckets, log2_num_buckets;
   unsigned num_items;
   struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
   ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element) */

   /* in an ideal situation (all buckets used equally), no bucket would have
    * more than ceil(#items/#buckets) items. that's the ideal chain length. */
   unsigned ideal_chain_maxlen;

   /* nonideal_items is the number of items in the hash whose chain position
    * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
    * hash distribution; reaching them in a chain traversal takes >ideal steps */
   unsigned nonideal_items;

   /* ineffective expands occur when a bucket doubling was performed, but
    * afterward, more than half the items in the hash had nonideal chain
    * positions. If this happens on two consecutive expansions we inhibit any
    * further expansion, as it's not helping; this happens when the hash
    * function isn't a good fit for the key domain. When expansion is inhibited
    * the hash will still work, albeit no longer in constant time. */
   unsigned ineff_expands, noexpand;

   uint32_t signature; /* used only to find hash tables in external analysis */
#ifdef HASH_BLOOM
   uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
   uint8_t *bloom_bv;
   uint8_t bloom_nbits;
#endif

} UT_hash_table;

typedef struct UT_hash_handle {
   struct UT_hash_table *tbl;
   void *prev;                       /* prev element in app order      */
   void *next;                       /* next element in app order      */
   struct UT_hash_handle *hh_prev;   /* previous hh in bucket order    */
   struct UT_hash_handle *hh_next;   /* next hh in bucket order        */
   const void *key;                  /* ptr to enclosing struct's key  */
   unsigned keylen;                  /* enclosing struct's key len     */
   unsigned hashv;                   /* result of hash-fcn(key)        */
} UT_hash_handle;

#endif /* UTHASH_H */