Global Lender
A Pebble app for finding new and interesting loans on kiva.org
uthash.h
Go to the documentation of this file.
1 /*
2 Copyright (c) 2003-2014, Troy D. Hanson http://troydhanson.github.com/uthash/
3 All rights reserved.
4 Redistribution and use in source and binary forms, with or without
5 modification, are permitted provided that the following conditions are met:
6  * Redistributions of source code must retain the above copyright
7  notice, this list of conditions and the following disclaimer.
8 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
9 IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
10 TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
11 PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
12 OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
13 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
14 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
15 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
16 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
17 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
18 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 */
20 
21 #ifndef UTHASH_H
22 #define UTHASH_H
23 
24 #include <string.h> /* memcmp,strlen */
25 #include <stddef.h> /* ptrdiff_t */
26 // JRB: removed stdlib.h include for exit() since it is not defined on Pebble SDK 3.11
27 
/* These macros use decltype or the earlier __typeof GNU extension.
 As decltype is only available in newer compilers (VS2010 or gcc 4.3+
 when compiling c++ source) this code uses whatever method is needed
 or, for VS2008 where neither is available, uses casting workarounds. */
#if defined(_MSC_VER) /* MS compiler */
#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */
#define DECLTYPE(x) (decltype(x))
#else /* VS2008 or older (or VS2010 in C mode) */
#define NO_DECLTYPE
#define DECLTYPE(x)
#endif
#elif defined(__BORLANDC__) || defined(__LCC__) || defined(__WATCOMC__)
#define NO_DECLTYPE
#define DECLTYPE(x)
#else /* GNU, Sun and other compilers */
#define DECLTYPE(x) (__typeof(x))
#endif

/* Assign src to dst when dst's type cannot be named.  Without decltype the
 * raw pointer bits are copied through a char** alias instead of a typed
 * cast -- a deliberate workaround for compilers lacking __typeof. */
#ifdef NO_DECLTYPE
#define DECLTYPE_ASSIGN(dst,src) \
do { \
 char **_da_dst = (char**)(&(dst)); \
 *_da_dst = (char*)(src); \
} while(0)
#else
#define DECLTYPE_ASSIGN(dst,src) \
do { \
 (dst) = DECLTYPE(dst)(src); \
} while(0)
#endif
58 
/* a number of the hash function use uint32_t which isn't defined on Pre VS2010 */
#if defined(_WIN32)
#if defined(_MSC_VER) && _MSC_VER >= 1600
#include <stdint.h>
#elif defined(__WATCOMC__) || defined(__MINGW32__) || defined(__CYGWIN__)
#include <stdint.h>
#else
/* old MSVC fallback: assumes 32-bit int and 8-bit char (true on Win32) */
typedef unsigned int uint32_t;
typedef unsigned char uint8_t;
#endif
#elif defined(__GNUC__) && !defined(__VXWORKS__)
#include <stdint.h>
#else
/* NOTE(review): uint16_t is not typedef'd on this path, yet get16bits /
 * HASH_SFH below use it -- confirm <stdint.h> availability on such targets. */
typedef unsigned int uint32_t;
typedef unsigned char uint8_t;
#endif
75 
#define UTHASH_VERSION 1.9.9

/* Overridable hooks: define any of these before including uthash.h to
 * customize fatal-error handling and memory allocation. */
// JRB: exit() is not available in Pebble SDK 3.11, so changed to window_stack_pop_all()
#ifndef uthash_fatal
#define uthash_fatal(msg) window_stack_pop_all(false) /* fatal error (out of memory,etc) */
#endif
#ifndef uthash_malloc
#define uthash_malloc(sz) malloc(sz) /* malloc fcn */
#endif
#ifndef uthash_free
#define uthash_free(ptr,sz) free(ptr) /* free fcn */
#endif

#ifndef uthash_noexpand_fyi
#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
#endif
#ifndef uthash_expand_fyi
#define uthash_expand_fyi(tbl) /* can be defined to log expands */
#endif

/* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets (power of 2) */
#define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */
#define HASH_BKT_CAPACITY_THRESH 10U /* expand when bucket count reaches */
100 
/* calculate the element whose hash handle address is hhe */
#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))

/* Look up (keyptr,keylen) in the table reachable from head; out receives
 * the matching element pointer, or NULL if absent (or head is NULL).
 * When the bloom filter is enabled it is consulted first so buckets are
 * only scanned for keys that might be present. */
#define HASH_FIND(hh,head,keyptr,keylen,out) \
do { \
 out=NULL; \
 if (head != NULL) { \
 unsigned _hf_bkt,_hf_hashv; \
 HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \
 if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv) != 0) { \
 HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \
 keyptr,keylen,out); \
 } \
 } \
} while (0)
116 
/* Optional bloom filter (enable with -DHASH_BLOOM=n, n = log2 of bit count):
 * lets HASH_FIND skip bucket scans for keys that are definitely absent. */
#ifdef HASH_BLOOM
#define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM)
/* Bytes needed for the bit vector, rounding up to a whole byte.  The whole
 * expansion is parenthesized: the original un-parenthesized "a + b" form
 * could change meaning when embedded in a larger expression (e.g. under a
 * multiplication or sizeof) -- classic macro-expansion hazard. */
#define HASH_BLOOM_BYTELEN ((HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL))
/* Allocate and zero the bit vector; records nbits and a signature in tbl. */
#define HASH_BLOOM_MAKE(tbl) \
do { \
 (tbl)->bloom_nbits = HASH_BLOOM; \
 (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
 if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \
 memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \
 (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
} while (0)

#define HASH_BLOOM_FREE(tbl) \
do { \
 uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
} while (0)

/* Set/test bit idx of byte vector bv. */
#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U)))
#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U)))

#define HASH_BLOOM_ADD(tbl,hashv) \
 HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1U)))

#define HASH_BLOOM_TEST(tbl,hashv) \
 HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1U)))

#else
/* Bloom filter disabled: make/free/add are no-ops, test always "maybe". */
#define HASH_BLOOM_MAKE(tbl)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl,hashv)
#define HASH_BLOOM_TEST(tbl,hashv) (1)
#define HASH_BLOOM_BYTELEN 0U
#endif
150 
/* Allocate and initialize the shared UT_hash_table for a hash whose first
 * element is head: zeroed table, initial bucket array, app-order tail
 * pointer, and the byte offset (hho) from an element to its hash handle. */
#define HASH_MAKE_TABLE(hh,head) \
do { \
 (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \
 sizeof(UT_hash_table)); \
 if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \
 memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \
 (head)->hh.tbl->tail = &((head)->hh); \
 (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
 (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
 (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
 (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
 HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
 if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \
 memset((head)->hh.tbl->buckets, 0, \
 HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
 HASH_BLOOM_MAKE((head)->hh.tbl); \
 (head)->hh.tbl->signature = HASH_SIGNATURE; \
} while(0)
169 
/* Add element add, keyed on its struct member fieldname (keylen_in bytes). */
#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
 HASH_ADD_KEYPTR(hh,head,&((add)->fieldname),keylen_in,add)

/* Like HASH_ADD, but an existing element with an equal key is first removed
 * from the hash and returned via replaced (NULL if none).  The caller
 * retains ownership of the replaced element and must free it if needed. */
#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \
do { \
 replaced=NULL; \
 HASH_FIND(hh,head,&((add)->fieldname),keylen_in,replaced); \
 if (replaced!=NULL) { \
 HASH_DELETE(hh,head,replaced); \
 } \
 HASH_ADD(hh,head,fieldname,keylen_in,add); \
} while(0)
182 
/* Add element add whose key is stored elsewhere, at keyptr (keylen_in bytes).
 * Appends to the app-order list (creating the table on first add), bumps the
 * item count, hashes the key into a bucket and links the handle in.  The key
 * bytes are NOT copied; keyptr must outlive the element's membership. */
#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
do { \
 unsigned _ha_bkt; \
 (add)->hh.next = NULL; \
 (add)->hh.key = (char*)(keyptr); \
 (add)->hh.keylen = (unsigned)(keylen_in); \
 if (!(head)) { \
 head = (add); \
 (head)->hh.prev = NULL; \
 HASH_MAKE_TABLE(hh,head); \
 } else { \
 (head)->hh.tbl->tail->next = (add); \
 (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
 (head)->hh.tbl->tail = &((add)->hh); \
 } \
 (head)->hh.tbl->num_items++; \
 (add)->hh.tbl = (head)->hh.tbl; \
 HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \
 (add)->hh.hashv, _ha_bkt); \
 HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh); \
 HASH_BLOOM_ADD((head)->hh.tbl,(add)->hh.hashv); \
 HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \
 HASH_FSCK(hh,head); \
} while(0)
207 
/* Map a hash value onto a bucket index.  num_bkts must be a power of two,
 * so masking with (num_bkts - 1) is equivalent to hashv % num_bkts. */
#define HASH_TO_BKT( hashv, num_bkts, bkt ) \
do { \
 (bkt) = (hashv) & ((num_bkts) - 1U); \
} while(0)
212 
/* delete "delptr" from the hash table.
 * "the usual" patch-up process for the app-order doubly-linked-list.
 * The use of _hd_hh_del below deserves special explanation.
 * These used to be expressed using (delptr) but that led to a bug
 * if someone used the same symbol for the head and deletee, like
 * HASH_DELETE(hh,users,users);
 * We want that to work, but by changing the head (users) below
 * we were forfeiting our ability to further refer to the deletee (users)
 * in the patch-up process. Solution: use scratch space to
 * copy the deletee pointer, then the latter references are via that
 * scratch pointer rather than through the repointed (users) symbol.
 *
 * Deleting the last remaining element frees the whole table; the element
 * itself is never freed -- that remains the caller's responsibility. */
#define HASH_DELETE(hh,head,delptr) \
do { \
 struct UT_hash_handle *_hd_hh_del; \
 if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \
 uthash_free((head)->hh.tbl->buckets, \
 (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
 HASH_BLOOM_FREE((head)->hh.tbl); \
 uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
 head = NULL; \
 } else { \
 unsigned _hd_bkt; \
 _hd_hh_del = &((delptr)->hh); \
 if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \
 (head)->hh.tbl->tail = \
 (UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \
 (head)->hh.tbl->hho); \
 } \
 if ((delptr)->hh.prev != NULL) { \
 ((UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \
 (head)->hh.tbl->hho))->next = (delptr)->hh.next; \
 } else { \
 DECLTYPE_ASSIGN(head,(delptr)->hh.next); \
 } \
 if (_hd_hh_del->next != NULL) { \
 ((UT_hash_handle*)((ptrdiff_t)_hd_hh_del->next + \
 (head)->hh.tbl->hho))->prev = \
 _hd_hh_del->prev; \
 } \
 HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
 HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
 (head)->hh.tbl->num_items--; \
 } \
 HASH_FSCK(hh,head); \
} while (0)
259 
260 
/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL for the common key types:
 * NUL-terminated strings stored in-struct, int fields, and pointer fields.
 * All assume the hash handle is named "hh". */
#define HASH_FIND_STR(head,findstr,out) \
 HASH_FIND(hh,head,findstr,(unsigned)strlen(findstr),out)
#define HASH_ADD_STR(head,strfield,add) \
 HASH_ADD(hh,head,strfield[0],(unsigned int)strlen(add->strfield),add)
#define HASH_REPLACE_STR(head,strfield,add,replaced) \
 HASH_REPLACE(hh,head,strfield[0],(unsigned)strlen(add->strfield),add,replaced)
#define HASH_FIND_INT(head,findint,out) \
 HASH_FIND(hh,head,findint,sizeof(int),out)
#define HASH_ADD_INT(head,intfield,add) \
 HASH_ADD(hh,head,intfield,sizeof(int),add)
#define HASH_REPLACE_INT(head,intfield,add,replaced) \
 HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced)
#define HASH_FIND_PTR(head,findptr,out) \
 HASH_FIND(hh,head,findptr,sizeof(void *),out)
#define HASH_ADD_PTR(head,ptrfield,add) \
 HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
#define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \
 HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced)
#define HASH_DEL(head,delptr) \
 HASH_DELETE(hh,head,delptr)
282 
/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
 * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
 * It walks every bucket chain and the app-order list, verifying back-links
 * and that both traversals agree with the recorded item counts. */
#ifdef HASH_DEBUG
// JRB: exit() is not defined in Pebble SDK 3.11, so changed to window_stack_pop_all()
#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); window_stack_pop_all(false); } while (0)
#define HASH_FSCK(hh,head) \
do { \
 struct UT_hash_handle *_thh; \
 if (head) { \
 unsigned _bkt_i; \
 unsigned _count; \
 char *_prev; \
 _count = 0; \
 for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \
 unsigned _bkt_count = 0; \
 _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
 _prev = NULL; \
 while (_thh) { \
 if (_prev != (char*)(_thh->hh_prev)) { \
 HASH_OOPS("invalid hh_prev %p, actual %p\n", \
 _thh->hh_prev, _prev ); \
 } \
 _bkt_count++; \
 _prev = (char*)(_thh); \
 _thh = _thh->hh_next; \
 } \
 _count += _bkt_count; \
 if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
 HASH_OOPS("invalid bucket count %u, actual %u\n", \
 (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
 } \
 } \
 if (_count != (head)->hh.tbl->num_items) { \
 HASH_OOPS("invalid hh item count %u, actual %u\n", \
 (head)->hh.tbl->num_items, _count ); \
 } \
 /* traverse hh in app order; check next/prev integrity, count */ \
 _count = 0; \
 _prev = NULL; \
 _thh = &(head)->hh; \
 while (_thh) { \
 _count++; \
 if (_prev !=(char*)(_thh->prev)) { \
 HASH_OOPS("invalid prev %p, actual %p\n", \
 _thh->prev, _prev ); \
 } \
 _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
 _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \
 (head)->hh.tbl->hho) : NULL ); \
 } \
 if (_count != (head)->hh.tbl->num_items) { \
 HASH_OOPS("invalid app item count %u, actual %u\n", \
 (head)->hh.tbl->num_items, _count ); \
 } \
 } \
} while (0)
#else
#define HASH_FSCK(hh,head)
#endif
343 
/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
 * the descriptor to which this macro is defined for tuning the hash function.
 * The app can #include <unistd.h> to get the prototype for write(2). */
#ifdef HASH_EMIT_KEYS
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
do { \
 unsigned _klen = fieldlen; \
 write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
 write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \
} while (0)
#else
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
#endif
357 
/* default to the Jenkins hash unless overridden, e.g. -DHASH_FUNCTION=HASH_SAX */
#ifdef HASH_FUNCTION
#define HASH_FCN HASH_FUNCTION
#else
#define HASH_FCN HASH_JEN
#endif
364 
/* The Bernstein hash function, used in Perl prior to v5.6.
 * Each byte folds in as hashv = hashv*33 + byte, via (x<<5)+x = x*33. */
#define HASH_BER(key,keylen,num_bkts,hashv,bkt) \
do { \
 const unsigned char *_hb_p = (const unsigned char*)(key); \
 const unsigned char *_hb_end = _hb_p + (unsigned)keylen; \
 (hashv) = 0; \
 while (_hb_p != _hb_end) { \
 (hashv) = (((hashv) << 5) + (hashv)) + *_hb_p++; \
 } \
 bkt = (hashv) & (num_bkts-1U); \
} while (0)
376 
377 
/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
/* Shift-add-xor: each byte is mixed in with hashv ^= (h<<5)+(h>>2)+byte. */
#define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \
do { \
 unsigned _sx_pos; \
 const unsigned char *_sx_p = (const unsigned char*)(key); \
 hashv = 0; \
 for (_sx_pos = 0; _sx_pos < keylen; ++_sx_pos) { \
 hashv ^= (hashv << 5) + (hashv >> 2) + _sx_p[_sx_pos]; \
 } \
 bkt = hashv & (num_bkts-1U); \
} while (0)
/* FNV-1a variation: start from the 32-bit offset basis, then for each byte
 * xor it in and multiply by the FNV prime (xor-then-multiply order). */
#define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \
do { \
 unsigned _fn_pos; \
 const unsigned char *_fn_p = (const unsigned char*)(key); \
 hashv = 2166136261U; \
 for (_fn_pos = 0; _fn_pos < keylen; ++_fn_pos) { \
 hashv = (hashv ^ _fn_p[_fn_pos]) * 16777619U; \
 } \
 bkt = hashv & (num_bkts-1U); \
} while(0)
402 
/* Jenkins one-at-a-time hash: per-byte add/shift/xor mixing, followed by a
 * final three-step avalanche before the bucket mask. */
#define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \
do { \
 unsigned _ho_pos; \
 const unsigned char *_ho_p = (const unsigned char*)(key); \
 hashv = 0; \
 for (_ho_pos = 0; _ho_pos < keylen; ++_ho_pos) { \
 hashv += _ho_p[_ho_pos]; \
 hashv += (hashv << 10); \
 hashv ^= (hashv >> 6); \
 } \
 hashv += (hashv << 3); \
 hashv ^= (hashv >> 11); \
 hashv += (hashv << 15); \
 bkt = hashv & (num_bkts-1U); \
} while(0)
418 
/* Core mixing step of the Jenkins (lookup2-style) hash: reversibly stirs
 * three 32-bit accumulators.  Statement order is significant -- do not
 * reorder.  Arguments must be simple lvalues (each is evaluated many times). */
#define HASH_JEN_MIX(a,b,c) \
do { \
 a -= b; a -= c; a ^= ( c >> 13 ); \
 b -= c; b -= a; b ^= ( a << 8 ); \
 c -= a; c -= b; c ^= ( b >> 13 ); \
 a -= b; a -= c; a ^= ( c >> 12 ); \
 b -= c; b -= a; b ^= ( a << 16 ); \
 c -= a; c -= b; c ^= ( b >> 5 ); \
 a -= b; a -= c; a ^= ( c >> 3 ); \
 b -= c; b -= a; b ^= ( a << 10 ); \
 c -= a; c -= b; c ^= ( b >> 15 ); \
} while (0)
431 
/* Bob Jenkins' hash (the default HASH_FCN): consumes the key 12 bytes at a
 * time into three accumulators, mixes, then folds the 0-11 byte tail in via
 * the fallthrough switch before a final mix and bucket mask. */
#define HASH_JEN(key,keylen,num_bkts,hashv,bkt) \
do { \
 unsigned _hj_i,_hj_j,_hj_k; \
 unsigned const char *_hj_key=(unsigned const char*)(key); \
 hashv = 0xfeedbeefu; \
 _hj_i = _hj_j = 0x9e3779b9u; \
 _hj_k = (unsigned)(keylen); \
 while (_hj_k >= 12U) { \
 _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
 + ( (unsigned)_hj_key[2] << 16 ) \
 + ( (unsigned)_hj_key[3] << 24 ) ); \
 _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
 + ( (unsigned)_hj_key[6] << 16 ) \
 + ( (unsigned)_hj_key[7] << 24 ) ); \
 hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
 + ( (unsigned)_hj_key[10] << 16 ) \
 + ( (unsigned)_hj_key[11] << 24 ) ); \
 \
 HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
 \
 _hj_key += 12; \
 _hj_k -= 12U; \
 } \
 hashv += (unsigned)(keylen); \
 switch ( _hj_k ) { \
 case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \
 case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \
 case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \
 case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \
 case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \
 case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \
 case 5: _hj_j += _hj_key[4]; /* FALLTHROUGH */ \
 case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \
 case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \
 case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \
 case 1: _hj_i += _hj_key[0]; \
 } \
 HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
 bkt = hashv & (num_bkts-1U); \
} while(0)
472 
/* The Paul Hsieh hash function */
/* get16bits: read a little-endian 16-bit value from d.  On platforms known
 * to tolerate unaligned loads, a direct uint16_t read is used; otherwise the
 * portable byte-assembly fallback below applies. */
#undef get16bits
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
 || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
#define get16bits(d) (*((const uint16_t *) (d)))
#endif

#if !defined (get16bits)
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
 +(uint32_t)(((const uint8_t *)(d))[0]) )
#endif
/* Paul Hsieh's "SuperFastHash": consumes the key 4 bytes (two 16-bit reads)
 * per iteration, handles the 1-3 byte remainder, then applies a final
 * avalanche sequence.  NOTE(review): relies on uint16_t, which the fallback
 * typedef section above does not define -- assumed <stdint.h> is present. */
#define HASH_SFH(key,keylen,num_bkts,hashv,bkt) \
do { \
 unsigned const char *_sfh_key=(unsigned const char*)(key); \
 uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen; \
 \
 unsigned _sfh_rem = _sfh_len & 3U; \
 _sfh_len >>= 2; \
 hashv = 0xcafebabeu; \
 \
 /* Main loop */ \
 for (;_sfh_len > 0U; _sfh_len--) { \
 hashv += get16bits (_sfh_key); \
 _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \
 hashv = (hashv << 16) ^ _sfh_tmp; \
 _sfh_key += 2U*sizeof (uint16_t); \
 hashv += hashv >> 11; \
 } \
 \
 /* Handle end cases */ \
 switch (_sfh_rem) { \
 case 3: hashv += get16bits (_sfh_key); \
 hashv ^= hashv << 16; \
 hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \
 hashv += hashv >> 11; \
 break; \
 case 2: hashv += get16bits (_sfh_key); \
 hashv ^= hashv << 11; \
 hashv += hashv >> 17; \
 break; \
 case 1: hashv += *_sfh_key; \
 hashv ^= hashv << 10; \
 hashv += hashv >> 1; \
 } \
 \
 /* Force "avalanching" of final 127 bits */ \
 hashv ^= hashv << 3; \
 hashv += hashv >> 5; \
 hashv ^= hashv << 4; \
 hashv += hashv >> 17; \
 hashv ^= hashv << 25; \
 hashv += hashv >> 6; \
 bkt = hashv & (num_bkts-1U); \
} while(0)
527 
#ifdef HASH_USING_NO_STRICT_ALIASING
/* The MurmurHash exploits some CPU's (x86,x86_64) tolerance for unaligned reads.
 * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error.
 * MurmurHash uses the faster approach only on CPU's where we know it's safe.
 *
 * Note the preprocessor built-in defines can be emitted using:
 *
 * gcc -m64 -dM -E - < /dev/null (on gcc)
 * cc -## a.c (where a.c is a simple test file) (Sun Studio)
 */
#if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86))
#define MUR_GETBLOCK(p,i) p[i]
#else /* non intel */
/* Alignment of p within its word: selects which straddling-read helper to
 * use so every actual load below is word-aligned. */
#define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 3UL) == 0UL)
#define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 3UL) == 1UL)
#define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 3UL) == 2UL)
#define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 3UL) == 3UL)
#define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL))
/* Reassemble a misaligned 32-bit block from two aligned words; the byte
 * split (3+1, 2+2, 1+3) depends on alignment and endianness. */
#if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__))
#define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24))
#define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16))
#define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8))
#else /* assume little endian non-intel */
#define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24))
#define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16))
#define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8))
#endif
#define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) : \
 (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) : \
 (MUR_PLUS2_ALIGNED(p) ? MUR_TWO_TWO(p) : \
 MUR_ONE_THREE(p))))
#endif
#define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
/* Final avalanche of MurmurHash3 (fmix32). */
#define MUR_FMIX(_h) \
do { \
 _h ^= _h >> 16; \
 _h *= 0x85ebca6bu; \
 _h ^= _h >> 13; \
 _h *= 0xc2b2ae35u; \
 _h ^= _h >> 16; \
} while(0)

/* MurmurHash3 32-bit: 4-byte blocks through rotate/multiply mixing, then
 * the 0-3 byte tail via the fallthrough switch, then MUR_FMIX. */
#define HASH_MUR(key,keylen,num_bkts,hashv,bkt) \
do { \
 const uint8_t *_mur_data = (const uint8_t*)(key); \
 const int _mur_nblocks = (int)(keylen) / 4; \
 uint32_t _mur_h1 = 0xf88D5353u; \
 uint32_t _mur_c1 = 0xcc9e2d51u; \
 uint32_t _mur_c2 = 0x1b873593u; \
 uint32_t _mur_k1 = 0; \
 const uint8_t *_mur_tail; \
 const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+(_mur_nblocks*4)); \
 int _mur_i; \
 for(_mur_i = -_mur_nblocks; _mur_i!=0; _mur_i++) { \
 _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i); \
 _mur_k1 *= _mur_c1; \
 _mur_k1 = MUR_ROTL32(_mur_k1,15); \
 _mur_k1 *= _mur_c2; \
 \
 _mur_h1 ^= _mur_k1; \
 _mur_h1 = MUR_ROTL32(_mur_h1,13); \
 _mur_h1 = (_mur_h1*5U) + 0xe6546b64u; \
 } \
 _mur_tail = (const uint8_t*)(_mur_data + (_mur_nblocks*4)); \
 _mur_k1=0; \
 switch((keylen) & 3U) { \
 case 3: _mur_k1 ^= (uint32_t)_mur_tail[2] << 16; /* FALLTHROUGH */ \
 case 2: _mur_k1 ^= (uint32_t)_mur_tail[1] << 8; /* FALLTHROUGH */ \
 case 1: _mur_k1 ^= (uint32_t)_mur_tail[0]; \
 _mur_k1 *= _mur_c1; \
 _mur_k1 = MUR_ROTL32(_mur_k1,15); \
 _mur_k1 *= _mur_c2; \
 _mur_h1 ^= _mur_k1; \
 } \
 _mur_h1 ^= (uint32_t)(keylen); \
 MUR_FMIX(_mur_h1); \
 hashv = _mur_h1; \
 bkt = hashv & (num_bkts-1U); \
} while(0)
#endif /* HASH_USING_NO_STRICT_ALIASING */
608 
/* key comparison function; return 0 if keys equal.
 * Arguments are parenthesized so expression arguments (e.g. "p + 1")
 * expand safely inside the memcmp call. */
#define HASH_KEYCMP(a,b,len) memcmp((a),(b),(unsigned long)(len))
611 
/* iterate over items in a known bucket to find desired item.
 * Compares keylen first (cheap) before the byte-wise key comparison;
 * out ends as the matching element or NULL if the chain is exhausted. */
#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \
do { \
 if (head.hh_head != NULL) { DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,head.hh_head)); } \
 else { out=NULL; } \
 while (out != NULL) { \
 if ((out)->hh.keylen == (keylen_in)) { \
 if ((HASH_KEYCMP((out)->hh.key,keyptr,keylen_in)) == 0) { break; } \
 } \
 if ((out)->hh.hh_next != NULL) { DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,(out)->hh.hh_next)); } \
 else { out = NULL; } \
 } \
} while(0)
625 
/* add an item to a bucket: push addhh at the head of the bucket chain,
 * then expand the whole table if this chain has grown past the per-bucket
 * threshold (scaled by expand_mult) and expansion isn't disabled. */
#define HASH_ADD_TO_BKT(head,addhh) \
do { \
 head.count++; \
 (addhh)->hh_next = head.hh_head; \
 (addhh)->hh_prev = NULL; \
 if (head.hh_head != NULL) { (head).hh_head->hh_prev = (addhh); } \
 (head).hh_head=addhh; \
 if ((head.count >= ((head.expand_mult+1U) * HASH_BKT_CAPACITY_THRESH)) \
 && ((addhh)->tbl->noexpand != 1U)) { \
 HASH_EXPAND_BUCKETS((addhh)->tbl); \
 } \
} while(0)
639 
/* remove an item from a given bucket: unlink hh_del from the bucket's
 * doubly-linked chain and decrement the bucket count.  The hh parameter is
 * unused; it is kept for interface compatibility with the call site.
 * Wrapped in do{...}while(0): the original expansion was a sequence of bare
 * statements, which silently breaks if the macro is ever used after an
 * unbraced if/else.  hh_del is also parenthesized at each use. */
#define HASH_DEL_IN_BKT(hh,head,hh_del) \
do { \
 (head).count--; \
 if ((head).hh_head == (hh_del)) { \
 (head).hh_head = (hh_del)->hh_next; \
 } \
 if ((hh_del)->hh_prev) { \
 (hh_del)->hh_prev->hh_next = (hh_del)->hh_next; \
 } \
 if ((hh_del)->hh_next) { \
 (hh_del)->hh_next->hh_prev = (hh_del)->hh_prev; \
 } \
} while (0)
652 
653 /* Bucket expansion has the effect of doubling the number of buckets
654  * and redistributing the items into the new buckets. Ideally the
655  * items will distribute more or less evenly into the new buckets
656  * (the extent to which this is true is a measure of the quality of
657  * the hash function as it applies to the key domain).
658  *
659  * With the items distributed into more buckets, the chain length
660  * (item count) in each bucket is reduced. Thus by expanding buckets
661  * the hash keeps a bound on the chain length. This bounded chain
662  * length is the essence of how a hash provides constant time lookup.
663  *
664  * The calculation of tbl->ideal_chain_maxlen below deserves some
665  * explanation. First, keep in mind that we're calculating the ideal
666  * maximum chain length based on the *new* (doubled) bucket count.
667  * In fractions this is just n/b (n=number of items,b=new num buckets).
668  * Since the ideal chain length is an integer, we want to calculate
669  * ceil(n/b). We don't depend on floating point arithmetic in this
670  * hash, so to calculate ceil(n/b) with integers we could write
671  *
672  * ceil(n/b) = (n/b) + ((n%b)?1:0)
673  *
674  * and in fact a previous version of this hash did just that.
675  * But now we have improved things a bit by recognizing that b is
676  * always a power of two. We keep its base 2 log handy (call it lb),
677  * so now we can write this with a bit shift and logical AND:
678  *
679  * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
680  *
681  */
/* Double the bucket array and rehash every chained handle into it (see the
 * rationale comment above).  Tracks how many items land in over-long chains
 * (nonideal_items); two consecutive ineffective expansions set noexpand. */
#define HASH_EXPAND_BUCKETS(tbl) \
do { \
 unsigned _he_bkt; \
 unsigned _he_bkt_i; \
 struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
 UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
 _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
 2UL * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
 if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \
 memset(_he_new_buckets, 0, \
 2UL * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
 tbl->ideal_chain_maxlen = \
 (tbl->num_items >> (tbl->log2_num_buckets+1U)) + \
 (((tbl->num_items & ((tbl->num_buckets*2U)-1U)) != 0U) ? 1U : 0U); \
 tbl->nonideal_items = 0; \
 for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \
 { \
 _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \
 while (_he_thh != NULL) { \
 _he_hh_nxt = _he_thh->hh_next; \
 HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2U, _he_bkt); \
 _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \
 if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
 tbl->nonideal_items++; \
 _he_newbkt->expand_mult = _he_newbkt->count / \
 tbl->ideal_chain_maxlen; \
 } \
 _he_thh->hh_prev = NULL; \
 _he_thh->hh_next = _he_newbkt->hh_head; \
 if (_he_newbkt->hh_head != NULL) { _he_newbkt->hh_head->hh_prev = \
 _he_thh; } \
 _he_newbkt->hh_head = _he_thh; \
 _he_thh = _he_hh_nxt; \
 } \
 } \
 uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
 tbl->num_buckets *= 2U; \
 tbl->log2_num_buckets++; \
 tbl->buckets = _he_new_buckets; \
 tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \
 (tbl->ineff_expands+1U) : 0U; \
 if (tbl->ineff_expands > 1U) { \
 tbl->noexpand=1; \
 uthash_noexpand_fyi(tbl); \
 } \
 uthash_expand_fyi(tbl); \
} while(0)
729 
730 
/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
/* Note that HASH_SORT assumes the hash handle name to be hh.
 * HASH_SRT was added to allow the hash handle name to be passed in.
 * Sorts the app-order (next/prev) list in place by repeatedly merging runs
 * of doubling size; bucket chains are untouched.  cmpfcn receives two
 * element pointers and returns <0/0/>0, strcmp-style.  The merge is
 * stable (ties keep their original relative order, via the <= 0 branch). */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
#define HASH_SRT(hh,head,cmpfcn) \
do { \
 unsigned _hs_i; \
 unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
 struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
 if (head != NULL) { \
 _hs_insize = 1; \
 _hs_looping = 1; \
 _hs_list = &((head)->hh); \
 while (_hs_looping != 0U) { \
 _hs_p = _hs_list; \
 _hs_list = NULL; \
 _hs_tail = NULL; \
 _hs_nmerges = 0; \
 while (_hs_p != NULL) { \
 _hs_nmerges++; \
 _hs_q = _hs_p; \
 _hs_psize = 0; \
 for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \
 _hs_psize++; \
 _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ? \
 ((void*)((char*)(_hs_q->next) + \
 (head)->hh.tbl->hho)) : NULL); \
 if (! (_hs_q) ) { break; } \
 } \
 _hs_qsize = _hs_insize; \
 while ((_hs_psize > 0U) || ((_hs_qsize > 0U) && (_hs_q != NULL))) {\
 if (_hs_psize == 0U) { \
 _hs_e = _hs_q; \
 _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ? \
 ((void*)((char*)(_hs_q->next) + \
 (head)->hh.tbl->hho)) : NULL); \
 _hs_qsize--; \
 } else if ( (_hs_qsize == 0U) || (_hs_q == NULL) ) { \
 _hs_e = _hs_p; \
 if (_hs_p != NULL){ \
 _hs_p = (UT_hash_handle*)((_hs_p->next != NULL) ? \
 ((void*)((char*)(_hs_p->next) + \
 (head)->hh.tbl->hho)) : NULL); \
 } \
 _hs_psize--; \
 } else if (( \
 cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
 DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
 ) <= 0) { \
 _hs_e = _hs_p; \
 if (_hs_p != NULL){ \
 _hs_p = (UT_hash_handle*)((_hs_p->next != NULL) ? \
 ((void*)((char*)(_hs_p->next) + \
 (head)->hh.tbl->hho)) : NULL); \
 } \
 _hs_psize--; \
 } else { \
 _hs_e = _hs_q; \
 _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ? \
 ((void*)((char*)(_hs_q->next) + \
 (head)->hh.tbl->hho)) : NULL); \
 _hs_qsize--; \
 } \
 if ( _hs_tail != NULL ) { \
 _hs_tail->next = ((_hs_e != NULL) ? \
 ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \
 } else { \
 _hs_list = _hs_e; \
 } \
 if (_hs_e != NULL) { \
 _hs_e->prev = ((_hs_tail != NULL) ? \
 ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \
 } \
 _hs_tail = _hs_e; \
 } \
 _hs_p = _hs_q; \
 } \
 if (_hs_tail != NULL){ \
 _hs_tail->next = NULL; \
 } \
 if ( _hs_nmerges <= 1U ) { \
 _hs_looping=0; \
 (head)->hh.tbl->tail = _hs_tail; \
 DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
 } \
 _hs_insize *= 2U; \
 } \
 HASH_FSCK(hh,head); \
 } \
} while (0)
821 
/* This function selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * hash handle that must be present in the structure.
 * cond is a predicate taking a void* element and returning nonzero
 * to select; key/keylen/hashv are shared with the source handle. */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
do { \
 unsigned _src_bkt, _dst_bkt; \
 void *_last_elt=NULL, *_elt; \
 UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
 ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
 if (src != NULL) { \
 for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
 for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
 _src_hh != NULL; \
 _src_hh = _src_hh->hh_next) { \
 _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
 if (cond(_elt)) { \
 _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \
 _dst_hh->key = _src_hh->key; \
 _dst_hh->keylen = _src_hh->keylen; \
 _dst_hh->hashv = _src_hh->hashv; \
 _dst_hh->prev = _last_elt; \
 _dst_hh->next = NULL; \
 if (_last_elt_hh != NULL) { _last_elt_hh->next = _elt; } \
 if (dst == NULL) { \
 DECLTYPE_ASSIGN(dst,_elt); \
 HASH_MAKE_TABLE(hh_dst,dst); \
 } else { \
 _dst_hh->tbl = (dst)->hh_dst.tbl; \
 } \
 HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
 HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \
 (dst)->hh_dst.tbl->num_items++; \
 _last_elt = _elt; \
 _last_elt_hh = _dst_hh; \
 } \
 } \
 } \
 } \
 HASH_FSCK(hh_dst,dst); \
} while (0)
864 
/* Release all of uthash's bookkeeping for this hash (bucket array, bloom
 * filter if enabled, and the table struct) and set head to NULL.
 * NOTE: the elements themselves are NOT freed -- only the bucket array
 * and table are passed to uthash_free here. */
#define HASH_CLEAR(hh,head)                                                      \
do {                                                                             \
  if (head != NULL) {                                                            \
    uthash_free((head)->hh.tbl->buckets,                                         \
                (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket));      \
    HASH_BLOOM_FREE((head)->hh.tbl);                                             \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                          \
    (head)=NULL;                                                                 \
  }                                                                              \
} while(0)
875 
/* Memory overhead (bytes) of uthash bookkeeping for this hash: one
 * UT_hash_handle per item, the bucket array, the table struct, and the
 * bloom filter byte length (0 unless HASH_BLOOM is defined).
 * Evaluates to 0 for a NULL head. */
#define HASH_OVERHEAD(hh,head)                                                   \
 ((head != NULL) ? (                                                             \
 (size_t)(((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) +                 \
          ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) +               \
          sizeof(UT_hash_table) +                                                \
          (HASH_BLOOM_BYTELEN))) : 0U)
882 
/* Iterate over all items in app (insertion) order: el is the current
 * element, tmp caches the next element BEFORE the loop body runs, so el
 * may safely be deleted from the hash inside the body. */
#ifdef NO_DECLTYPE
/* no decltype/__typeof available: assign the next pointer through a
 * char** alias of tmp instead of a typed cast */
#define HASH_ITER(hh,head,el,tmp)                                                \
for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \
  (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL)))
#else
#define HASH_ITER(hh,head,el,tmp)                                                \
for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL));      \
  (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL)))
#endif
892 
/* obtain a count of items in the hash */
#define HASH_COUNT(head) HASH_CNT(hh,head)
/* a NULL head is an empty hash: count is 0; otherwise read the table's
 * running num_items counter (O(1)) */
#define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U)
896 
/* One bucket of the hash table: the head of a doubly-linked chain of
 * hash handles (bucket order), plus chain-length bookkeeping used to
 * decide when to expand the bucket array. */
typedef struct UT_hash_bucket {
   /* head of this bucket's chain, linked via hh_next/hh_prev.
    * (Restored: this member line was dropped by the doc extraction; the
    * cross-reference index places hh_head at uthash.h:898.) */
   struct UT_hash_handle *hh_head;
   /* number of items currently chained in this bucket */
   unsigned count;

   /* expand_mult is normally set to 0. In this situation, the max chain length
    * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
    * the bucket's chain exceeds this length, bucket expansion is triggered).
    * However, setting expand_mult to a non-zero value delays bucket expansion
    * (that would be triggered by additions to this particular bucket)
    * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
    * (The multiplier is simply expand_mult+1). The whole idea of this
    * multiplier is to reduce bucket expansions, since they are expensive, in
    * situations where we know that a particular bucket tends to be overused.
    * It is better to let its chain length grow to a longer yet-still-bounded
    * value, than to do an O(n) bucket expansion too often.
    */
   unsigned expand_mult;

} UT_hash_bucket;

/* random signature used only to find hash tables in external analysis */
#define HASH_SIGNATURE 0xa0111fe1u       /* stamped into UT_hash_table.signature */
#define HASH_BLOOM_SIGNATURE 0xb12220f2u /* stamped into UT_hash_table.bloom_sig */
920 
/* Per-hash bookkeeping, allocated by HASH_MAKE_TABLE and shared by every
 * element's hash handle via hh->tbl. Freed by HASH_CLEAR / last delete. */
typedef struct UT_hash_table {
   /* the bucket array (num_buckets entries).
    * (Restored: this member line was dropped by the doc extraction; the
    * cross-reference index places buckets at uthash.h:922. The struct tag
    * is used here; it names the same type as the UT_hash_bucket typedef.) */
   struct UT_hash_bucket *buckets;
   unsigned num_buckets, log2_num_buckets;
   unsigned num_items;
   struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
   ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element) */

   /* in an ideal situation (all buckets used equally), no bucket would have
    * more than ceil(#items/#buckets) items. that's the ideal chain length. */
   /* (Restored: member line dropped by the doc extraction; the index places
    * ideal_chain_maxlen at uthash.h:930.) */
   unsigned ideal_chain_maxlen;

   /* nonideal_items is the number of items in the hash whose chain position
    * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
    * hash distribution; reaching them in a chain traversal takes >ideal steps */
   unsigned nonideal_items;

   /* ineffective expands occur when a bucket doubling was performed, but
    * afterward, more than half the items in the hash had nonideal chain
    * positions. If this happens on two consecutive expansions we inhibit any
    * further expansion, as it's not helping; this happens when the hash
    * function isn't a good fit for the key domain. When expansion is inhibited
    * the hash will still work, albeit no longer in constant time. */
   unsigned ineff_expands, noexpand;

   uint32_t signature; /* used only to find hash tables in external analysis */
#ifdef HASH_BLOOM
   uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
   uint8_t *bloom_bv;
   uint8_t bloom_nbits;
#endif

} UT_hash_table;
953 
/* The hash handle embedded in each hashed structure. Maintains both the
 * application-order list (prev/next) and the bucket chain (hh_prev/hh_next),
 * plus the key pointer/length and cached hash value. */
typedef struct UT_hash_handle {
   /* the table this element belongs to.
    * (Restored: this member line was dropped by the doc extraction; the
    * cross-reference index places tbl at uthash.h:955.) */
   struct UT_hash_table *tbl;
   void *prev;                       /* prev element in app order      */
   void *next;                       /* next element in app order      */
   struct UT_hash_handle *hh_prev;   /* previous hh in bucket order    */
   struct UT_hash_handle *hh_next;   /* next hh in bucket order        */
   void *key;                        /* ptr to enclosing struct's key  */
   unsigned keylen;                  /* enclosing struct's key len     */
   unsigned hashv;                   /* result of hash-fcn(key)        */
} UT_hash_handle;

965 #endif /* UTHASH_H */
struct UT_hash_handle * hh_next
Definition: uthash.h:959
UT_hash_bucket * buckets
Definition: uthash.h:922
void * key
Definition: uthash.h:960
unsigned keylen
Definition: uthash.h:961
struct UT_hash_table * tbl
Definition: uthash.h:955
struct UT_hash_handle * hh_prev
Definition: uthash.h:958
unsigned hashv
Definition: uthash.h:962
unsigned expand_mult
Definition: uthash.h:913
unsigned count
Definition: uthash.h:899
uint32_t signature
Definition: uthash.h:945
unsigned num_items
Definition: uthash.h:924
void * prev
Definition: uthash.h:956
unsigned num_buckets
Definition: uthash.h:923
unsigned noexpand
Definition: uthash.h:943
struct UT_hash_handle * tail
Definition: uthash.h:925
struct UT_hash_bucket UT_hash_bucket
unsigned int uint32_t
Definition: uthash.h:72
Definition: uthash.h:921
ptrdiff_t hho
Definition: uthash.h:926
struct UT_hash_table UT_hash_table
unsigned ideal_chain_maxlen
Definition: uthash.h:930
struct UT_hash_handle UT_hash_handle
Definition: uthash.h:897
void * next
Definition: uthash.h:957
struct UT_hash_handle * hh_head
Definition: uthash.h:898
unsigned char uint8_t
Definition: uthash.h:73
Definition: uthash.h:954
unsigned nonideal_items
Definition: uthash.h:935