iddict.c
// This file is a part of Julia. License is MIT: https://julialang.org/license

#define hash_size(h) (h->length / 2)
// compute empirical max-probe for a given size
#define max_probe(size) ((size) <= 1024 ? 16 : (size) >> 6)
#define keyhash(k) jl_object_id_(jl_typetagof(k), k)
#define h2index(hv, sz) (size_t)(((hv) & ((sz)-1)) * 2)
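
// Layout note: the backing jl_genericmemory_t stores interleaved [key, value]
// pairs, so a memory of `length` elements provides length/2 hash slots (hence
// hash_size), h2index always yields an even (key) index, and the matching
// value lives at index + 1.  max_probe caps the linear probe length at 16 for
// tables of up to 1024 slots and at size/64 beyond that.
//
// Worked example (illustrative only, not taken from the sources): with a
// memory of length 32 there are 16 slots, so a key whose hash is 0x2a lands at
// ((0x2a & 15) * 2) == 20, i.e. the key would sit at ptr[20] and its value at
// ptr[21].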

static inline int jl_table_assign_bp(jl_genericmemory_t **pa, jl_value_t *key, jl_value_t *val);

JL_DLLEXPORT jl_genericmemory_t *jl_idtable_rehash(jl_genericmemory_t *a, size_t newsz)
{
size_t sz = a->length;
size_t i;
jl_value_t **ol = (jl_value_t **) a->ptr;
jl_genericmemory_t *newa = jl_alloc_memory_any(newsz);
// keep the original memory in the original slot since we need `ol`
// to be valid in the loop below.
JL_GC_PUSH2(&newa, &a);
for (i = 0; i < sz; i += 2) {
if (ol[i + 1] != NULL) {
jl_table_assign_bp(&newa, ol[i], ol[i + 1]);
            // the GC rooting above is necessary because allocation can (and
            // will) occur inside jl_table_assign_bp when it grows the new table
}
}
JL_GC_POP();
return newa;
}
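
// Note on the rehash above: only pairs whose value slot is non-NULL are copied
// into the new memory, so deleted entries (key replaced by `jl_nothing`, value
// cleared to NULL by jl_eqtable_pop below) are dropped rather than carried
// over.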

static inline int jl_table_assign_bp(jl_genericmemory_t **pa, jl_value_t *key, jl_value_t *val)
{
    // pa points to an **un**rooted address
uint_t hv;
jl_genericmemory_t *a = *pa;
size_t orig, index, iter, empty_slot;
size_t newsz, sz = hash_size(a);
if (sz == 0) {
a = jl_alloc_memory_any(HT_N_INLINE);
sz = hash_size(a);
*pa = a;
}
size_t maxprobe = max_probe(sz);
_Atomic(jl_value_t*) *tab = (_Atomic(jl_value_t*)*) a->ptr;
hv = keyhash(key);
while (1) {
iter = 0;
index = h2index(hv, sz);
sz *= 2;
orig = index;
empty_slot = -1;
do {
jl_value_t *k2 = jl_atomic_load_relaxed(&tab[index]);
if (k2 == NULL) {
if (empty_slot == -1)
empty_slot = index;
break;
}
if (jl_egal(key, k2)) {
if (jl_atomic_load_relaxed(&tab[index + 1]) != NULL) {
jl_atomic_store_release(&tab[index + 1], val);
jl_gc_wb(a, val);
return 0;
}
                // `nothing` is our sentinel value for deletion, so we need to keep searching if it's also our search key
assert(key == jl_nothing);
if (empty_slot == -1)
empty_slot = index;
}
if (empty_slot == -1 && jl_atomic_load_relaxed(&tab[index + 1]) == NULL) {
assert(jl_atomic_load_relaxed(&tab[index]) == jl_nothing);
empty_slot = index;
}
index = (index + 2) & (sz - 1);
iter++;
} while (iter <= maxprobe && index != orig);
if (empty_slot != -1) {
jl_atomic_store_release(&tab[empty_slot], key);
jl_gc_wb(a, key);
jl_atomic_store_release(&tab[empty_slot + 1], val);
jl_gc_wb(a, val);
return 1;
}
        /* table full */
        /* grow the table (doubling or quadrupling its size), rehash, and retry the insert */
        /* it's important to grow the table really fast; otherwise we waste */
        /* lots of time rehashing all the keys over and over. */
        sz = a->length;
if (sz < HT_N_INLINE)
newsz = HT_N_INLINE;
else if (sz >= (1 << 19) || (sz <= (1 << 8)))
newsz = sz << 1;
else
newsz = sz << 2;
*pa = jl_idtable_rehash(*pa, newsz);
a = *pa;
tab = (_Atomic(jl_value_t*)*) a->ptr;
sz = hash_size(a);
maxprobe = max_probe(sz);
}
}
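
// Growth policy of jl_table_assign_bp, with a worked example (the thresholds
// are read off the code above; the concrete sizes are illustrative): once no
// slot is found within maxprobe steps, a memory of length sz is rehashed into
//   - HT_N_INLINE elements          if sz < HT_N_INLINE,
//   - sz << 1 (double) elements     if sz <= (1 << 8) or sz >= (1 << 19),
//   - sz << 2 (quadruple) elements  otherwise.
// So a full memory of length 1024 (512 slots) would grow to 4096, while one of
// length 256 would only double to 512.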

/* returns bp if key is in hash, otherwise NULL */
inline _Atomic(jl_value_t*) *jl_table_peek_bp(jl_genericmemory_t *a, jl_value_t *key) JL_NOTSAFEPOINT
{
size_t sz = hash_size(a);
if (sz == 0)
return NULL;
size_t maxprobe = max_probe(sz);
_Atomic(jl_value_t*) *tab = (_Atomic(jl_value_t*)*) a->ptr;
uint_t hv = keyhash(key);
size_t index = h2index(hv, sz);
sz *= 2;
size_t orig = index;
size_t iter = 0;
do {
jl_value_t *k2 = jl_atomic_load_relaxed(&tab[index]); // just to ensure the load doesn't get duplicated
if (k2 == NULL)
return NULL;
if (jl_egal(key, k2)) {
if (jl_atomic_load_relaxed(&tab[index + 1]) != NULL)
return &tab[index + 1];
            // `nothing` is our sentinel value for deletion, so we need to keep searching if it's also our search key
if (key != jl_nothing)
return NULL; // concurrent insertion hasn't completed yet
}
index = (index + 2) & (sz - 1);
iter++;
} while (iter <= maxprobe && index != orig);
return NULL;
}
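
// Memory-ordering sketch: jl_table_assign_bp publishes the key and then the
// value with release stores, while jl_table_peek_bp reads both with relaxed
// loads, so a lookup that observes a freshly written key whose value slot is
// still NULL simply reports the key as absent until the racing insert becomes
// fully visible.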

JL_DLLEXPORT
jl_genericmemory_t *jl_eqtable_put(jl_genericmemory_t *h, jl_value_t *key, jl_value_t *val, int *p_inserted)
{
int inserted = jl_table_assign_bp(&h, key, val);
if (p_inserted)
*p_inserted = inserted;
return h;
}

// Note: lookups in the IdDict may proceed concurrently (including with
// insertions), provided there are no deletions and all insertions are
// serialized by an external lock.
JL_DLLEXPORT
jl_value_t *jl_eqtable_get(jl_genericmemory_t *h, jl_value_t *key, jl_value_t *deflt) JL_NOTSAFEPOINT
{
_Atomic(jl_value_t*) *bp = jl_table_peek_bp(h, key);
return (bp == NULL) ? deflt : jl_atomic_load_relaxed(bp);
}

jl_value_t *jl_eqtable_getkey(jl_genericmemory_t *h, jl_value_t *key, jl_value_t *deflt) JL_NOTSAFEPOINT
{
_Atomic(jl_value_t*) *bp = jl_table_peek_bp(h, key);
return (bp == NULL) ? deflt : jl_atomic_load_relaxed(bp - 1);
}

JL_DLLEXPORT
jl_value_t *jl_eqtable_pop(jl_genericmemory_t *h, jl_value_t *key, jl_value_t *deflt, int *found)
{
_Atomic(jl_value_t*) *bp = jl_table_peek_bp(h, key);
if (found)
*found = (bp != NULL);
if (bp == NULL)
return deflt;
jl_value_t *val = jl_atomic_load_relaxed(bp);
jl_atomic_store_relaxed(bp - 1, jl_nothing); // clear the key
jl_atomic_store_relaxed(bp, NULL); // and the value (briefly corrupting the table)
return val;
}
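
// Usage sketch for the exported entry points (hypothetical caller, not part of
// this file; `key` and `val` stand for rooted jl_value_t pointers).  Note that
// jl_eqtable_put may reallocate the backing memory, so the caller must keep
// the returned handle:
//
//     int inserted, found;
//     jl_genericmemory_t *table = jl_alloc_memory_any(HT_N_INLINE);
//     table = jl_eqtable_put(table, key, val, &inserted);
//     jl_value_t *v = jl_eqtable_get(table, key, jl_nothing);
//     jl_value_t *popped = jl_eqtable_pop(table, key, jl_nothing, &found);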

JL_DLLEXPORT
size_t jl_eqtable_nextind(jl_genericmemory_t *t, size_t i)
{
if (i & 1)
i++;
size_t alen = t->length;
while (i < alen && ((void**) t->ptr)[i + 1] == NULL)
i += 2;
if (i >= alen)
return (size_t)-1;
return i;
}
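
// Iteration sketch (hypothetical caller): jl_eqtable_nextind returns the even
// index of the next slot whose value is non-NULL, or (size_t)-1 once the table
// is exhausted, so all live pairs can be walked roughly like this:
//
//     for (size_t i = jl_eqtable_nextind(t, 0); i != (size_t)-1;
//          i = jl_eqtable_nextind(t, i + 2)) {
//         jl_value_t *k = ((jl_value_t**) t->ptr)[i];
//         jl_value_t *v = ((jl_value_t**) t->ptr)[i + 1];
//         // ... use k and v ...
//     }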

#undef hash_size
#undef max_probe
#undef h2index