code / tomo

Lines41.3K C23.7K Markdown9.7K YAML5.0K Tomo2.3K
7 others 763
Python231 Shell230 make212 INI47 Text21 SVG16 Lua6
(734 lines)
1 // Hash table (aka Dictionary) Implementation
2 // Hash keys and values are stored *by value*
3 // The hash insertion/lookup implementation is based on Lua's tables,
4 // which use a chained scatter with Brent's variation.
6 #include <assert.h>
7 #include <gc.h>
8 #include <stdarg.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <sys/param.h>
14 #include "c_strings.h"
15 #include "datatypes.h"
16 #include "lists.h"
17 #include "memory.h"
18 #include "metamethods.h"
19 #include "pointers.h"
20 #include "siphash.h"
21 #include "structs.h"
22 #include "tables.h"
23 #include "text.h"
24 #include "types.h"
25 #include "util.h"
// Helper accessors for type functions/values:
// HASH_KEY: hash a key with the table's key type, reduced to a bucket index
// (relies on a `type` variable being in scope at the expansion site).
#define HASH_KEY(t, k) (generic_hash((k), type->TableInfo.key) % ((t).bucket_info->count))
// EQUAL_KEYS: compare two keys with the table's key type (same `type` requirement).
#define EQUAL_KEYS(x, y) (generic_equal((x), (y), type->TableInfo.key))
// Sentinel marking the end of a bucket collision chain:
#define END_OF_CHAIN UINT32_MAX

// GET_ENTRY: address of the i-th key/value entry in the table's entry list.
#define GET_ENTRY(t, i) ((t).entries.data + (t).entries.stride * (i))
34 static TypeInfo_t MemoryPointer = {
35 .size = sizeof(void *),
36 .align = __alignof__(void *),
37 .tag = PointerInfo,
38 .PointerInfo =
40 .sigil = "@",
41 .pointed = &Memory$info,
42 },
43 .metamethods = Pointer$metamethods,
44 };
// TypeInfo for a Table_t mapping C strings to void* values.
// Used by the Table$str_* convenience wrappers below.
const TypeInfo_t CStrToVoidStarTable = {
    .size = sizeof(Table_t),
    .align = __alignof__(Table_t),
    .tag = TableInfo,
    .TableInfo = {.key = &CString$info, .value = &MemoryPointer},
    .metamethods = Table$metamethods,
};
54 PUREFUNC static INLINE size_t entry_size(const TypeInfo_t *info) {
55 size_t size = (size_t)info->TableInfo.key->size;
56 if (info->TableInfo.value->align > 1 && size % (size_t)info->TableInfo.value->align)
57 size += (size_t)info->TableInfo.value->align - (size % (size_t)info->TableInfo.value->align); // padding
58 size += (size_t)info->TableInfo.value->size;
59 if (info->TableInfo.key->align > 1 && size % (size_t)info->TableInfo.key->align)
60 size += (size_t)info->TableInfo.key->align - (size % (size_t)info->TableInfo.key->align); // padding
61 return size;
64 PUREFUNC static INLINE size_t value_offset(const TypeInfo_t *info) {
65 size_t offset = (size_t)info->TableInfo.key->size;
66 if ((size_t)info->TableInfo.value->align > 1 && offset % (size_t)info->TableInfo.value->align)
67 offset += (size_t)info->TableInfo.value->align - (offset % (size_t)info->TableInfo.value->align); // padding
68 return offset;
// Ensure this table owns its memory before mutating it: if the entry list
// or the bucket array is shared (refcount != 0), replace it with a private
// copy first (copy-on-write).
static void maybe_copy_on_write(Table_t *t, const TypeInfo_t *type) {
    if (t->entries.data_refcount != 0) List$compact(&t->entries, (int64_t)entry_size(type));

    if (t->bucket_info && t->bucket_info->data_refcount != 0) {
        size_t size = sizeof(bucket_info_t) + sizeof(bucket_t[t->bucket_info->count]);
        t->bucket_info = memcpy(GC_MALLOC(size), t->bucket_info, size);
        t->bucket_info->data_refcount = 0;
    }
}
// Return address of value or NULL
// Looks up `key` in this table only (fallback tables are NOT consulted):
// hash the key to its home bucket, then walk the collision chain comparing
// stored keys against `key`.
PUREFUNC public void *Table$get_raw(Table_t t, const void *key, const TypeInfo_t *type) {
    assert(type->tag == TableInfo);
    if (!key || !t.bucket_info) return NULL;

    uint64_t hash = HASH_KEY(t, key);
    bucket_t *buckets = t.bucket_info->buckets;
    for (uint64_t i = hash; buckets[i].occupied; i = buckets[i].next_bucket) {
        void *entry = GET_ENTRY(t, buckets[i].index);
        if (EQUAL_KEYS(entry, key)) {
            // Found it: the value lives right after the (padded) key:
            return entry + value_offset(type);
        }
        if (buckets[i].next_bucket == END_OF_CHAIN) break;
    }
    return NULL;
}
98 PUREFUNC public void *Table$get(Table_t t, const void *key, const TypeInfo_t *type) {
99 assert(type->tag == TableInfo);
100 for (const Table_t *iter = &t; iter; iter = iter->fallback) {
101 void *ret = Table$get_raw(*iter, key, type);
102 if (ret) return ret;
104 return NULL;
// Insert the entry stored at entry-list position `index` into the bucket
// array, using a chained scatter table with Brent's variation (as in Lua):
// if the home bucket is occupied by an entry that does NOT belong there
// (a mid-chain squatter), the squatter is evicted to a free bucket; if the
// occupant genuinely hashes here, the new entry is linked in as the second
// element of the chain.
static void Table$set_bucket(Table_t *t, const void *entry, int32_t index, const TypeInfo_t *type) {
    assert(t->bucket_info);
    const void *key = entry;
    bucket_t *buckets = t->bucket_info->buckets;
    uint64_t hash = HASH_KEY(*t, key);
    bucket_t *bucket = &buckets[hash];
    if (!bucket->occupied) {
        // Empty space:
        bucket->occupied = 1;
        bucket->index = index;
        bucket->next_bucket = END_OF_CHAIN;
        return;
    }

    // Find a free bucket, scanning downward from the last known free slot:
    while (buckets[t->bucket_info->last_free].occupied) {
        assert(t->bucket_info->last_free > 0);
        --t->bucket_info->last_free;
    }

    uint64_t collided_hash = HASH_KEY(*t, GET_ENTRY(*t, bucket->index));
    if (collided_hash != hash) { // Collided with a mid-chain entry
        // Find chain predecessor
        uint64_t predecessor = collided_hash;
        while (buckets[predecessor].next_bucket != hash)
            predecessor = buckets[predecessor].next_bucket;

        // Move mid-chain entry to free space and update predecessor
        buckets[predecessor].next_bucket = t->bucket_info->last_free;
        buckets[t->bucket_info->last_free] = *bucket;

        // The home bucket now holds the new entry as a fresh chain head:
        bucket->occupied = 1;
        bucket->index = index;
        bucket->next_bucket = END_OF_CHAIN;
    } else { // Collided with the start of a chain, put the new entry in chain
        // position #2
        buckets[t->bucket_info->last_free] =
            (bucket_t){.occupied = 1, .index = index, .next_bucket = bucket->next_bucket};
        bucket->next_bucket = t->bucket_info->last_free;
    }
}
// Allocate a fresh bucket array with `new_capacity` slots and rehash every
// existing entry into it. Hard-fails if the requested size exceeds
// TABLE_MAX_BUCKETS.
static void hashmap_resize_buckets(Table_t *t, uint32_t new_capacity, const TypeInfo_t *type) {
    if (unlikely(new_capacity > TABLE_MAX_BUCKETS))
        fail("Table has exceeded the maximum table size (2^31) and cannot grow "
             "further!");
    // NOTE(review): GC_MALLOC_ATOMIC means the GC won't scan this block —
    // assumes bucket_info_t/bucket_t contain no GC-visible pointers; confirm.
    size_t alloc_size = sizeof(bucket_info_t) + sizeof(bucket_t[new_capacity]);
    t->bucket_info = GC_MALLOC_ATOMIC(alloc_size);
    memset(t->bucket_info->buckets, 0, sizeof(bucket_t[new_capacity]));
    t->bucket_info->count = new_capacity;
    t->bucket_info->last_free = new_capacity - 1;
    // Rehash:
    for (int64_t i = 0; i < (int64_t)Table$length(*t); i++) {
        Table$set_bucket(t, GET_ENTRY(*t, i), i, type);
    }
}
// Return address of value
// Ensure `key` has a slot in this table and return the address of its value:
// - existing key: optionally overwrite the value (when `value` != NULL) and
//   return the slot address
// - new key: append an entry; if `value` is NULL, inherit the value from the
//   first fallback table that has the key (otherwise zero it)
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstack-protector"
#endif
public
void *Table$reserve(Table_t *t, const void *key, const void *value, const TypeInfo_t *type) {
    assert(type->tag == TableInfo);
    if (!t || !key) return NULL;

    t->hash = 0; // any cached hash is stale once the table may change

    int64_t key_size = type->TableInfo.key->size, value_size = type->TableInfo.value->size;
    if (!t->bucket_info || t->bucket_info->count == 0) {
        hashmap_resize_buckets(t, 8, type);
    } else {
        // Check if we are clobbering a value:
        void *value_home = Table$get_raw(*t, key, type);
        if (value_home) { // Update existing slot
            // Ensure that `value_home` is still inside t->entries, even if COW occurs
            ptrdiff_t offset = value_home - t->entries.data;
            maybe_copy_on_write(t, type);
            value_home = t->entries.data + offset;

            if (value && value_size > 0) memcpy(value_home, value, (size_t)value_size);

            return value_home;
        }
    }

    // Otherwise add a new entry:

    // Resize buckets if necessary
    if (t->entries.length >= t->bucket_info->count) {
        // Current resize policy: +50% at a time:
        uint32_t newsize = MAX(8, (uint32_t)(3 * t->bucket_info->count) / 2);
        if (unlikely(newsize > TABLE_MAX_BUCKETS)) newsize = TABLE_MAX_BUCKETS;
        hashmap_resize_buckets(t, newsize, type);
    }

    // No explicit value given: inherit from the first fallback that has the key:
    if (!value && value_size > 0) {
        for (Table_t *iter = t->fallback; iter; iter = iter->fallback) {
            value = Table$get_raw(*iter, key, type);
            if (value) break;
        }
    }

    maybe_copy_on_write(t, type);

    // Build the new entry in a stack buffer, then add it to the entry list.
    // NOTE(review): index I(0) to List$insert appears to mean "append" (the
    // new entry is read back from the tail below) — confirm the List API.
    char buf[entry_size(type)];
    memset(buf, 0, sizeof(buf));
    memcpy(buf, key, (size_t)key_size);
    if (value && value_size > 0) memcpy(buf + value_offset(type), value, (size_t)value_size);
    else if (value_size > 0) memset(buf + value_offset(type), 0, (size_t)value_size);
    List$insert(&t->entries, buf, I(0), (int64_t)entry_size(type));

    int64_t entry_index = (int64_t)t->entries.length - 1;
    void *entry = GET_ENTRY(*t, entry_index);
    Table$set_bucket(t, entry, entry_index, type);
    return entry + value_offset(type);
}
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
227 public
228 void Table$set(Table_t *t, const void *key, const void *value, const TypeInfo_t *type) {
229 assert(type->tag == TableInfo);
230 (void)Table$reserve(t, key, value, type);
// Remove `key` from the table (or, when `key` is NULL, remove the most
// recently added entry). Removal is O(1): the last entry is swapped into
// the removed entry's slot instead of shifting everything down, which
// disturbs insertion order.
public
void Table$remove(Table_t *t, const void *key, const TypeInfo_t *type) {
    assert(type->tag == TableInfo);
    if (!t || Table$length(*t) == 0) return;

    // TODO: this work doesn't need to be done if the key is already missing
    maybe_copy_on_write(t, type);

    // If unspecified, pop the last key:
    if (!key) key = GET_ENTRY(*t, (int64_t)t->entries.length - 1);

    // Steps: look up the bucket for the removed key
    //        If missing, then return immediately
    //        Swap last key/value into the removed bucket's index1
    //        Zero out the last key/value and decrement the count
    //        Find the last key/value's bucket and update its index1
    //        Look up the bucket for the removed key
    //        If bucket is first in chain:
    //            Move bucket->next to bucket's spot
    //            zero out bucket->next's old spot
    //            maybe update lastfree_index1 to second-in-chain's index
    //        Else:
    //            set prev->next = bucket->next
    //            zero out bucket
    //            maybe update lastfree_index1 to removed bucket's index

    // Walk the collision chain to find the bucket holding `key`:
    uint64_t hash = HASH_KEY(*t, key);
    bucket_t *bucket, *prev = NULL;
    for (uint64_t i = hash; t->bucket_info->buckets[i].occupied; i = t->bucket_info->buckets[i].next_bucket) {
        if (EQUAL_KEYS(GET_ENTRY(*t, t->bucket_info->buckets[i].index), key)) {
            bucket = &t->bucket_info->buckets[i];
            goto found_it;
        }
        if (t->bucket_info->buckets[i].next_bucket == END_OF_CHAIN) return;
        prev = &t->bucket_info->buckets[i];
    }
    return;

found_it:;
    assert(bucket->occupied);

    t->hash = 0; // cached hash is stale once contents change

    // Always remove the last entry. If we need to remove some other entry,
    // swap the other entry into the last position and then remove the last
    // entry. This disturbs the ordering of the table, but keeps removal O(1)
    // instead of O(N)
    int64_t last_entry = (int64_t)t->entries.length - 1;
    if (bucket->index != last_entry) {
        // Find the bucket that points to the last entry's index:
        uint64_t i = HASH_KEY(*t, GET_ENTRY(*t, last_entry));
        while (t->bucket_info->buckets[i].index != last_entry)
            i = t->bucket_info->buckets[i].next_bucket;
        // Update the bucket to point to the last entry's new home (the space
        // where the removed entry currently sits):
        t->bucket_info->buckets[i].index = bucket->index;

        // Clobber the entry being removed (in the middle of the list) with
        // the last entry:
        memcpy(GET_ENTRY(*t, bucket->index), GET_ENTRY(*t, last_entry), entry_size(type));
    }

    // Last entry is being removed, so clear it out to be safe:
    memset(GET_ENTRY(*t, last_entry), 0, entry_size(type));
    List$remove_at(&t->entries, I((int64_t)t->entries.length), I(1), (int64_t)entry_size(type));

    // Unlink the removed key's bucket from its collision chain:
    int64_t bucket_to_clear;
    if (prev) { // Middle (or end) of a chain
        bucket_to_clear = (bucket - t->bucket_info->buckets);
        prev->next_bucket = bucket->next_bucket;
    } else if (bucket->next_bucket != END_OF_CHAIN) { // Start of a chain
        bucket_to_clear = bucket->next_bucket;
        *bucket = t->bucket_info->buckets[bucket_to_clear];
    } else { // Empty chain
        bucket_to_clear = (bucket - t->bucket_info->buckets);
    }

    t->bucket_info->buckets[bucket_to_clear] = (bucket_t){0};
    // Let future insertions reuse the freed slot if it's above the scan mark:
    if (bucket_to_clear > t->bucket_info->last_free) t->bucket_info->last_free = bucket_to_clear;
}
315 CONSTFUNC public void *Table$entry(Table_t t, int64_t n) {
316 if (n < 1 || n > (int64_t)Table$length(t)) return NULL;
317 return GET_ENTRY(t, n - 1);
// Reset the table to the canonical empty value (drops entries, buckets,
// and fallback references; the GC reclaims the old storage).
public
void Table$clear(Table_t *t) {
    *t = EMPTY_TABLE;
}
325 public
326 Table_t Table$sorted(Table_t t, const TypeInfo_t *type) {
327 Closure_t cmp = (Closure_t){.fn = generic_compare, .userdata = (void *)type->TableInfo.key};
328 List_t entries = List$sorted(t.entries, cmp, (int64_t)entry_size(type));
329 return Table$from_entries(entries, type);
// Deep, order-insensitive equality: same entry count, every key of x found
// in y with an equal value. Differing cached hashes give a cheap early-out.
// NOTE(review): only the *presence* of a fallback is compared, not its
// contents — confirm this is intentional.
PUREFUNC public bool Table$equal(const void *vx, const void *vy, const TypeInfo_t *type) {
    if (vx == vy) return true;
    Table_t *x = (Table_t *)vx, *y = (Table_t *)vy;

    if (x->hash && y->hash && x->hash != y->hash) return false;

    assert(type->tag == TableInfo);
    if (x->entries.length != y->entries.length) return false;

    if ((x->fallback != NULL) != (y->fallback != NULL)) return false;

    const TypeInfo_t *value_type = type->TableInfo.value;
    size_t offset = value_offset(type);
    for (int64_t i = 0; i < (int64_t)x->entries.length; i++) {
        void *x_key = x->entries.data + i * x->entries.stride;
        void *y_value = Table$get_raw(*y, x_key, type);
        if (!y_value) return false;
        if (value_type->size > 0) {
            void *x_value = x_key + offset;
            if (!generic_equal(y_value, x_value, value_type)) return false;
        }
    }
    return true;
}
// Total ordering for tables, computed in linear time (see the comments
// below for the rules). Returns <0, 0, or >0.
PUREFUNC public int32_t Table$compare(const void *vx, const void *vy, const TypeInfo_t *type) {
    if (vx == vy) return 0;

    Table_t *x = (Table_t *)vx, *y = (Table_t *)vy;
    assert(type->tag == TableInfo);
    __typeof(type->TableInfo) table = type->TableInfo;

    // Sort empty tables before non-empty tables:
    if (x->entries.length == 0 || y->entries.length == 0) return ((x->entries.length > 0) - (y->entries.length > 0));

    // Table comparison rules:
    // - If two tables have different keys, then compare as if comparing a
    //   sorted list of the keys of the two tables:
    //   `x.keys.sorted() <> y.keys.sorted()`
    // - Otherwise, compare as if comparing lists of values for the sorted key
    //   lists:
    //   `[x[k] for k in x.keys.sorted()] <> [y[k] for k in y.keys.sorted()]`

    // We can do this in _linear_ time if we find the smallest `k` such that
    // `x[k] != y[k]`, as well as the largest key in `x` and `y`.

    void *mismatched_key = NULL, *max_x_key = NULL;
    for (int64_t i = 0; i < (int64_t)x->entries.length; i++) {
        void *key = x->entries.data + x->entries.stride * i;
        if (max_x_key == NULL || generic_compare(key, max_x_key, table.key) > 0) max_x_key = key;

        void *x_value = key + value_offset(type);
        void *y_value = Table$get_raw(*y, key, type);

        if (!y_value || (table.value->size > 0 && !generic_equal(x_value, y_value, table.value))) {
            // Track the smallest key where x and y disagree:
            if (mismatched_key == NULL || generic_compare(key, mismatched_key, table.key) < 0) mismatched_key = key;
        }
    }

    // If the keys are not all equal, we gotta check to see if there exists a
    // `y[k]` such that `k` is smaller than all keys that `x` has and `y` doesn't:
    void *max_y_key = NULL;
    for (int64_t i = 0; i < (int64_t)y->entries.length; i++) {
        void *key = y->entries.data + y->entries.stride * i;
        if (max_y_key == NULL || generic_compare(key, max_y_key, table.key) > 0) max_y_key = key;

        void *y_value = key + value_offset(type);
        void *x_value = Table$get_raw(*x, key, type);
        if (!x_value || (table.value->size > 0 && !generic_equal(x_value, y_value, table.value))) {
            if (mismatched_key == NULL || generic_compare(key, mismatched_key, table.key) < 0) mismatched_key = key;
        }
    }

    if (mismatched_key) {
        void *x_value = Table$get_raw(*x, mismatched_key, type);
        void *y_value = Table$get_raw(*y, mismatched_key, type);
        if (x_value && y_value) {
            // Both tables have the key, so values must differ — compare them:
            return generic_compare(x_value, y_value, table.value);
        } else if (y_value) {
            // The smallest mismatched key is in Y, but not X.
            // In this case, we should judge if the key is smaller than *any*
            // key in X or if it's bigger than *every* key in X.
            // Example 1:
            //   x={10, 20, 30} > y={10, 20, 25, 30}
            //   The smallest mismatched key is `25`, and we know that `x` is
            //   larger than `y` because `30 > 25`.
            // Example 2:
            //   x={10, 20, 30} > y={10, 20, 30, 999}
            //   The smallest mismatched key is `999`, and we know that `x` is
            //   smaller than `y` because `30 < 999`.
            return max_x_key ? generic_compare(max_x_key, mismatched_key, table.key) : -1;
        } else {
            assert(x_value);
            // The smallest mismatched key is in X, but not Y. The same logic
            // above applies, but reversed.
            return max_y_key ? -generic_compare(max_y_key, mismatched_key, table.key) : 1;
        }
    }

    assert(x->entries.length == y->entries.length);

    // Assuming keys are the same, compare values:
    if (table.value->size > 0) {
        for (int64_t i = 0; i < (int64_t)x->entries.length; i++) {
            void *key = x->entries.data + x->entries.stride * i;
            void *x_value = key + value_offset(type);
            void *y_value = Table$get_raw(*y, key, type);
            int32_t diff = generic_compare(x_value, y_value, table.value);
            if (diff != 0) return diff;
        }
    }

    // Finally, tie-break on fallback tables (absent sorts before present):
    if (!x->fallback != !y->fallback) {
        return (!x->fallback) - (!y->fallback);
    } else if (x->fallback && y->fallback) {
        return generic_compare(x->fallback, y->fallback, type);
    }

    return 0;
}
// Hash a table, caching the result in t->hash (0 means "not yet computed",
// so a genuine 0 result is remapped to a nonzero sentinel below).
PUREFUNC public uint64_t Table$hash(const void *obj, const TypeInfo_t *type) {
    assert(type->tag == TableInfo);
    Table_t *t = (Table_t *)obj;
    if (t->hash != 0) return t->hash; // cached from a previous call

    // Table hashes are computed as:
    //     hash(t.length, (xor: t.keys), (xor: t.values), t.fallback)
    // Where fallback and default hash to zero if absent
    // XOR-combining makes the hash independent of entry order.
    __typeof(type->TableInfo) table = type->TableInfo;
    uint64_t keys_hash = 0, values_hash = 0;
    size_t offset = value_offset(type);
    if (table.value->size > 0) {
        for (int64_t i = 0; i < (int64_t)t->entries.length; i++) {
            keys_hash ^= generic_hash(t->entries.data + i * t->entries.stride, table.key);
            values_hash ^= generic_hash(t->entries.data + i * t->entries.stride + offset, table.value);
        }
    } else {
        // Zero-sized value type (set-like table): hash keys only.
        for (int64_t i = 0; i < (int64_t)t->entries.length; i++)
            keys_hash ^= generic_hash(t->entries.data + i * t->entries.stride, table.key);
    }

    // NOTE(review): `volatile` here presumably guards against the optimizer
    // interfering with the byte-wise siphash read — confirm the intent.
    volatile struct {
        int64_t length;
        uint64_t keys_hash, values_hash, fallback_hash;
    } components = {
        (int64_t)t->entries.length,
        keys_hash,
        values_hash,
        t->fallback ? Table$hash(t->fallback, type) : 0,
    };
    t->hash = siphash24((void *)&components, sizeof(components));
    // Keep 0 reserved as the "no cached hash" marker:
    if unlikely (t->hash == 0) t->hash = 1234567;
    return t->hash;
}
// Render the table as text, e.g. `{k1: v1, k2: v2; fallback={...}}`.
// A NULL `obj` renders the table *type* instead (e.g. `{Key:Value}`).
public
Text_t Table$as_text(const void *obj, bool colorize, const TypeInfo_t *type) {
    Table_t *t = (Table_t *)obj;
    assert(type->tag == TableInfo);
    __typeof(type->TableInfo) table = type->TableInfo;

    if (!t) {
        return table.value->size > 0 ? Text$concat(Text("{"), generic_as_text(NULL, false, table.key), Text(":"),
                                                   generic_as_text(NULL, false, table.value), Text("}"))
                                     : Text$concat(Text("{"), generic_as_text(NULL, false, table.key), Text("}"));
    }

    int64_t val_off = (int64_t)value_offset(type);
    Text_t text = Text("{");
    for (int64_t i = 0, length = (int64_t)Table$length(*t); i < length; i++) {
        if (i > 0) text = Text$concat(text, Text(", "));
        void *entry = GET_ENTRY(*t, i);
        text = Text$concat(text, generic_as_text(entry, colorize, table.key));
        // Zero-sized values (set-like tables) print keys only:
        if (table.value->size > 0)
            text = Text$concat(text, Text(": "), generic_as_text(entry + val_off, colorize, table.value));
    }

    if (t->fallback) {
        text = Text$concat(text, Text("; fallback="), Table$as_text(t->fallback, colorize, type));
    }

    text = Text$concat(text, Text("}"));
    return text;
}
518 public
519 Table_t Table$from_entries(List_t entries, const TypeInfo_t *type) {
520 assert(type->tag == TableInfo);
521 if (entries.length == 0) return (Table_t){};
523 Table_t t = EMPTY_TABLE;
524 int64_t length = (int64_t)entries.length + (int64_t)entries.length / 4;
525 size_t alloc_size = sizeof(bucket_info_t) + sizeof(bucket_t[length]);
526 t.bucket_info = GC_MALLOC_ATOMIC(alloc_size);
527 memset(t.bucket_info->buckets, 0, sizeof(bucket_t[length]));
528 t.bucket_info->count = length;
529 t.bucket_info->last_free = length - 1;
531 size_t offset = value_offset(type);
532 for (int64_t i = 0; i < (int64_t)entries.length; i++) {
533 void *key = entries.data + i * entries.stride;
534 Table$set(&t, key, key + offset, type);
536 return t;
// "set intersection" in formal terms
public
Table_t Table$intersection(Table_t a, Table_t b, const TypeInfo_t *type) {
    // Return a table such that t[k]==a[k] for all k such that a.has(k), b.has(k),
    // and a[k]==b[k]
    // NOTE(review): `a` and its entire fallback chain are walked; if a key is
    // present both in `a` and in one of its fallbacks, the fallback's entry
    // is visited later and overwrites the result — confirm whether shadowed
    // fallback entries should be skipped instead.
    Table_t result = EMPTY_TABLE;
    const size_t offset = value_offset(type);
    for (Table_t *t = &a; t; t = t->fallback) {
        for (int64_t i = 0; i < (int64_t)Table$length(*t); i++) {
            void *key = GET_ENTRY(*t, i);
            void *a_value = key + offset;
            // Table$get consults `b` and its fallbacks:
            void *b_value = Table$get(b, key, type);
            if (b_value && (type->TableInfo.value->size == 0 || generic_equal(a_value, b_value, type->TableInfo.value)))
                Table$set(&result, key, a_value, type);
        }
    }
    return result;
}
// "set union" in formal terms
public
Table_t Table$with(Table_t a, Table_t b, const TypeInfo_t *type) {
    // return a table such that t[k]==b[k] for all k such that b.has(k), and
    // t[k]==a[k] for all k such that a.has(k) and not b.has(k)
    // NOTE(review): fallback chains are flattened into the result, and later
    // chain entries overwrite earlier ones for duplicate keys — confirm the
    // intended shadowing semantics.
    Table_t result = EMPTY_TABLE;
    const size_t offset = value_offset(type);
    // Copy all of `a` (and its fallbacks) first, then let `b` overwrite:
    for (Table_t *t = &a; t; t = t->fallback) {
        for (int64_t i = 0; i < (int64_t)Table$length(*t); i++) {
            void *key = GET_ENTRY(*t, i);
            Table$set(&result, key, key + offset, type);
        }
    }
    for (Table_t *t = &b; t; t = t->fallback) {
        for (int64_t i = 0; i < (int64_t)Table$length(*t); i++) {
            void *key = GET_ENTRY(*t, i);
            Table$set(&result, key, key + offset, type);
        }
    }
    return result;
}
// "disjunctive union" or "symmetric difference" in formal terms
public
Table_t Table$difference(Table_t a, Table_t b, const TypeInfo_t *type) {
    // return a table with elements in `a` or `b`, but not both
    // (membership here is by key only; values are not compared)
    Table_t result = EMPTY_TABLE;
    const size_t offset = value_offset(type);
    for (Table_t *t = &a; t; t = t->fallback) {
        for (int64_t i = 0; i < (int64_t)Table$length(*t); i++) {
            void *key = GET_ENTRY(*t, i);
            if (Table$get(b, key, type) == NULL) Table$set(&result, key, key + offset, type);
        }
    }
    for (Table_t *t = &b; t; t = t->fallback) {
        for (int64_t i = 0; i < (int64_t)Table$length(*t); i++) {
            void *key = GET_ENTRY(*t, i);
            if (Table$get(a, key, type) == NULL) Table$set(&result, key, key + offset, type);
        }
    }
    return result;
}
// "without" is "set difference" in formal terms
public
Table_t Table$without(Table_t a, Table_t b, const TypeInfo_t *type) {
    // Return a table such that t[k]==a[k] for all k such that not b.has(k) or
    // b[k] != a[k]
    Table_t result = EMPTY_TABLE;
    const size_t offset = value_offset(type);
    for (Table_t *t = &a; t; t = t->fallback) {
        for (int64_t i = 0; i < (int64_t)Table$length(*t); i++) {
            void *key = GET_ENTRY(*t, i);
            void *a_value = key + offset;
            void *b_value = Table$get(b, key, type);
            // Keep the entry if `b` lacks the key or maps it to a different value:
            if (!b_value
                || (type->TableInfo.value->size > 0 && !generic_equal(a_value, b_value, type->TableInfo.value)))
                Table$set(&result, key, a_value, type);
        }
    }
    return result;
}
// Return `t` with its fallback pointed at a heap copy of `fallback`
// (or with the fallback cleared when `fallback` is empty).
public
Table_t Table$with_fallback(Table_t t, OptionalTable_t fallback) {
    if (fallback.entries.length <= 0) {
        t.fallback = NULL;
        return t;
    } else {
        // NOTE(review): TABLE_INCREF presumably bumps the shared-data
        // refcounts so later mutations trigger copy-on-write — confirm.
        TABLE_INCREF(fallback);
        t.fallback = GC_MALLOC(sizeof(Table_t));
        *t.fallback = fallback;
        return t;
    }
}
634 PUREFUNC public bool Table$is_subset_of(Table_t a, Table_t b, bool strict, const TypeInfo_t *type) {
635 if (a.entries.length > b.entries.length || (strict && a.entries.length == b.entries.length)) return false;
637 for (int64_t i = 0; i < (int64_t)Table$length(a); i++) {
638 void *found = Table$get_raw(b, GET_ENTRY(a, i), type);
639 if (!found) return false;
641 return true;
// True when every key of `b` is present in `a` (mirror of Table$is_subset_of).
PUREFUNC public bool Table$is_superset_of(Table_t a, Table_t b, bool strict, const TypeInfo_t *type) {
    return Table$is_subset_of(b, a, strict, type);
}
648 PUREFUNC public void *Table$str_get(Table_t t, const char *key) {
649 void **ret = Table$get(t, &key, &CStrToVoidStarTable);
650 return ret ? *ret : NULL;
653 PUREFUNC public void *Table$str_get_raw(Table_t t, const char *key) {
654 void **ret = Table$get_raw(t, &key, &CStrToVoidStarTable);
655 return ret ? *ret : NULL;
// Reserve a slot for a C-string key and store `value`; returns the address
// of the stored void* value (see Table$reserve).
public
void *Table$str_reserve(Table_t *t, const char *key, const void *value) {
    return Table$reserve(t, &key, &value, &CStrToVoidStarTable);
}
// Store `value` under a C-string key, overwriting any existing value.
public
void Table$str_set(Table_t *t, const char *key, const void *value) {
    Table$set(t, &key, &value, &CStrToVoidStarTable);
}
668 public
669 void Table$str_remove(Table_t *t, const char *key) {
670 return Table$remove(t, &key, &CStrToVoidStarTable);
// C-string-table alias for Table$entry (1-indexed; NULL when out of range).
CONSTFUNC public void *Table$str_entry(Table_t t, int64_t n) {
    return Table$entry(t, n);
}
// A table is "none" when its entry list has no data buffer at all;
// an allocated-but-empty table is NOT none.
PUREFUNC public bool Table$is_none(const void *obj, const TypeInfo_t *info) {
    (void)info;
    return ((Table_t *)obj)->entries.data == NULL;
}
// Serialize the table to `out`: entry count first, then each key (and
// value, when the value type is non-empty), then a one-byte 0/1 flag for
// fallback presence followed by the fallback table itself if present.
public
void Table$serialize(const void *obj, FILE *out, Table_t *pointers, const TypeInfo_t *type) {
    Table_t *t = (Table_t *)obj;
    int64_t len = (int64_t)t->entries.length;
    Int64$serialize(&len, out, pointers, &Int64$info);

    size_t offset = value_offset(type);
    for (int64_t i = 0; i < len; i++) {
        _serialize(t->entries.data + i * t->entries.stride, out, pointers, type->TableInfo.key);
        if (type->TableInfo.value->size > 0)
            _serialize(t->entries.data + i * t->entries.stride + offset, out, pointers, type->TableInfo.value);
    }

    assert(fputc(t->fallback != NULL ? 1 : 0, out) != EOF);
    if (t->fallback) Table$serialize(t->fallback, out, pointers, type);
}
699 public
700 void Table$deserialize(FILE *in, void *outval, List_t *pointers, const TypeInfo_t *type) {
701 int64_t len;
702 Int64$deserialize(in, &len, pointers, &Int$info);
704 Table_t t = EMPTY_TABLE;
705 char keybuf[256], valbuf[256];
706 char *key =
707 (size_t)type->TableInfo.key->size <= sizeof(keybuf) ? keybuf : GC_MALLOC((size_t)type->TableInfo.key->size);
708 char *value =
709 (size_t)type->TableInfo.value->size <= sizeof(valbuf) ? valbuf : GC_MALLOC((size_t)type->TableInfo.value->size);
711 for (int64_t i = 0; i < len; i++) {
712 _deserialize(in, key, pointers, type->TableInfo.key);
713 if (type->TableInfo.value->size > 0) _deserialize(in, value, pointers, type->TableInfo.value);
714 Table$set(&t, key, value, type);
717 if (fgetc(in) != 0) {
718 t.fallback = GC_MALLOC(sizeof(Table_t));
719 Table$deserialize(in, t.fallback, pointers, type);
722 *(Table_t *)outval = t;
// TypeInfo for the zero-sized "Present" struct.
// NOTE(review): presumably used as the value type of set-like tables
// (inferred from the name and zero size) — confirm against callers.
public
const TypeInfo_t Present$$info = {.size = 0,
                                  .align = 0,
                                  .tag = StructInfo,
                                  .metamethods = Struct$metamethods,
                                  .StructInfo.name = "Present",
                                  .StructInfo.num_fields = 0};
// The singleton value of the zero-sized Present type.
public
const Present$$type PRESENT = {};