Use SIZE_MAX instead of SIZE_T_MAX

This commit is contained in:
jun 2025-03-19 23:03:10 +01:00
parent 29014f4c52
commit c039e24996
4 changed files with 21 additions and 20 deletions

View File

@ -180,8 +180,6 @@ GUF_DICT_KWRDS bool GUF_CAT(GUF_DICT_NAME, _debug_valid_size)(const GUF_DICT_NAM
#include "guf_assert.h" #include "guf_assert.h"
#include "guf_math.h" #include "guf_math.h"
GUF_DICT_KWRDS bool GUF_CAT(GUF_DICT_NAME, _debug_valid_size)(const GUF_DICT_NAME *ht) GUF_DICT_KWRDS bool GUF_CAT(GUF_DICT_NAME, _debug_valid_size)(const GUF_DICT_NAME *ht)
{ {
ptrdiff_t cnt = 0; ptrdiff_t cnt = 0;
@ -267,12 +265,12 @@ GUF_DICT_KWRDS ptrdiff_t GUF_CAT(GUF_DICT_NAME, _max_capacity)(void)
const size_t max_cap_kv_indices = GUF_ALLOC_MAX_CAPACITY(GUF_DICT_KV_META_T); const size_t max_cap_kv_indices = GUF_ALLOC_MAX_CAPACITY(GUF_DICT_KV_META_T);
// Find next power of two (capacities must be powers of two). // Find next power of two (capacities must be powers of two).
size_t pow2_cap = 1; size_t pow2_cap = 1;
const size_t MAX_SIZE_POW2 = SIZE_T_MAX & ~(SIZE_T_MAX >> 1); const size_t MAX_SIZE_POW2 = SIZE_MAX & ~(SIZE_MAX >> 1);
while ((pow2_cap < MAX_SIZE_POW2) && ((pow2_cap << 1) < max_cap_kv_indices) ) { while ((pow2_cap < MAX_SIZE_POW2) && ((pow2_cap << 1) <= max_cap_kv_indices) ) {
pow2_cap <<= 1; pow2_cap <<= 1;
} }
GUF_ASSERT(guf_is_pow2_size_t(pow2_cap) && pow2_cap <= max_cap_kv_indices && pow2_cap > 1); GUF_ASSERT(guf_is_pow2_size_t(pow2_cap) && pow2_cap <= max_cap_kv_indices && pow2_cap > 1);
return GUF_MIN(GUF_MIN(max_cap_kv_elems, pow2_cap), PTRDIFF_MAX); return GUF_MIN(GUF_MIN(max_cap_kv_elems, pow2_cap), PTRDIFF_MAX);
} }
@ -328,7 +326,7 @@ static size_t GUF_CAT(GUF_DICT_NAME, _find_idx)(GUF_DICT_NAME *ht, const GUF_DIC
{ {
if (ht->kv_indices_cap <= 0) { if (ht->kv_indices_cap <= 0) {
*key_exists = false; *key_exists = false;
return SIZE_T_MAX; return SIZE_MAX;
} }
const GUF_DICT_KV_META_T key_hash_frag = GUF_DICT_HASH_T_GET_HASHFRAG(key_hash); const GUF_DICT_KV_META_T key_hash_frag = GUF_DICT_HASH_T_GET_HASHFRAG(key_hash);
@ -337,7 +335,7 @@ static size_t GUF_CAT(GUF_DICT_NAME, _find_idx)(GUF_DICT_NAME *ht, const GUF_DIC
size_t idx = GUF_MOD_CAP(key_hash); size_t idx = GUF_MOD_CAP(key_hash);
const size_t start_idx = idx; const size_t start_idx = idx;
size_t first_tombstone_idx = SIZE_T_MAX; size_t first_tombstone_idx = SIZE_MAX;
size_t probe_len = 0; size_t probe_len = 0;
// size_t seen_occupied = 0; // This allows us to bail out early once we visited every non-null/non-tombstone kv_idx. // size_t seen_occupied = 0; // This allows us to bail out early once we visited every non-null/non-tombstone kv_idx.
do { do {
@ -345,14 +343,14 @@ static size_t GUF_CAT(GUF_DICT_NAME, _find_idx)(GUF_DICT_NAME *ht, const GUF_DIC
const GUF_DICT_KV_META_T kv_hashfrag = GUF_DICT_META_GET_HASHFRAG(ht->kv_indices[idx]); const GUF_DICT_KV_META_T kv_hashfrag = GUF_DICT_META_GET_HASHFRAG(ht->kv_indices[idx]);
if (kv_idx == GUF_DICT_KV_META_IDX_NULL) { // 1.) Empty. if (kv_idx == GUF_DICT_KV_META_IDX_NULL) { // 1.) Empty.
if (first_tombstone_idx != SIZE_T_MAX) { if (first_tombstone_idx != SIZE_MAX) {
idx = first_tombstone_idx; idx = first_tombstone_idx;
} }
ht->max_probelen = GUF_MAX((ptrdiff_t)probe_len, ht->max_probelen); ht->max_probelen = GUF_MAX((ptrdiff_t)probe_len, ht->max_probelen);
*key_exists = false; *key_exists = false;
return idx; return idx;
} else if (kv_idx == GUF_DICT_KV_META_IDX_TOMBSTONE) { // 2.) Tombstone. } else if (kv_idx == GUF_DICT_KV_META_IDX_TOMBSTONE) { // 2.) Tombstone.
if (first_tombstone_idx == SIZE_T_MAX) { if (first_tombstone_idx == SIZE_MAX) {
first_tombstone_idx = idx; first_tombstone_idx = idx;
} }
goto probe; goto probe;
@ -369,12 +367,12 @@ static size_t GUF_CAT(GUF_DICT_NAME, _find_idx)(GUF_DICT_NAME *ht, const GUF_DIC
} while (idx != start_idx && probe_len < (size_t)ht->kv_indices_cap); } while (idx != start_idx && probe_len < (size_t)ht->kv_indices_cap);
*key_exists = false; *key_exists = false;
if (first_tombstone_idx != SIZE_T_MAX) { // Edge case: No empty slots, but found tombstone. if (first_tombstone_idx != SIZE_MAX) { // Edge case: No empty slots, but found tombstone.
ht->max_probelen = GUF_MAX((ptrdiff_t)probe_len, ht->max_probelen); ht->max_probelen = GUF_MAX((ptrdiff_t)probe_len, ht->max_probelen);
GUF_ASSERT(GUF_DICT_META_GET_IDX(ht->kv_indices[first_tombstone_idx]) == GUF_DICT_KV_META_IDX_NULL); GUF_ASSERT(GUF_DICT_META_GET_IDX(ht->kv_indices[first_tombstone_idx]) == GUF_DICT_KV_META_IDX_NULL);
return first_tombstone_idx; return first_tombstone_idx;
} else { // Failed to find an idx. } else { // Failed to find an idx.
return SIZE_T_MAX; return SIZE_MAX;
} }
#undef GUF_MOD_CAP #undef GUF_MOD_CAP
} }
@ -448,7 +446,7 @@ static void GUF_CAT(GUF_DICT_NAME, _try_grow_if_necessary)(GUF_DICT_NAME *ht, gu
const GUF_DICT_HASH_T key_hash = GUF_DICT_KEY_HASH(&kv->key); // TODO: might be expensive... const GUF_DICT_HASH_T key_hash = GUF_DICT_KEY_HASH(&kv->key); // TODO: might be expensive...
const size_t new_idx = GUF_CAT(GUF_DICT_NAME, _find_idx)(ht, &kv->key, key_hash, &key_exists); const size_t new_idx = GUF_CAT(GUF_DICT_NAME, _find_idx)(ht, &kv->key, key_hash, &key_exists);
GUF_ASSERT(!key_exists); GUF_ASSERT(!key_exists);
GUF_ASSERT(new_idx < SIZE_T_MAX && new_idx < (size_t)ht->kv_indices_cap); GUF_ASSERT(new_idx < SIZE_MAX && new_idx < (size_t)ht->kv_indices_cap);
GUF_ASSERT((GUF_DICT_HASH_T_GET_HASHFRAG(key_hash) & (GUF_DICT_KV_META_T)kv_idx) == 0); GUF_ASSERT((GUF_DICT_HASH_T_GET_HASHFRAG(key_hash) & (GUF_DICT_KV_META_T)kv_idx) == 0);
ht->kv_indices[new_idx] = GUF_DICT_HASH_T_GET_HASHFRAG(key_hash) | (GUF_DICT_KV_META_T)kv_idx; ht->kv_indices[new_idx] = GUF_DICT_HASH_T_GET_HASHFRAG(key_hash) | (GUF_DICT_KV_META_T)kv_idx;
} }
@ -593,7 +591,7 @@ GUF_DICT_KWRDS void GUF_CAT(GUF_DICT_NAME, _insert_val_arg)(GUF_DICT_NAME *ht, G
if (!key_exists) { if (!key_exists) {
return NULL; return NULL;
} else { } else {
GUF_ASSERT(idx != SIZE_T_MAX); GUF_ASSERT(idx != SIZE_MAX);
GUF_ASSERT((ptrdiff_t)idx < ht->kv_indices_cap); GUF_ASSERT((ptrdiff_t)idx < ht->kv_indices_cap);
const size_t kv_idx = GUF_DICT_META_GET_IDX(ht->kv_indices[idx]); const size_t kv_idx = GUF_DICT_META_GET_IDX(ht->kv_indices[idx]);
GUF_ASSERT(kv_idx <= PTRDIFF_MAX && (ptrdiff_t)kv_idx < ht->kv_elems.size); GUF_ASSERT(kv_idx <= PTRDIFF_MAX && (ptrdiff_t)kv_idx < ht->kv_elems.size);
@ -618,7 +616,7 @@ GUF_DICT_KWRDS bool GUF_CAT(GUF_DICT_NAME, _contains)(GUF_DICT_NAME *ht, const G
const GUF_DICT_HASH_T key_hash = GUF_DICT_KEY_HASH(key); const GUF_DICT_HASH_T key_hash = GUF_DICT_KEY_HASH(key);
const size_t idx = GUF_CAT(GUF_DICT_NAME, _find_idx)(ht, key, key_hash, &key_exists); const size_t idx = GUF_CAT(GUF_DICT_NAME, _find_idx)(ht, key, key_hash, &key_exists);
if (key_exists) { if (key_exists) {
GUF_ASSERT(idx != SIZE_T_MAX); GUF_ASSERT(idx != SIZE_MAX);
GUF_ASSERT(!GUF_DICT_META_IS_TOMBSTONE(ht->kv_indices[idx])); GUF_ASSERT(!GUF_DICT_META_IS_TOMBSTONE(ht->kv_indices[idx]));
GUF_ASSERT(!GUF_DICT_META_IS_NULL(ht->kv_indices[idx])); GUF_ASSERT(!GUF_DICT_META_IS_NULL(ht->kv_indices[idx]));
} }
@ -646,7 +644,7 @@ GUF_DICT_KWRDS bool GUF_CAT(GUF_DICT_NAME, _erase)(GUF_DICT_NAME *ht, const GUF_
if (!key_exists) { if (!key_exists) {
return false; return false;
} }
GUF_ASSERT(idx < SIZE_T_MAX && (ptrdiff_t)idx < ht->kv_indices_cap); GUF_ASSERT(idx < SIZE_MAX && (ptrdiff_t)idx < ht->kv_indices_cap);
const size_t kv_idx = (size_t)GUF_DICT_META_GET_IDX(ht->kv_indices[idx]); const size_t kv_idx = (size_t)GUF_DICT_META_GET_IDX(ht->kv_indices[idx]);
GUF_ASSERT(kv_idx < (size_t)ht->kv_elems.size); GUF_ASSERT(kv_idx < (size_t)ht->kv_elems.size);

View File

@ -68,8 +68,8 @@ GUF_SORT_KWRDS GUF_T *GUF_CAT(GUF_FN_NAME_PREFIX, _merge_sort)(GUF_T *restrict a
GUF_T *in = arr; GUF_T *in = arr;
GUF_T *out = arr_tmp; GUF_T *out = arr_tmp;
const size_t arr_len = n; const size_t arr_len = n;
for (size_t len = 1; len < arr_len; len = (len * 2 > len) ? 2 * len : SIZE_T_MAX) { // Subarray len 1, 2, 4, 8, ... for (size_t len = 1; len < arr_len; len = (len * 2 > len) ? 2 * len : SIZE_MAX) { // Subarray len 1, 2, 4, 8, ...
for (size_t i = 0; i < arr_len; i = ((i + 2 * len) > i) ? (i + 2 * len) : SIZE_T_MAX) { // For each pair of subarrays of length len: for (size_t i = 0; i < arr_len; i = ((i + 2 * len) > i) ? (i + 2 * len) : SIZE_MAX) { // For each pair of subarrays of length len:
const size_t left_begin = i; // left subarray: [left_begin, right_begin) const size_t left_begin = i; // left subarray: [left_begin, right_begin)
const size_t right_begin = GUF_MIN(i + len, arr_len), right_end = GUF_MIN(i + 2 * len, arr_len); // right subarray [right_begin, right_end) const size_t right_begin = GUF_MIN(i + len, arr_len), right_end = GUF_MIN(i + 2 * len, arr_len); // right subarray [right_begin, right_end)
size_t left_idx = left_begin, right_idx = right_begin; size_t left_idx = left_begin, right_idx = right_begin;

View File

@ -187,7 +187,7 @@ GUF_STR_KWRDS bool guf_str_is_uninit(const guf_str *str);
} }
#elif defined(GUF_PLATFORM_BIG_ENDIAN) #elif defined(GUF_PLATFORM_BIG_ENDIAN)
#define GUF_STR_IS_LONG_MASK ((unsigned char)0x80) /* binary 1000 0000 */ #define GUF_STR_IS_LONG_MASK ((unsigned char)0x80) /* binary 1000 0000 */
#define GUF_STR_GET_CAP_MASK ((size_t)SIZE_T_MAX >> 1u) /* binary 0111.1111 (1111.1111)* 1111.1111 */ #define GUF_STR_GET_CAP_MASK ((size_t)SIZE_MAX >> 1u) /* binary 0111.1111 (1111.1111)* 1111.1111 */
static inline void guf_str_set_lng_cap_(guf_str *str, size_t cap_with_null) static inline void guf_str_set_lng_cap_(guf_str *str, size_t cap_with_null)
{ {

View File

@ -1,11 +1,14 @@
- sort: add cpp #ifdef to remove restrict from declaration - sort: add cpp #ifdef to remove restrict from declaration
- tests for guf_dict with GUF_DICT_64_BIT_IDX (and also hash32/hash64); maybe pass kv_type to insert to avoid copy - tests for guf_dict with GUF_DICT_64_BIT_IDX (and also hash32/hash64); maybe pass kv_type to insert to avoid copy
- dict elems shrink to fit; allow to pass GUF_DBUF_USE_GROWTH_FAC_ONE_POINT_FIVE - dict elems shrink to fit; allow to pass GUF_DBUF_USE_GROWTH_FAC_ONE_POINT_FIVE; start capacity (for elems and kv_indices?)
- dict: if load factor is high due to mostly tombstones, just try rehashing without resizing first?
- bench
- example directory
- guf_stack, guf_queue, guf_dqueue, guf_prio_queue (using a heap), guf_ringbuf - guf_stack, guf_queue, guf_dqueue, guf_prio_queue (using a heap), guf_ringbuf
- guf_dict: maybe put key_hash into kv_elem; maybe change order of key and val in kv_elem depending on size of key and val.
- track allocs for test (implement alloc tracker): - track allocs for test (implement alloc tracker):
- each thread needs its own alloc and alloc_ctx; don't track granular, give each allocator its unique id maybe? - each thread needs its own alloc and alloc_ctx; don't track granular, give each allocator its unique id maybe?