diff --git a/Makefile b/Makefile index 13654d3c..28853273 100644 --- a/Makefile +++ b/Makefile @@ -65,12 +65,13 @@ extract: $(ALL_KRML_FILES) -header spdx-header.txt \ -library Steel.ArrayArith -static-header Steel.ArrayArith -no-prefix Steel.ArrayArith \ -bundle Steel.SpinLock= -bundle 'FStar.\*,Steel.\*' \ - -bundle 'StarMalloc=Map.\*,Impl.\*,Spec.\*,Main,Main2,Main.Meta,LargeAlloc'[rename=StarMalloc] \ + -bundle 'StarMalloc=Mman2,FatalError,Map.\*,Impl.\*,Spec.\*,Main,Main2,Main.Meta,LargeAlloc'[rename=StarMalloc] \ -bundle 'SlabsCommon,SlabsFree,SlabsAlloc'[rename=Slabs] \ -bundle 'SlotsFree,SlotsAlloc'[rename=Slots] \ -bundle 'ArrayList,ArrayListGen'[rename=ArrayList] \ -no-prefix LargeAlloc \ -no-prefix Mman \ + -no-prefix Mman2 \ -no-prefix MemoryTrap \ -no-prefix ExternUtils \ -warn-error +9 \ diff --git a/c/memory.c b/c/memory.c index 33d75342..0bb58081 100644 --- a/c/memory.c +++ b/c/memory.c @@ -6,6 +6,8 @@ #include "internal/StarMalloc.h" #include "fatal_error.h" +/// Mman.fst + // syscall wrapper: initialization (fatal error on failure) uint8_t *mmap_init(size_t size) { void* ptr = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0); @@ -42,38 +44,6 @@ void munmap_u8(uint8_t* ptr, size_t len) { return; } -// syscall wrapper -void mmap_strict_trap (uint8_t* ptr, size_t len) { - void* p = mmap((void*) ptr, len, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0); - if (p == MAP_FAILED && errno != ENOMEM) { - fatal_error("non-ENOMEM mmap failure"); - } - return; -} - -// syscall wrapper -void mmap_strict_untrap (uint8_t* ptr, size_t len) { - void* p = mmap((void*) ptr, len, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0); - if (p == MAP_FAILED && errno != ENOMEM) { - fatal_error("non-ENOMEM mmap failure"); - } - return; -} - -// syscall wrapper -void mmap_trap (uint8_t* ptr, size_t len) { - int r = madvise((void*) ptr, len, MADV_DONTNEED); - if (r && errno != ENOMEM) { - fatal_error("non-ENOMEM MADV_DONTNEED madvise failure"); - } - return; -} - -// wrapper -void mmap_untrap (uint8_t* ptr, size_t len) { - return; -} - // monomorphised functions // large allocator allocation wrapper @@ -116,7 +86,43 @@ uint32_t* mmap_sizes_init (size_t len) { return (uint32_t*) mmap_init(len * sizeof(uint32_t)); } +/// Mman2 + // large allocator init -Impl_Trees_Types_node** mmap_ptr_metadata() { - return (Impl_Trees_Types_node**) mmap_init(sizeof(Impl_Trees_Types_node*)); +Impl_Trees_Cast_M_node** mmap_ptr_metadata_init() { + return (Impl_Trees_Cast_M_node**) mmap_init(sizeof(Impl_Trees_Cast_M_node*)); +} + +/// MemoryTrap.fst + +// syscall wrapper +void mmap_strict_trap (uint8_t* ptr, size_t len) { + void* p = mmap((void*) ptr, len, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0); + if (p == MAP_FAILED && errno != ENOMEM) { + fatal_error("non-ENOMEM mmap failure"); + } + return; +} + +// syscall wrapper +void mmap_strict_untrap (uint8_t* ptr, size_t len) { + void* p = mmap((void*) ptr, len, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0); + if (p == MAP_FAILED && errno != ENOMEM) { + fatal_error("non-ENOMEM mmap failure"); + } + return; +} + +// syscall wrapper +void mmap_trap (uint8_t* ptr, size_t len) { + int r = madvise((void*) ptr, len, MADV_DONTNEED); + if (r && errno != ENOMEM) { + fatal_error("non-ENOMEM MADV_DONTNEED madvise failure"); + } + return; +} + +// wrapper +void mmap_untrap (uint8_t* ptr, size_t len) { + return; } diff --git a/c/utils.c b/c/utils.c index 22e51f6a..c6030226 100644 --- a/c/utils.c 
+++ b/c/utils.c @@ -23,9 +23,9 @@ size_t builtin_mul_overflow(size_t x, size_t y) { } // required comparison using uintptr_t -uint64_t Impl_Trees_Types_cmp( - Impl_Trees_Types_data x, - Impl_Trees_Types_data y) { +uint64_t Impl_Trees_Cast_M_cmp( + Impl_Trees_Cast_M_data x, + Impl_Trees_Cast_M_data y) { uintptr_t x_cast = (uintptr_t) x.fst; uintptr_t y_cast = (uintptr_t) y.fst; if (x_cast == y_cast) { @@ -43,27 +43,12 @@ uint8_t* memcpy_u8(uint8_t* dest, uint8_t* src, size_t n) { } // monomorphized (from void* to uint8_t*) glue -// TODO: memset can be optimized, use hacl-star libmemzero +// TODO: compat, use hacl-star libmemzero void apply_zeroing_u8(uint8_t* dest, size_t n) { - memset((void*) dest, 0, n); + explicit_bzero(dest, n); return; } -// required casts -Impl_Trees_Types_node* Impl_Trees_Types_array_u8__to__ref_node(uint8_t* arr) { - // see lib_avl_mono/Impl.Trees.Types.fst - static_assert(sizeof(Impl_Trees_Types_node) <= 64); - return (Impl_Trees_Types_node*) arr; -} -uint8_t* Impl_Trees_Types_ref_node__to__array_u8(Impl_Trees_Types_node* r) { - return (uint8_t*) r; -} - -// TODO: comment -void StarMalloc_malloc_zeroing_die(uint8_t* ptr) { - fatal_error("malloc_zeroing_die"); -} - bool check_zeroing_u8(uint8_t* ptr, size_t len) { for (size_t i = 0; i < len; i++) { if (ptr[i] != 0) { @@ -73,7 +58,28 @@ bool check_zeroing_u8(uint8_t* ptr, size_t len) { return true; } -void SlotsFree_deallocate_zeroing(uint32_t sc, uint8_t* ptr) { - size_t len = (size_t) sc; - memset(ptr, 0, len); +// required casts +Impl_Trees_Cast_M_node* Impl_Trees_Cast_M_array_u8__to__ref_node(uint8_t* arr) { + // see lib_avl_mono/Impl.Trees.Types.fst + static_assert(sizeof(Impl_Trees_Cast_M_node) <= 64); + return (Impl_Trees_Cast_M_node*) arr; +} +uint8_t* Impl_Trees_Cast_M_ref_node__to__array_u8(Impl_Trees_Cast_M_node* r) { + return (uint8_t*) r; +} + +void FatalError_die_from_avl_node_malloc_failure (Impl_Trees_Cast_M_node x, uint8_t* ptr) { + fatal_error("large allocator: AVL node allocation failed"); +} +void FatalError_die_from_avl_node_free_failure (uint8_t* ptr) { + fatal_error("large allocator: AVL node deallocation failed"); +} +void FatalError_die_from_malloc_zeroing_check_failure (uint8_t* ptr) { + fatal_error("small allocator: malloc zeroing check failed"); +} +void FatalError_die_from_realloc_invalid_previous_alloc (uint8_t* ptr) { + fatal_error("realloc: invalid realloc"); +} +void FatalError_die_from_realloc_free_failure (uint8_t* ptr) { + fatal_error("realloc: invalid internal free"); } diff --git a/dist/ExternUtils.h b/dist/ExternUtils.h index 32b3f728..a40bbaa4 100644 --- a/dist/ExternUtils.h +++ b/dist/ExternUtils.h @@ -10,14 +10,14 @@ extern uint32_t ffs64(uint64_t x); extern uint32_t clz(uint64_t x); +extern size_t builtin_mul_overflow(size_t x, size_t y); + extern void apply_zeroing_u8(uint8_t *ptr, size_t n); extern bool check_zeroing_u8(uint8_t *ptr, size_t n); extern uint8_t *memcpy_u8(uint8_t *dest, uint8_t *src, size_t n); -extern size_t builtin_mul_overflow(size_t x, size_t y); - #define __ExternUtils_H_DEFINED #endif diff --git a/dist/Slots.c b/dist/Slots.c index a1e61b23..c8667824 100644 --- a/dist/Slots.c +++ b/dist/Slots.c @@ -89,7 +89,10 @@ static uint32_t deallocate_slot_aux1(uint32_t size_class, uint32_t diff) return diff / size_class; } -extern void SlotsFree_deallocate_zeroing(uint32_t size_class, uint8_t *ptr); +static void deallocate_zeroing(uint32_t size_class, uint8_t *ptr) +{ + apply_zeroing_u8(ptr, (size_t)size_class); +} static bool deallocate_slot_(uint32_t size_class, 
uint64_t *md, uint8_t *ptr, size_t diff_) { @@ -102,7 +105,7 @@ static bool deallocate_slot_(uint32_t size_class, uint64_t *md, uint8_t *ptr, si if (b1) { Bitmap5_bm_unset(md, pos); - SlotsFree_deallocate_zeroing(size_class, ptr); + deallocate_zeroing(size_class, ptr); return true; } else diff --git a/dist/StarMalloc.c b/dist/StarMalloc.c index cb0fc511..f7d565d3 100644 --- a/dist/StarMalloc.c +++ b/dist/StarMalloc.c @@ -3,6 +3,14 @@ #include "internal/StarMalloc.h" +static uint32_t avl_data_size = 64U; + +extern int64_t Impl_Trees_Cast_M_cmp(Impl_Trees_Cast_M_data uu___, Impl_Trees_Cast_M_data x0); + +extern uint8_t *Impl_Trees_Cast_M_ref_node__to__array_u8(Impl_Trees_Cast_M_node *x); + +extern Impl_Trees_Cast_M_node *Impl_Trees_Cast_M_array_u8__to__ref_node(uint8_t *arr); + static void init_idxs(size_t *r_idxs) { r_idxs[0U] = (size_t)16777217U; @@ -46,8 +54,6 @@ init_size_class( size_classes[k].data = data; } -static uint32_t avl_data_size = 64U; - void Impl_Trees_Types_init_mmap_md_slabs(Impl_Trees_Types_mmap_md_slabs *ret) { size_t slab_region_size = (size_t)16777216U * (size_t)4096U; @@ -74,21 +80,28 @@ void Impl_Trees_Types_init_mmap_md_slabs(Impl_Trees_Types_mmap_md_slabs *ret) Impl_Trees_Types_mmap_md_slabs Impl_Trees_Types_metadata_slabs; -extern uint8_t *Impl_Trees_Types_ref_node__to__array_u8(Impl_Trees_Types_node *x); +extern Impl_Trees_Cast_M_node +*FatalError_die_from_avl_node_malloc_failure(Impl_Trees_Cast_M_node x, uint8_t *ptr); -extern Impl_Trees_Types_node *Impl_Trees_Types_array_u8__to__ref_node(uint8_t *arr); +extern void FatalError_die_from_avl_node_free_failure(uint8_t *ptr); -extern int64_t Impl_Trees_Types_cmp(Impl_Trees_Types_data uu___, Impl_Trees_Types_data x0); +extern void FatalError_die_from_malloc_zeroing_check_failure(uint8_t *ptr); -bool Impl_BST_M_member(Impl_Trees_Types_node *ptr, Impl_Trees_Types_data v) +extern void FatalError_die_from_realloc_invalid_previous_alloc(uint8_t *ptr); + +extern void FatalError_die_from_realloc_free_failure(uint8_t *ptr); + +extern Impl_Trees_Cast_M_node **mmap_ptr_metadata_init(void); + +bool Impl_BST_M_member(Impl_Trees_Cast_M_node *ptr, Impl_Trees_Cast_M_data v) { if (ptr == NULL) return false; else { - Impl_Trees_Types_node node = *ptr; - Impl_Trees_Types_data data = node.data; - int64_t delta = Impl_Trees_Types_cmp(v, data); + Impl_Trees_Cast_M_node node = *ptr; + Impl_Trees_Cast_M_data data = node.data; + int64_t delta = Impl_Trees_Cast_M_cmp(v, data); if (delta == (int64_t)0) return true; else if (delta < (int64_t)0) @@ -104,20 +117,20 @@ bool Impl_BST_M_member(Impl_Trees_Types_node *ptr, Impl_Trees_Types_data v) } } -static Impl_Trees_Types_node *rotate_left_right(Impl_Trees_Types_node *ptr) +static Impl_Trees_Cast_M_node *rotate_left_right(Impl_Trees_Cast_M_node *ptr) { - Impl_Trees_Types_node x_node = *ptr; - Impl_Trees_Types_data x = x_node.data; - Impl_Trees_Types_node z_node = *x_node.left; - Impl_Trees_Types_data z = z_node.data; - Impl_Trees_Types_node y_node = *z_node.right; - Impl_Trees_Types_data y = y_node.data; + Impl_Trees_Cast_M_node x_node = *ptr; + Impl_Trees_Cast_M_data x = x_node.data; + Impl_Trees_Cast_M_node z_node = *x_node.left; + Impl_Trees_Cast_M_data z = z_node.data; + Impl_Trees_Cast_M_node y_node = *z_node.right; + Impl_Trees_Cast_M_data y = y_node.data; uint64_t s10; if (z_node.left == NULL) s10 = 0ULL; else { - Impl_Trees_Types_node node = *z_node.left; + Impl_Trees_Cast_M_node node = *z_node.left; s10 = node.size; } uint64_t s20; @@ -125,7 +138,7 @@ static Impl_Trees_Types_node 
*rotate_left_right(Impl_Trees_Types_node *ptr) s20 = 0ULL; else { - Impl_Trees_Types_node node = *y_node.left; + Impl_Trees_Cast_M_node node = *y_node.left; s20 = node.size; } uint64_t s = s10 + s20 + 1ULL; @@ -134,7 +147,7 @@ static Impl_Trees_Types_node *rotate_left_right(Impl_Trees_Types_node *ptr) h10 = 0ULL; else { - Impl_Trees_Types_node node = *z_node.left; + Impl_Trees_Cast_M_node node = *z_node.left; h10 = node.height; } uint64_t h20; @@ -142,7 +155,7 @@ static Impl_Trees_Types_node *rotate_left_right(Impl_Trees_Types_node *ptr) h20 = 0ULL; else { - Impl_Trees_Types_node node = *y_node.left; + Impl_Trees_Cast_M_node node = *y_node.left; h20 = node.height; } uint64_t ite0; @@ -151,16 +164,16 @@ static Impl_Trees_Types_node *rotate_left_right(Impl_Trees_Types_node *ptr) else ite0 = h20; uint64_t h = ite0 + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n = { .data = z, .left = z_node.left, .right = y_node.left, .size = s, .height = h }; *x_node.left = n; - Impl_Trees_Types_node *new_z = x_node.left; + Impl_Trees_Cast_M_node *new_z = x_node.left; uint64_t s11; if (y_node.right == NULL) s11 = 0ULL; else { - Impl_Trees_Types_node node = *y_node.right; + Impl_Trees_Cast_M_node node = *y_node.right; s11 = node.size; } uint64_t s21; @@ -168,7 +181,7 @@ static Impl_Trees_Types_node *rotate_left_right(Impl_Trees_Types_node *ptr) s21 = 0ULL; else { - Impl_Trees_Types_node node = *x_node.right; + Impl_Trees_Cast_M_node node = *x_node.right; s21 = node.size; } uint64_t s0 = s11 + s21 + 1ULL; @@ -177,7 +190,7 @@ static Impl_Trees_Types_node *rotate_left_right(Impl_Trees_Types_node *ptr) h11 = 0ULL; else { - Impl_Trees_Types_node node = *y_node.right; + Impl_Trees_Cast_M_node node = *y_node.right; h11 = node.height; } uint64_t h21; @@ -185,7 +198,7 @@ static Impl_Trees_Types_node *rotate_left_right(Impl_Trees_Types_node *ptr) h21 = 0ULL; else { - Impl_Trees_Types_node node = *x_node.right; + Impl_Trees_Cast_M_node node = *x_node.right; h21 = node.height; } uint64_t ite1; @@ -194,16 +207,16 @@ static Impl_Trees_Types_node *rotate_left_right(Impl_Trees_Types_node *ptr) else ite1 = h21; uint64_t h0 = ite1 + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n0 = { .data = x, .left = y_node.right, .right = x_node.right, .size = s0, .height = h0 }; *ptr = n0; - Impl_Trees_Types_node *new_x = ptr; + Impl_Trees_Cast_M_node *new_x = ptr; uint64_t s1; if (new_z == NULL) s1 = 0ULL; else { - Impl_Trees_Types_node node = *new_z; + Impl_Trees_Cast_M_node node = *new_z; s1 = node.size; } uint64_t s2; @@ -211,7 +224,7 @@ static Impl_Trees_Types_node *rotate_left_right(Impl_Trees_Types_node *ptr) s2 = 0ULL; else { - Impl_Trees_Types_node node = *new_x; + Impl_Trees_Cast_M_node node = *new_x; s2 = node.size; } uint64_t s3 = s1 + s2 + 1ULL; @@ -220,7 +233,7 @@ static Impl_Trees_Types_node *rotate_left_right(Impl_Trees_Types_node *ptr) h1 = 0ULL; else { - Impl_Trees_Types_node node = *new_z; + Impl_Trees_Cast_M_node node = *new_z; h1 = node.height; } uint64_t h2; @@ -228,7 +241,7 @@ static Impl_Trees_Types_node *rotate_left_right(Impl_Trees_Types_node *ptr) h2 = 0ULL; else { - Impl_Trees_Types_node node = *new_x; + Impl_Trees_Cast_M_node node = *new_x; h2 = node.height; } uint64_t ite; @@ -237,27 +250,27 @@ static Impl_Trees_Types_node *rotate_left_right(Impl_Trees_Types_node *ptr) else ite = h2; uint64_t h3 = ite + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n1 = { .data = y, .left = new_z, .right = new_x, .size = s3, .height = h3 }; *z_node.right = n1; - Impl_Trees_Types_node *new_y = 
z_node.right; + Impl_Trees_Cast_M_node *new_y = z_node.right; return new_y; } -static Impl_Trees_Types_node *rotate_right_left(Impl_Trees_Types_node *ptr) +static Impl_Trees_Cast_M_node *rotate_right_left(Impl_Trees_Cast_M_node *ptr) { - Impl_Trees_Types_node x_node = *ptr; - Impl_Trees_Types_data x = x_node.data; - Impl_Trees_Types_node z_node = *x_node.right; - Impl_Trees_Types_data z = z_node.data; - Impl_Trees_Types_node y_node = *z_node.left; - Impl_Trees_Types_data y = y_node.data; + Impl_Trees_Cast_M_node x_node = *ptr; + Impl_Trees_Cast_M_data x = x_node.data; + Impl_Trees_Cast_M_node z_node = *x_node.right; + Impl_Trees_Cast_M_data z = z_node.data; + Impl_Trees_Cast_M_node y_node = *z_node.left; + Impl_Trees_Cast_M_data y = y_node.data; uint64_t s10; if (x_node.left == NULL) s10 = 0ULL; else { - Impl_Trees_Types_node node = *x_node.left; + Impl_Trees_Cast_M_node node = *x_node.left; s10 = node.size; } uint64_t s20; @@ -265,7 +278,7 @@ static Impl_Trees_Types_node *rotate_right_left(Impl_Trees_Types_node *ptr) s20 = 0ULL; else { - Impl_Trees_Types_node node = *y_node.left; + Impl_Trees_Cast_M_node node = *y_node.left; s20 = node.size; } uint64_t s = s10 + s20 + 1ULL; @@ -274,7 +287,7 @@ static Impl_Trees_Types_node *rotate_right_left(Impl_Trees_Types_node *ptr) h10 = 0ULL; else { - Impl_Trees_Types_node node = *x_node.left; + Impl_Trees_Cast_M_node node = *x_node.left; h10 = node.height; } uint64_t h20; @@ -282,7 +295,7 @@ static Impl_Trees_Types_node *rotate_right_left(Impl_Trees_Types_node *ptr) h20 = 0ULL; else { - Impl_Trees_Types_node node = *y_node.left; + Impl_Trees_Cast_M_node node = *y_node.left; h20 = node.height; } uint64_t ite0; @@ -291,16 +304,16 @@ static Impl_Trees_Types_node *rotate_right_left(Impl_Trees_Types_node *ptr) else ite0 = h20; uint64_t h = ite0 + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n = { .data = x, .left = x_node.left, .right = y_node.left, .size = s, .height = h }; *ptr = n; - Impl_Trees_Types_node *new_x = ptr; + Impl_Trees_Cast_M_node *new_x = ptr; uint64_t s11; if (y_node.right == NULL) s11 = 0ULL; else { - Impl_Trees_Types_node node = *y_node.right; + Impl_Trees_Cast_M_node node = *y_node.right; s11 = node.size; } uint64_t s21; @@ -308,7 +321,7 @@ static Impl_Trees_Types_node *rotate_right_left(Impl_Trees_Types_node *ptr) s21 = 0ULL; else { - Impl_Trees_Types_node node = *z_node.right; + Impl_Trees_Cast_M_node node = *z_node.right; s21 = node.size; } uint64_t s0 = s11 + s21 + 1ULL; @@ -317,7 +330,7 @@ static Impl_Trees_Types_node *rotate_right_left(Impl_Trees_Types_node *ptr) h11 = 0ULL; else { - Impl_Trees_Types_node node = *y_node.right; + Impl_Trees_Cast_M_node node = *y_node.right; h11 = node.height; } uint64_t h21; @@ -325,7 +338,7 @@ static Impl_Trees_Types_node *rotate_right_left(Impl_Trees_Types_node *ptr) h21 = 0ULL; else { - Impl_Trees_Types_node node = *z_node.right; + Impl_Trees_Cast_M_node node = *z_node.right; h21 = node.height; } uint64_t ite1; @@ -334,16 +347,16 @@ static Impl_Trees_Types_node *rotate_right_left(Impl_Trees_Types_node *ptr) else ite1 = h21; uint64_t h0 = ite1 + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n0 = { .data = z, .left = y_node.right, .right = z_node.right, .size = s0, .height = h0 }; *x_node.right = n0; - Impl_Trees_Types_node *new_z = x_node.right; + Impl_Trees_Cast_M_node *new_z = x_node.right; uint64_t s1; if (new_x == NULL) s1 = 0ULL; else { - Impl_Trees_Types_node node = *new_x; + Impl_Trees_Cast_M_node node = *new_x; s1 = node.size; } uint64_t s2; @@ -351,7 +364,7 @@ static 
Impl_Trees_Types_node *rotate_right_left(Impl_Trees_Types_node *ptr) s2 = 0ULL; else { - Impl_Trees_Types_node node = *new_z; + Impl_Trees_Cast_M_node node = *new_z; s2 = node.size; } uint64_t s3 = s1 + s2 + 1ULL; @@ -360,7 +373,7 @@ static Impl_Trees_Types_node *rotate_right_left(Impl_Trees_Types_node *ptr) h1 = 0ULL; else { - Impl_Trees_Types_node node = *new_x; + Impl_Trees_Cast_M_node node = *new_x; h1 = node.height; } uint64_t h2; @@ -368,7 +381,7 @@ static Impl_Trees_Types_node *rotate_right_left(Impl_Trees_Types_node *ptr) h2 = 0ULL; else { - Impl_Trees_Types_node node = *new_z; + Impl_Trees_Cast_M_node node = *new_z; h2 = node.height; } uint64_t ite; @@ -377,25 +390,25 @@ static Impl_Trees_Types_node *rotate_right_left(Impl_Trees_Types_node *ptr) else ite = h2; uint64_t h3 = ite + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n1 = { .data = y, .left = new_x, .right = new_z, .size = s3, .height = h3 }; *z_node.left = n1; - Impl_Trees_Types_node *new_y = z_node.left; + Impl_Trees_Cast_M_node *new_y = z_node.left; return new_y; } -static Impl_Trees_Types_node *rotate_left(Impl_Trees_Types_node *ptr) +static Impl_Trees_Cast_M_node *rotate_left(Impl_Trees_Cast_M_node *ptr) { - Impl_Trees_Types_node x_node = *ptr; - Impl_Trees_Types_data x = x_node.data; - Impl_Trees_Types_node z_node = *x_node.right; - Impl_Trees_Types_data z = z_node.data; + Impl_Trees_Cast_M_node x_node = *ptr; + Impl_Trees_Cast_M_data x = x_node.data; + Impl_Trees_Cast_M_node z_node = *x_node.right; + Impl_Trees_Cast_M_data z = z_node.data; uint64_t s10; if (x_node.left == NULL) s10 = 0ULL; else { - Impl_Trees_Types_node node = *x_node.left; + Impl_Trees_Cast_M_node node = *x_node.left; s10 = node.size; } uint64_t s20; @@ -403,7 +416,7 @@ static Impl_Trees_Types_node *rotate_left(Impl_Trees_Types_node *ptr) s20 = 0ULL; else { - Impl_Trees_Types_node node = *z_node.left; + Impl_Trees_Cast_M_node node = *z_node.left; s20 = node.size; } uint64_t s = s10 + s20 + 1ULL; @@ -412,7 +425,7 @@ static Impl_Trees_Types_node *rotate_left(Impl_Trees_Types_node *ptr) h10 = 0ULL; else { - Impl_Trees_Types_node node = *x_node.left; + Impl_Trees_Cast_M_node node = *x_node.left; h10 = node.height; } uint64_t h20; @@ -420,7 +433,7 @@ static Impl_Trees_Types_node *rotate_left(Impl_Trees_Types_node *ptr) h20 = 0ULL; else { - Impl_Trees_Types_node node = *z_node.left; + Impl_Trees_Cast_M_node node = *z_node.left; h20 = node.height; } uint64_t ite0; @@ -429,16 +442,16 @@ static Impl_Trees_Types_node *rotate_left(Impl_Trees_Types_node *ptr) else ite0 = h20; uint64_t h = ite0 + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n = { .data = x, .left = x_node.left, .right = z_node.left, .size = s, .height = h }; *ptr = n; - Impl_Trees_Types_node *new_subnode = ptr; + Impl_Trees_Cast_M_node *new_subnode = ptr; uint64_t s1; if (new_subnode == NULL) s1 = 0ULL; else { - Impl_Trees_Types_node node = *new_subnode; + Impl_Trees_Cast_M_node node = *new_subnode; s1 = node.size; } uint64_t s2; @@ -446,7 +459,7 @@ static Impl_Trees_Types_node *rotate_left(Impl_Trees_Types_node *ptr) s2 = 0ULL; else { - Impl_Trees_Types_node node = *z_node.right; + Impl_Trees_Cast_M_node node = *z_node.right; s2 = node.size; } uint64_t s0 = s1 + s2 + 1ULL; @@ -455,7 +468,7 @@ static Impl_Trees_Types_node *rotate_left(Impl_Trees_Types_node *ptr) h1 = 0ULL; else { - Impl_Trees_Types_node node = *new_subnode; + Impl_Trees_Cast_M_node node = *new_subnode; h1 = node.height; } uint64_t h2; @@ -463,7 +476,7 @@ static Impl_Trees_Types_node 
*rotate_left(Impl_Trees_Types_node *ptr) h2 = 0ULL; else { - Impl_Trees_Types_node node = *z_node.right; + Impl_Trees_Cast_M_node node = *z_node.right; h2 = node.height; } uint64_t ite; @@ -472,25 +485,25 @@ static Impl_Trees_Types_node *rotate_left(Impl_Trees_Types_node *ptr) else ite = h2; uint64_t h0 = ite + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n0 = { .data = z, .left = new_subnode, .right = z_node.right, .size = s0, .height = h0 }; *x_node.right = n0; - Impl_Trees_Types_node *new_node = x_node.right; + Impl_Trees_Cast_M_node *new_node = x_node.right; return new_node; } -static Impl_Trees_Types_node *rotate_right(Impl_Trees_Types_node *ptr) +static Impl_Trees_Cast_M_node *rotate_right(Impl_Trees_Cast_M_node *ptr) { - Impl_Trees_Types_node x_node = *ptr; - Impl_Trees_Types_data x = x_node.data; - Impl_Trees_Types_node z_node = *x_node.left; - Impl_Trees_Types_data z = z_node.data; + Impl_Trees_Cast_M_node x_node = *ptr; + Impl_Trees_Cast_M_data x = x_node.data; + Impl_Trees_Cast_M_node z_node = *x_node.left; + Impl_Trees_Cast_M_data z = z_node.data; uint64_t s10; if (z_node.right == NULL) s10 = 0ULL; else { - Impl_Trees_Types_node node = *z_node.right; + Impl_Trees_Cast_M_node node = *z_node.right; s10 = node.size; } uint64_t s20; @@ -498,7 +511,7 @@ static Impl_Trees_Types_node *rotate_right(Impl_Trees_Types_node *ptr) s20 = 0ULL; else { - Impl_Trees_Types_node node = *x_node.right; + Impl_Trees_Cast_M_node node = *x_node.right; s20 = node.size; } uint64_t s = s10 + s20 + 1ULL; @@ -507,7 +520,7 @@ static Impl_Trees_Types_node *rotate_right(Impl_Trees_Types_node *ptr) h10 = 0ULL; else { - Impl_Trees_Types_node node = *z_node.right; + Impl_Trees_Cast_M_node node = *z_node.right; h10 = node.height; } uint64_t h20; @@ -515,7 +528,7 @@ static Impl_Trees_Types_node *rotate_right(Impl_Trees_Types_node *ptr) h20 = 0ULL; else { - Impl_Trees_Types_node node = *x_node.right; + Impl_Trees_Cast_M_node node = *x_node.right; h20 = node.height; } uint64_t ite0; @@ -524,16 +537,16 @@ static Impl_Trees_Types_node *rotate_right(Impl_Trees_Types_node *ptr) else ite0 = h20; uint64_t h = ite0 + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n = { .data = x, .left = z_node.right, .right = x_node.right, .size = s, .height = h }; *ptr = n; - Impl_Trees_Types_node *new_subnode = ptr; + Impl_Trees_Cast_M_node *new_subnode = ptr; uint64_t s1; if (z_node.left == NULL) s1 = 0ULL; else { - Impl_Trees_Types_node node = *z_node.left; + Impl_Trees_Cast_M_node node = *z_node.left; s1 = node.size; } uint64_t s2; @@ -541,7 +554,7 @@ static Impl_Trees_Types_node *rotate_right(Impl_Trees_Types_node *ptr) s2 = 0ULL; else { - Impl_Trees_Types_node node = *new_subnode; + Impl_Trees_Cast_M_node node = *new_subnode; s2 = node.size; } uint64_t s0 = s1 + s2 + 1ULL; @@ -550,7 +563,7 @@ static Impl_Trees_Types_node *rotate_right(Impl_Trees_Types_node *ptr) h1 = 0ULL; else { - Impl_Trees_Types_node node = *z_node.left; + Impl_Trees_Cast_M_node node = *z_node.left; h1 = node.height; } uint64_t h2; @@ -558,7 +571,7 @@ static Impl_Trees_Types_node *rotate_right(Impl_Trees_Types_node *ptr) h2 = 0ULL; else { - Impl_Trees_Types_node node = *new_subnode; + Impl_Trees_Cast_M_node node = *new_subnode; h2 = node.height; } uint64_t ite; @@ -567,26 +580,26 @@ static Impl_Trees_Types_node *rotate_right(Impl_Trees_Types_node *ptr) else ite = h2; uint64_t h0 = ite + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n0 = { .data = z, .left = z_node.left, .right = new_subnode, .size = s0, .height = h0 }; *x_node.left = 
n0; - Impl_Trees_Types_node *new_node = x_node.left; + Impl_Trees_Cast_M_node *new_node = x_node.left; return new_node; } -static bool is_balanced_local(Impl_Trees_Types_node *ptr) +static bool is_balanced_local(Impl_Trees_Cast_M_node *ptr) { if (ptr == NULL) return true; else { - Impl_Trees_Types_node node = *ptr; + Impl_Trees_Cast_M_node node = *ptr; uint64_t lh; if (node.left == NULL) lh = 0ULL; else { - Impl_Trees_Types_node node1 = *node.left; + Impl_Trees_Cast_M_node node1 = *node.left; lh = node1.height; } uint64_t rh; @@ -594,7 +607,7 @@ static bool is_balanced_local(Impl_Trees_Types_node *ptr) rh = 0ULL; else { - Impl_Trees_Types_node node1 = *node.right; + Impl_Trees_Cast_M_node node1 = *node.right; rh = node1.height; } bool b1 = rh + 1ULL >= lh; @@ -603,19 +616,19 @@ static bool is_balanced_local(Impl_Trees_Types_node *ptr) } } -static Impl_Trees_Types_node *rebalance_avl(Impl_Trees_Types_node *ptr) +static Impl_Trees_Cast_M_node *rebalance_avl(Impl_Trees_Cast_M_node *ptr) { if (is_balanced_local(ptr)) return ptr; else { - Impl_Trees_Types_node node = *ptr; + Impl_Trees_Cast_M_node node = *ptr; uint64_t lh; if (node.left == NULL) lh = 0ULL; else { - Impl_Trees_Types_node node1 = *node.left; + Impl_Trees_Cast_M_node node1 = *node.left; lh = node1.height; } uint64_t rh; @@ -623,18 +636,18 @@ static Impl_Trees_Types_node *rebalance_avl(Impl_Trees_Types_node *ptr) rh = 0ULL; else { - Impl_Trees_Types_node node1 = *node.right; + Impl_Trees_Cast_M_node node1 = *node.right; rh = node1.height; } if (lh > rh + 1ULL) { - Impl_Trees_Types_node l_node = *node.left; + Impl_Trees_Cast_M_node l_node = *node.left; uint64_t llh; if (l_node.left == NULL) llh = 0ULL; else { - Impl_Trees_Types_node node1 = *l_node.left; + Impl_Trees_Cast_M_node node1 = *l_node.left; llh = node1.height; } uint64_t lrh; @@ -642,7 +655,7 @@ static Impl_Trees_Types_node *rebalance_avl(Impl_Trees_Types_node *ptr) lrh = 0ULL; else { - Impl_Trees_Types_node node1 = *l_node.right; + Impl_Trees_Cast_M_node node1 = *l_node.right; lrh = node1.height; } if (lrh > llh) @@ -652,13 +665,13 @@ static Impl_Trees_Types_node *rebalance_avl(Impl_Trees_Types_node *ptr) } else if (rh > lh + 1ULL) { - Impl_Trees_Types_node r_node = *node.right; + Impl_Trees_Cast_M_node r_node = *node.right; uint64_t rlh; if (r_node.left == NULL) rlh = 0ULL; else { - Impl_Trees_Types_node node1 = *r_node.left; + Impl_Trees_Cast_M_node node1 = *r_node.left; rlh = node1.height; } uint64_t rrh; @@ -666,7 +679,7 @@ static Impl_Trees_Types_node *rebalance_avl(Impl_Trees_Types_node *ptr) rrh = 0ULL; else { - Impl_Trees_Types_node node1 = *r_node.right; + Impl_Trees_Cast_M_node node1 = *r_node.right; rrh = node1.height; } if (rlh > rrh) @@ -679,34 +692,34 @@ static Impl_Trees_Types_node *rebalance_avl(Impl_Trees_Types_node *ptr) } } -Impl_Trees_Types_node +Impl_Trees_Cast_M_node *Impl_AVL_M_insert_avl( - Impl_Trees_Types_node *(*f1)(Impl_Trees_Types_node x0), - void (*f2)(Impl_Trees_Types_node *x0), + Impl_Trees_Cast_M_node *(*f1)(Impl_Trees_Cast_M_node x0), + void (*f2)(Impl_Trees_Cast_M_node *x0), bool r, - Impl_Trees_Types_node *ptr, - Impl_Trees_Types_data new_data + Impl_Trees_Cast_M_node *ptr, + Impl_Trees_Cast_M_data new_data ) { if (ptr == NULL) { - Impl_Trees_Types_node *l = NULL; - Impl_Trees_Types_node *r1 = NULL; + Impl_Trees_Cast_M_node *l = NULL; + Impl_Trees_Cast_M_node *r1 = NULL; uint64_t sr = 1ULL; uint64_t hr = 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n = { .data = new_data, .left = l, .right = r1, .size = sr, .height = hr }; - 
Impl_Trees_Types_node *ptr1 = f1(n); + Impl_Trees_Cast_M_node *ptr1 = f1(n); return ptr1; } else { - Impl_Trees_Types_node node = *ptr; - int64_t delta = Impl_Trees_Types_cmp(node.data, new_data); + Impl_Trees_Cast_M_node node = *ptr; + int64_t delta = Impl_Trees_Cast_M_cmp(node.data, new_data); if (delta == (int64_t)0) if (r) { - Impl_Trees_Types_node + Impl_Trees_Cast_M_node new_node = { .data = new_data, .left = node.left, .right = node.right, .size = node.size, @@ -719,13 +732,13 @@ Impl_Trees_Types_node return ptr; else if (delta > (int64_t)0) { - Impl_Trees_Types_node *new_left = Impl_AVL_M_insert_avl(f1, f2, r, node.left, new_data); + Impl_Trees_Cast_M_node *new_left = Impl_AVL_M_insert_avl(f1, f2, r, node.left, new_data); uint64_t s1; if (new_left == NULL) s1 = 0ULL; else { - Impl_Trees_Types_node node1 = *new_left; + Impl_Trees_Cast_M_node node1 = *new_left; s1 = node1.size; } uint64_t s2; @@ -733,7 +746,7 @@ Impl_Trees_Types_node s2 = 0ULL; else { - Impl_Trees_Types_node node1 = *node.right; + Impl_Trees_Cast_M_node node1 = *node.right; s2 = node1.size; } uint64_t s = s1 + s2 + 1ULL; @@ -742,7 +755,7 @@ Impl_Trees_Types_node h1 = 0ULL; else { - Impl_Trees_Types_node node1 = *new_left; + Impl_Trees_Cast_M_node node1 = *new_left; h1 = node1.height; } uint64_t h2; @@ -750,7 +763,7 @@ Impl_Trees_Types_node h2 = 0ULL; else { - Impl_Trees_Types_node node1 = *node.right; + Impl_Trees_Cast_M_node node1 = *node.right; h2 = node1.height; } uint64_t ite; @@ -759,21 +772,21 @@ Impl_Trees_Types_node else ite = h2; uint64_t h = ite + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n = { .data = node.data, .left = new_left, .right = node.right, .size = s, .height = h }; *ptr = n; - Impl_Trees_Types_node *new_ptr = ptr; + Impl_Trees_Cast_M_node *new_ptr = ptr; return rebalance_avl(new_ptr); } else { - Impl_Trees_Types_node *new_right = Impl_AVL_M_insert_avl(f1, f2, r, node.right, new_data); + Impl_Trees_Cast_M_node *new_right = Impl_AVL_M_insert_avl(f1, f2, r, node.right, new_data); uint64_t s1; if (node.left == NULL) s1 = 0ULL; else { - Impl_Trees_Types_node node1 = *node.left; + Impl_Trees_Cast_M_node node1 = *node.left; s1 = node1.size; } uint64_t s2; @@ -781,7 +794,7 @@ Impl_Trees_Types_node s2 = 0ULL; else { - Impl_Trees_Types_node node1 = *new_right; + Impl_Trees_Cast_M_node node1 = *new_right; s2 = node1.size; } uint64_t s = s1 + s2 + 1ULL; @@ -790,7 +803,7 @@ Impl_Trees_Types_node h1 = 0ULL; else { - Impl_Trees_Types_node node1 = *node.left; + Impl_Trees_Cast_M_node node1 = *node.left; h1 = node1.height; } uint64_t h2; @@ -798,7 +811,7 @@ Impl_Trees_Types_node h2 = 0ULL; else { - Impl_Trees_Types_node node1 = *new_right; + Impl_Trees_Cast_M_node node1 = *new_right; h2 = node1.height; } uint64_t ite; @@ -807,10 +820,10 @@ Impl_Trees_Types_node else ite = h2; uint64_t h = ite + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n = { .data = node.data, .left = node.left, .right = new_right, .size = s, .height = h }; *ptr = n; - Impl_Trees_Types_node *new_ptr = ptr; + Impl_Trees_Cast_M_node *new_ptr = ptr; return rebalance_avl(new_ptr); } } @@ -818,15 +831,15 @@ Impl_Trees_Types_node Impl_AVL_M_result Impl_AVL_M_remove_leftmost_avl( - Impl_Trees_Types_node *(*f1)(Impl_Trees_Types_node x0), - void (*f2)(Impl_Trees_Types_node *x0), - Impl_Trees_Types_node *ptr + Impl_Trees_Cast_M_node *(*f1)(Impl_Trees_Cast_M_node x0), + void (*f2)(Impl_Trees_Cast_M_node *x0), + Impl_Trees_Cast_M_node *ptr ) { - Impl_Trees_Types_node node = *ptr; + Impl_Trees_Cast_M_node node = *ptr; if (node.left 
== NULL) { - Impl_Trees_Types_data data = node.data; + Impl_Trees_Cast_M_data data = node.data; f2(ptr); return ((Impl_AVL_M_result){ .ptr = node.right, .data = data }); } @@ -838,7 +851,7 @@ Impl_AVL_M_remove_leftmost_avl( s1 = 0ULL; else { - Impl_Trees_Types_node node1 = *r0.ptr; + Impl_Trees_Cast_M_node node1 = *r0.ptr; s1 = node1.size; } uint64_t s2; @@ -846,7 +859,7 @@ Impl_AVL_M_remove_leftmost_avl( s2 = 0ULL; else { - Impl_Trees_Types_node node1 = *node.right; + Impl_Trees_Cast_M_node node1 = *node.right; s2 = node1.size; } uint64_t s = s1 + s2 + 1ULL; @@ -855,7 +868,7 @@ Impl_AVL_M_remove_leftmost_avl( h1 = 0ULL; else { - Impl_Trees_Types_node node1 = *r0.ptr; + Impl_Trees_Cast_M_node node1 = *r0.ptr; h1 = node1.height; } uint64_t h2; @@ -863,7 +876,7 @@ Impl_AVL_M_remove_leftmost_avl( h2 = 0ULL; else { - Impl_Trees_Types_node node1 = *node.right; + Impl_Trees_Cast_M_node node1 = *node.right; h2 = node1.height; } uint64_t ite; @@ -872,32 +885,32 @@ Impl_AVL_M_remove_leftmost_avl( else ite = h2; uint64_t h = ite + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n = { .data = node.data, .left = r0.ptr, .right = node.right, .size = s, .height = h }; *ptr = n; - Impl_Trees_Types_node *new_ptr = ptr; - Impl_Trees_Types_node *new_ptr1 = rebalance_avl(new_ptr); + Impl_Trees_Cast_M_node *new_ptr = ptr; + Impl_Trees_Cast_M_node *new_ptr1 = rebalance_avl(new_ptr); return ((Impl_AVL_M_result){ .ptr = new_ptr1, .data = r0.data }); } } -Impl_Trees_Types_node +Impl_Trees_Cast_M_node *Impl_AVL_M_delete_avl( - Impl_Trees_Types_node *(*f1)(Impl_Trees_Types_node x0), - void (*f2)(Impl_Trees_Types_node *x0), - Impl_Trees_Types_node *ptr, - Impl_Trees_Types_data data_to_rm + Impl_Trees_Cast_M_node *(*f1)(Impl_Trees_Cast_M_node x0), + void (*f2)(Impl_Trees_Cast_M_node *x0), + Impl_Trees_Cast_M_node *ptr, + Impl_Trees_Cast_M_data data_to_rm ) { if (ptr == NULL) return ptr; else { - Impl_Trees_Types_node node = *ptr; - int64_t delta = Impl_Trees_Types_cmp(data_to_rm, node.data); + Impl_Trees_Cast_M_node node = *ptr; + int64_t delta = Impl_Trees_Cast_M_cmp(data_to_rm, node.data); if (delta == (int64_t)0) { - Impl_Trees_Types_node node1 = *ptr; + Impl_Trees_Cast_M_node node1 = *ptr; if (node1.right == NULL) { f2(ptr); @@ -916,7 +929,7 @@ Impl_Trees_Types_node s1 = 0ULL; else { - Impl_Trees_Types_node node2 = *node1.left; + Impl_Trees_Cast_M_node node2 = *node1.left; s1 = node2.size; } uint64_t s2; @@ -924,7 +937,7 @@ Impl_Trees_Types_node s2 = 0ULL; else { - Impl_Trees_Types_node node2 = *r0.ptr; + Impl_Trees_Cast_M_node node2 = *r0.ptr; s2 = node2.size; } uint64_t s = s1 + s2 + 1ULL; @@ -933,7 +946,7 @@ Impl_Trees_Types_node h1 = 0ULL; else { - Impl_Trees_Types_node node2 = *node1.left; + Impl_Trees_Cast_M_node node2 = *node1.left; h1 = node2.height; } uint64_t h2; @@ -941,7 +954,7 @@ Impl_Trees_Types_node h2 = 0ULL; else { - Impl_Trees_Types_node node2 = *r0.ptr; + Impl_Trees_Cast_M_node node2 = *r0.ptr; h2 = node2.height; } uint64_t ite; @@ -950,23 +963,23 @@ Impl_Trees_Types_node else ite = h2; uint64_t h = ite + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n = { .data = r0.data, .left = node1.left, .right = r0.ptr, .size = s, .height = h }; *ptr = n; - Impl_Trees_Types_node *new_ptr = ptr; - Impl_Trees_Types_node *new_ptr1 = rebalance_avl(new_ptr); + Impl_Trees_Cast_M_node *new_ptr = ptr; + Impl_Trees_Cast_M_node *new_ptr1 = rebalance_avl(new_ptr); return new_ptr1; } } else if (delta < (int64_t)0) { - Impl_Trees_Types_node *new_left = Impl_AVL_M_delete_avl(f1, f2, node.left, 
data_to_rm); + Impl_Trees_Cast_M_node *new_left = Impl_AVL_M_delete_avl(f1, f2, node.left, data_to_rm); uint64_t s1; if (new_left == NULL) s1 = 0ULL; else { - Impl_Trees_Types_node node1 = *new_left; + Impl_Trees_Cast_M_node node1 = *new_left; s1 = node1.size; } uint64_t s2; @@ -974,7 +987,7 @@ Impl_Trees_Types_node s2 = 0ULL; else { - Impl_Trees_Types_node node1 = *node.right; + Impl_Trees_Cast_M_node node1 = *node.right; s2 = node1.size; } uint64_t s = s1 + s2 + 1ULL; @@ -983,7 +996,7 @@ Impl_Trees_Types_node h1 = 0ULL; else { - Impl_Trees_Types_node node1 = *new_left; + Impl_Trees_Cast_M_node node1 = *new_left; h1 = node1.height; } uint64_t h2; @@ -991,7 +1004,7 @@ Impl_Trees_Types_node h2 = 0ULL; else { - Impl_Trees_Types_node node1 = *node.right; + Impl_Trees_Cast_M_node node1 = *node.right; h2 = node1.height; } uint64_t ite; @@ -1000,21 +1013,21 @@ Impl_Trees_Types_node else ite = h2; uint64_t h = ite + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n = { .data = node.data, .left = new_left, .right = node.right, .size = s, .height = h }; *ptr = n; - Impl_Trees_Types_node *new_ptr = ptr; + Impl_Trees_Cast_M_node *new_ptr = ptr; return rebalance_avl(new_ptr); } else { - Impl_Trees_Types_node *new_right = Impl_AVL_M_delete_avl(f1, f2, node.right, data_to_rm); + Impl_Trees_Cast_M_node *new_right = Impl_AVL_M_delete_avl(f1, f2, node.right, data_to_rm); uint64_t s1; if (node.left == NULL) s1 = 0ULL; else { - Impl_Trees_Types_node node1 = *node.left; + Impl_Trees_Cast_M_node node1 = *node.left; s1 = node1.size; } uint64_t s2; @@ -1022,7 +1035,7 @@ Impl_Trees_Types_node s2 = 0ULL; else { - Impl_Trees_Types_node node1 = *new_right; + Impl_Trees_Cast_M_node node1 = *new_right; s2 = node1.size; } uint64_t s = s1 + s2 + 1ULL; @@ -1031,7 +1044,7 @@ Impl_Trees_Types_node h1 = 0ULL; else { - Impl_Trees_Types_node node1 = *node.left; + Impl_Trees_Cast_M_node node1 = *node.left; h1 = node1.height; } uint64_t h2; @@ -1039,7 +1052,7 @@ Impl_Trees_Types_node h2 = 0ULL; else { - Impl_Trees_Types_node node1 = *new_right; + Impl_Trees_Cast_M_node node1 = *new_right; h2 = node1.height; } uint64_t ite; @@ -1048,29 +1061,29 @@ Impl_Trees_Types_node else ite = h2; uint64_t h = ite + 1ULL; - Impl_Trees_Types_node + Impl_Trees_Cast_M_node n = { .data = node.data, .left = node.left, .right = new_right, .size = s, .height = h }; *ptr = n; - Impl_Trees_Types_node *new_ptr = ptr; + Impl_Trees_Cast_M_node *new_ptr = ptr; return rebalance_avl(new_ptr); } } } -static size_t snd__Prims_dtuple2__uint8_t_____size_t(Impl_Trees_Types_data x) +static size_t snd__Prims_dtuple2__uint8_t_____size_t(Impl_Trees_Cast_M_data x) { return x.snd; } FStar_Pervasives_Native_option__size_t -Map_M_find(Impl_Trees_Types_node *ptr, Impl_Trees_Types_data v) +Map_M_find(Impl_Trees_Cast_M_node *ptr, Impl_Trees_Cast_M_data v) { if (ptr == NULL) return ((FStar_Pervasives_Native_option__size_t){ .tag = FStar_Pervasives_Native_None }); else { - Impl_Trees_Types_node node = *ptr; - int64_t delta = Impl_Trees_Types_cmp(v, node.data); + Impl_Trees_Cast_M_node node = *ptr; + int64_t delta = Impl_Trees_Cast_M_cmp(v, node.data); if (delta == (int64_t)0) { size_t r = snd__Prims_dtuple2__uint8_t_____size_t(node.data); @@ -1090,12 +1103,10 @@ Map_M_find(Impl_Trees_Types_node *ptr, Impl_Trees_Types_data v) } } -extern Impl_Trees_Types_node **mmap_ptr_metadata(void); - void init_mmap_md(mmap_md *ret) { - Impl_Trees_Types_node **ptr = mmap_ptr_metadata(); - Impl_Trees_Types_node *tree = NULL; + Impl_Trees_Cast_M_node **ptr = mmap_ptr_metadata_init(); 
+ Impl_Trees_Cast_M_node *tree = NULL; *ptr = tree; Steel_SpinLock_new_lock(&ret->lock); ret->data = ptr; @@ -1103,56 +1114,57 @@ void init_mmap_md(mmap_md *ret) mmap_md metadata; -static Impl_Trees_Types_node *trees_malloc2(Impl_Trees_Types_node x) +static Impl_Trees_Cast_M_node *trees_malloc2(Impl_Trees_Cast_M_node x) { Steel_SpinLock_acquire(&Impl_Trees_Types_metadata_slabs.lock); uint8_t *ptr = SizeClass_allocate_size_class(Impl_Trees_Types_metadata_slabs.scs); - Impl_Trees_Types_node *r; + Impl_Trees_Cast_M_node *r0; if (ptr == NULL) - r = NULL; + { + Impl_Trees_Cast_M_node *r = FatalError_die_from_avl_node_malloc_failure(x, ptr); + r0 = r; + } else { - Impl_Trees_Types_node *r_ = Impl_Trees_Types_array_u8__to__ref_node(ptr); + Impl_Trees_Cast_M_node *r_ = Impl_Trees_Cast_M_array_u8__to__ref_node(ptr); *r_ = x; - r = r_; + r0 = r_; } Steel_SpinLock_release(&Impl_Trees_Types_metadata_slabs.lock); - Impl_Trees_Types_node *r0 = r; - return r0; + Impl_Trees_Cast_M_node *r = r0; + return r; } -static void trees_free2(Impl_Trees_Types_node *r) +static void trees_free2(Impl_Trees_Cast_M_node *r) { Steel_SpinLock_acquire(&Impl_Trees_Types_metadata_slabs.lock); - uint8_t *ptr = Impl_Trees_Types_ref_node__to__array_u8(r); + uint8_t *ptr = Impl_Trees_Cast_M_ref_node__to__array_u8(r); uint8_t *pt0 = ptr; uint8_t *pt1 = Impl_Trees_Types_metadata_slabs.slab_region; ptrdiff_t diff = pt0 - pt1; size_t diff_sz = (size_t)diff; bool b = SizeClass_deallocate_size_class(Impl_Trees_Types_metadata_slabs.scs, ptr, diff_sz); - if (b) - { - - } + if (!b) + FatalError_die_from_avl_node_free_failure(ptr); Steel_SpinLock_release(&Impl_Trees_Types_metadata_slabs.lock); } static uint8_t *large_malloc(size_t size) { Steel_SpinLock_acquire(&metadata.lock); - Impl_Trees_Types_node *md_v0 = *metadata.data; + Impl_Trees_Cast_M_node *md_v0 = *metadata.data; uint64_t md_size; if (md_v0 == NULL) md_size = 0ULL; else { - Impl_Trees_Types_node node = *md_v0; + Impl_Trees_Cast_M_node node = *md_v0; md_size = node.size; } uint8_t *r; if (md_size < 18446744073709551615ULL) { - Impl_Trees_Types_node *md_v = *metadata.data; + Impl_Trees_Cast_M_node *md_v = *metadata.data; uint8_t *ptr0 = mmap_u8(size); uint8_t *ptr; if (ptr0 == NULL) @@ -1160,18 +1172,18 @@ static uint8_t *large_malloc(size_t size) else { size_t size_ = PtrdiffWrapper_mmap_actual_size(size); - bool b = Impl_BST_M_member(md_v, ((Impl_Trees_Types_data){ .fst = ptr0, .snd = size_ })); + bool b = Impl_BST_M_member(md_v, ((Impl_Trees_Cast_M_data){ .fst = ptr0, .snd = size_ })); if (b) ptr = NULL; else { - Impl_Trees_Types_node + Impl_Trees_Cast_M_node *md_v_ = Impl_AVL_M_insert_avl(trees_malloc2, trees_free2, false, md_v, - ((Impl_Trees_Types_data){ .fst = ptr0, .snd = size_ })); + ((Impl_Trees_Cast_M_data){ .fst = ptr0, .snd = size_ })); *metadata.data = md_v_; ptr = ptr0; } @@ -1196,8 +1208,8 @@ static bool uu___is_Some__size_t(FStar_Pervasives_Native_option__size_t projecte static bool large_free(uint8_t *ptr) { Steel_SpinLock_acquire(&metadata.lock); - Impl_Trees_Types_node *md_v = *metadata.data; - Impl_Trees_Types_data k_elem = { .fst = ptr, .snd = (size_t)0U }; + Impl_Trees_Cast_M_node *md_v = *metadata.data; + Impl_Trees_Cast_M_data k_elem = { .fst = ptr, .snd = (size_t)0U }; FStar_Pervasives_Native_option__size_t size = Map_M_find(md_v, k_elem); bool r; if (uu___is_Some__size_t(size)) @@ -1208,12 +1220,12 @@ static bool large_free(uint8_t *ptr) else size1 = KRML_EABORT(size_t, "unreachable (pattern matches are exhaustive in F*)"); munmap_u8(ptr, size1); - 
Impl_Trees_Types_node + Impl_Trees_Cast_M_node *md_v_ = Impl_AVL_M_delete_avl(trees_malloc2, trees_free2, md_v, - ((Impl_Trees_Types_data){ .fst = ptr, .snd = size1 })); + ((Impl_Trees_Cast_M_data){ .fst = ptr, .snd = size1 })); *metadata.data = md_v_; r = true; } @@ -1224,11 +1236,11 @@ static bool large_free(uint8_t *ptr) return b; } -static size_t large_getsize_aux(Impl_Trees_Types_node **metadata1, uint8_t *ptr) +static size_t large_getsize_aux(Impl_Trees_Cast_M_node **metadata1, uint8_t *ptr) { - Impl_Trees_Types_node *md_v = *metadata1; + Impl_Trees_Cast_M_node *md_v = *metadata1; FStar_Pervasives_Native_option__size_t - size = Map_M_find(md_v, ((Impl_Trees_Types_data){ .fst = ptr, .snd = (size_t)0U })); + size = Map_M_find(md_v, ((Impl_Trees_Cast_M_data){ .fst = ptr, .snd = (size_t)0U })); if (uu___is_Some__size_t(size)) if (size.tag == FStar_Pervasives_Native_Some) return size.v; @@ -2231,7 +2243,7 @@ uint8_t *StarMalloc_malloc(size_t arena_id, size_t size) return ptr; else { - StarMalloc_malloc_zeroing_die(ptr); + FatalError_die_from_malloc_zeroing_check_failure(ptr); return NULL; } } @@ -2266,7 +2278,7 @@ uint8_t *StarMalloc_aligned_alloc(size_t arena_id, size_t alignment, size_t size return ptr; else { - StarMalloc_malloc_zeroing_die(ptr); + FatalError_die_from_malloc_zeroing_check_failure(ptr); return NULL; } } @@ -2338,7 +2350,10 @@ uint8_t *StarMalloc_realloc(size_t arena_id, uint8_t *ptr, size_t new_size) if (b) return NULL; else + { + FatalError_die_from_realloc_free_failure(ptr); return NULL; + } } else { @@ -2360,7 +2375,10 @@ uint8_t *StarMalloc_realloc(size_t arena_id, uint8_t *ptr, size_t new_size) bool large_case_optim_condition = !old_allocation_is_small && same_case && new_size <= old_size; if (old_size == (size_t)0U) + { + FatalError_die_from_realloc_invalid_previous_alloc(ptr); return NULL; + } else if (small_case_optim_condition || large_case_optim_condition) return ptr; else @@ -2380,7 +2398,10 @@ uint8_t *StarMalloc_realloc(size_t arena_id, uint8_t *ptr, size_t new_size) if (b) return new_ptr; else + { + FatalError_die_from_realloc_free_failure(ptr); return NULL; + } } } } diff --git a/dist/StarMalloc.h b/dist/StarMalloc.h index 96dcdef8..4bf011ae 100644 --- a/dist/StarMalloc.h +++ b/dist/StarMalloc.h @@ -14,8 +14,6 @@ #include "ExternUtils.h" #include "ArrayList.h" -extern void StarMalloc_malloc_zeroing_die(uint8_t *ptr); - extern size_t StarMalloc_threshold; uint8_t *StarMalloc_malloc(size_t arena_id, size_t size); diff --git a/dist/internal/StarMalloc.h b/dist/internal/StarMalloc.h index 1436e5b5..8ee14167 100644 --- a/dist/internal/StarMalloc.h +++ b/dist/internal/StarMalloc.h @@ -8,70 +8,70 @@ #include "../StarMalloc.h" -typedef struct Impl_Trees_Types_mmap_md_slabs_s -{ - uint8_t *slab_region; - SizeClass_size_class_struct_ scs; - Steel_SpinLock_lock lock; -} -Impl_Trees_Types_mmap_md_slabs; - -void Impl_Trees_Types_init_mmap_md_slabs(Impl_Trees_Types_mmap_md_slabs *ret); - -extern Impl_Trees_Types_mmap_md_slabs Impl_Trees_Types_metadata_slabs; - -typedef struct Impl_Trees_Types_data_s +typedef struct Impl_Trees_Cast_M_data_s { uint8_t *fst; size_t snd; } -Impl_Trees_Types_data; +Impl_Trees_Cast_M_data; -typedef struct Impl_Trees_Types_node_s Impl_Trees_Types_node; +typedef struct Impl_Trees_Cast_M_node_s Impl_Trees_Cast_M_node; -typedef struct Impl_Trees_Types_node_s Impl_Trees_Types_node; +typedef struct Impl_Trees_Cast_M_node_s Impl_Trees_Cast_M_node; -typedef struct Impl_Trees_Types_node_s +typedef struct Impl_Trees_Cast_M_node_s { - 
Impl_Trees_Types_data data; - Impl_Trees_Types_node *left; - Impl_Trees_Types_node *right; + Impl_Trees_Cast_M_data data; + Impl_Trees_Cast_M_node *left; + Impl_Trees_Cast_M_node *right; uint64_t size; uint64_t height; } -Impl_Trees_Types_node; +Impl_Trees_Cast_M_node; + +typedef struct Impl_Trees_Types_mmap_md_slabs_s +{ + uint8_t *slab_region; + SizeClass_size_class_struct_ scs; + Steel_SpinLock_lock lock; +} +Impl_Trees_Types_mmap_md_slabs; + +void Impl_Trees_Types_init_mmap_md_slabs(Impl_Trees_Types_mmap_md_slabs *ret); + +extern Impl_Trees_Types_mmap_md_slabs Impl_Trees_Types_metadata_slabs; -bool Impl_BST_M_member(Impl_Trees_Types_node *ptr, Impl_Trees_Types_data v); +bool Impl_BST_M_member(Impl_Trees_Cast_M_node *ptr, Impl_Trees_Cast_M_data v); -Impl_Trees_Types_node +Impl_Trees_Cast_M_node *Impl_AVL_M_insert_avl( - Impl_Trees_Types_node *(*f1)(Impl_Trees_Types_node x0), - void (*f2)(Impl_Trees_Types_node *x0), + Impl_Trees_Cast_M_node *(*f1)(Impl_Trees_Cast_M_node x0), + void (*f2)(Impl_Trees_Cast_M_node *x0), bool r, - Impl_Trees_Types_node *ptr, - Impl_Trees_Types_data new_data + Impl_Trees_Cast_M_node *ptr, + Impl_Trees_Cast_M_data new_data ); typedef struct Impl_AVL_M_result_s { - Impl_Trees_Types_node *ptr; - Impl_Trees_Types_data data; + Impl_Trees_Cast_M_node *ptr; + Impl_Trees_Cast_M_data data; } Impl_AVL_M_result; Impl_AVL_M_result Impl_AVL_M_remove_leftmost_avl( - Impl_Trees_Types_node *(*f1)(Impl_Trees_Types_node x0), - void (*f2)(Impl_Trees_Types_node *x0), - Impl_Trees_Types_node *ptr + Impl_Trees_Cast_M_node *(*f1)(Impl_Trees_Cast_M_node x0), + void (*f2)(Impl_Trees_Cast_M_node *x0), + Impl_Trees_Cast_M_node *ptr ); -Impl_Trees_Types_node +Impl_Trees_Cast_M_node *Impl_AVL_M_delete_avl( - Impl_Trees_Types_node *(*f1)(Impl_Trees_Types_node x0), - void (*f2)(Impl_Trees_Types_node *x0), - Impl_Trees_Types_node *ptr, - Impl_Trees_Types_data data_to_rm + Impl_Trees_Cast_M_node *(*f1)(Impl_Trees_Cast_M_node x0), + void (*f2)(Impl_Trees_Cast_M_node *x0), + Impl_Trees_Cast_M_node *ptr, + Impl_Trees_Cast_M_data data_to_rm ); #define FStar_Pervasives_Native_None 0 @@ -87,11 +87,11 @@ typedef struct FStar_Pervasives_Native_option__size_t_s FStar_Pervasives_Native_option__size_t; FStar_Pervasives_Native_option__size_t -Map_M_find(Impl_Trees_Types_node *ptr, Impl_Trees_Types_data v); +Map_M_find(Impl_Trees_Cast_M_node *ptr, Impl_Trees_Cast_M_data v); typedef struct mmap_md_s { - Impl_Trees_Types_node **data; + Impl_Trees_Cast_M_node **data; Steel_SpinLock_lock lock; } mmap_md; diff --git a/lib_avl_mono/Impl.Trees.Cast.M.fst b/lib_avl_mono/Impl.Trees.Cast.M.fst new file mode 100644 index 00000000..6bcd2356 --- /dev/null +++ b/lib_avl_mono/Impl.Trees.Cast.M.fst @@ -0,0 +1,102 @@ +module Impl.Trees.Cast.M + +module US = FStar.SizeT +module U32 = FStar.UInt32 +module U8 = FStar.UInt8 +module I64 = FStar.Int64 + +module R = Steel.Reference +module A = Steel.Array +open Steel.Effect.Atomic +open Steel.Effect + +module G = FStar.Ghost + +open Constants +open Utils2 + +/// lib_avl_mono/Impl.Trees.Cast.M.fst contains axiomatization that is required to reuse the slab allocator for the allocation of the large allocation metadata AVL tree's nodes. In particular, it consists mostly of corresponding casts. 
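To make the intent of these assumed casts concrete: a minimal C sketch, not part of the patch. The node layout and the helper name array_u8_to_ref_node_sketch below are illustrative stand-ins for the extracted Impl_Trees_Cast_M_node and for the real cast in c/utils.c above.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for the extracted Impl_Trees_Cast_M_node struct. */
    typedef struct node_s {
      struct { uint8_t *fst; size_t snd; } data;
      struct node_s *left;
      struct node_s *right;
      uint64_t size;
      uint64_t height;
    } node;

    /* The F* assumption avl_data_size_aux <= 64 is matched in C by a
       compile-time check: a 64-byte slab slot must be able to hold one
       AVL node, which is what makes the uint8_t* -> node* cast size-safe. */
    node *array_u8_to_ref_node_sketch(uint8_t *arr) {
      static_assert(sizeof(node) <= 64, "node must fit in a slab slot");
      return (node *) arr;
    }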
+ +// this is a compilation-time assert, see c/utils.c static_assert usage +assume val avl_data_size_aux : v:U32.t{U32.v v <= 64} + +let avl_data_size : v:sc{U32.v avl_data_size_aux <= U32.v v} = 64ul + +type data = x: (array U8.t * US.t){ + ( + US.v (snd x) > 0 /\ + US.v (snd x) > U32.v page_size /\ + A.length (fst x) == US.v (snd x) /\ + A.is_full_array (fst x) + ) + \/ + US.v (snd x) == 0 +} + +open Impl.Core +let node = node data + +// CAUTION: +// the refinement implies that the injectivity +// of the cast uint8_t* -> uintptr_t +// over: +// - valid pointers of large allocations +// (that is the set contained by the AVL tree) +// - those supplied by the user to free and realloc functions +// is part of the model +assume val cmp + : f:Impl.Common.cmp data{ + forall (x y: data). I64.v (f x y) == 0 ==> fst x == fst y + } + +assume val ref_node__to__array_u8_tot + (x: R.ref node) + : Pure (G.erased (array U8.t)) + (requires not (R.is_null x)) + (ensures fun r -> + not (A.is_null (G.reveal r)) /\ + A.length (G.reveal r) == U32.v avl_data_size + ) + +assume val ref_node__to__array_u8 + (x: R.ref node) + : Steel (array U8.t) + (R.vptr x) + (fun r -> A.varray r) + (requires fun _ -> not (R.is_null x)) + (ensures fun _ r _ -> + not (R.is_null x) /\ + not (A.is_null r) /\ + A.length r == U32.v avl_data_size /\ + r == G.reveal (ref_node__to__array_u8_tot x) + ) + +assume val array_u8__to__ref_node_tot + (arr: array U8.t) + : Pure (G.erased (R.ref node)) + (requires A.length arr == U32.v avl_data_size) + (ensures fun r -> + not (R.is_null (G.reveal r)) + ) + +assume val array_u8__to__ref_node + (arr: array U8.t) + : Steel (R.ref node) + (A.varray arr) + (fun r -> R.vptr r) + (requires fun _ -> A.length arr == U32.v avl_data_size) + (ensures fun _ r _ -> + not (R.is_null r) /\ + A.length arr == U32.v avl_data_size /\ + r == G.reveal (array_u8__to__ref_node_tot arr) + ) + +assume val array_u8__to__ref_node_bijectivity + (ptr: array U8.t) + : Lemma + (requires A.length ptr == U32.v avl_data_size) + (ensures ( + let x_cast1 = G.reveal (array_u8__to__ref_node_tot ptr) in + let x_cast2 = G.reveal (ref_node__to__array_u8_tot x_cast1) in + ptr == x_cast2 + )) diff --git a/lib_avl_mono/Impl.Trees.Types.fst b/lib_avl_mono/Impl.Trees.Types.fst index 8198bf4b..2cb26b2b 100644 --- a/lib_avl_mono/Impl.Trees.Types.fst +++ b/lib_avl_mono/Impl.Trees.Types.fst @@ -11,7 +11,6 @@ module US = FStar.SizeT module U64 = FStar.UInt64 module U32 = FStar.UInt32 module U8 = FStar.UInt8 -module I64 = FStar.Int64 noextract inline_for_extraction let array = Steel.ST.Array.array @@ -20,14 +19,23 @@ open Constants open Config open Utils2 -// this is a compilation-time assert, see c/utils.c static_assert usage -assume val avl_data_size_aux : v:U32.t{U32.v v <= 64} - -let avl_data_size : v:sc{U32.v avl_data_size_aux <= U32.v v} = 64ul - open SizeClass open Main +open Impl.Trees.Cast.M + +inline_for_extraction noextract +let data = data + +inline_for_extraction noextract +let node = node + +inline_for_extraction noextract +let cmp = cmp + +inline_for_extraction noextract +let avl_data_size = avl_data_size + inline_for_extraction noextract val init_avl_scs (slab_region: array U8.t) : Steel (size_class_struct) @@ -97,74 +105,8 @@ let metadata_slabs : mmap_md_slabs = init_mmap_md_slabs () #restart-solver -type data = x: (array U8.t * US.t){ - ( - US.v (snd x) > 0 /\ - US.v (snd x) > U32.v page_size /\ - A.length (fst x) == US.v (snd x) /\ - A.is_full_array (fst x) - ) - \/ - US.v (snd x) == 0 -} - -let node = node data - module G = 
FStar.Ghost -assume val ref_node__to__array_u8_tot - (x: ref node) - : Pure (G.erased (array U8.t)) - (requires not (is_null x)) - (ensures fun r -> - not (A.is_null (G.reveal r)) /\ - A.length (G.reveal r) == U32.v avl_data_size - ) - -assume val ref_node__to__array_u8 - (x: ref node) - : Steel (array U8.t) - (vptr x) - (fun r -> A.varray r) - (requires fun _ -> not (is_null x)) - (ensures fun _ r _ -> - not (is_null x) /\ - not (A.is_null r) /\ - A.length r == U32.v avl_data_size /\ - r == G.reveal (ref_node__to__array_u8_tot x) - ) - -assume val array_u8__to__ref_node_tot - (arr: array U8.t) - : Pure (G.erased (ref node)) - (requires A.length arr == U32.v avl_data_size) - (ensures fun r -> - not (is_null (G.reveal r)) - ) - -assume val array_u8__to__ref_node - (arr: array U8.t) - : Steel (ref node) - (A.varray arr) - (fun r -> vptr r) - (requires fun _ -> A.length arr == U32.v avl_data_size) - (ensures fun _ r _ -> - not (is_null r) /\ - A.length arr == U32.v avl_data_size /\ - r == G.reveal (array_u8__to__ref_node_tot arr) - ) - -//CAUTION -assume val array_u8__to__ref_node_bijectivity - (ptr: array U8.t) - : Lemma - (requires A.length ptr == U32.v avl_data_size) - (ensures ( - let x_cast1 = G.reveal (array_u8__to__ref_node_tot ptr) in - let x_cast2 = G.reveal (ref_node__to__array_u8_tot x_cast1) in - ptr == x_cast2 - )) - module UP = FStar.PtrdiffT #push-options "--fuel 0 --ifuel 0" @@ -173,7 +115,7 @@ let p : hpred data G.hide (fun (x: ref node) -> is_null x \/ (not (is_null x) /\ - (let ptr = ref_node__to__array_u8_tot x in + (let ptr = Impl.Trees.Cast.M.ref_node__to__array_u8_tot x in same_base_array ptr metadata_slabs.scs.slab_region /\ UP.fits (A.offset (A.ptr_of ptr) - A.offset (A.ptr_of metadata_slabs.scs.slab_region)) /\ A.offset (A.ptr_of ptr) - A.offset (A.ptr_of metadata_slabs.scs.slab_region) >= 0 /\ @@ -184,19 +126,6 @@ let p : hpred data let t = x:(t data){(G.reveal p) x} -// CAUTION: -// the refinement implies that the injectivity -// of the cast uint8_t* -> uintptr_t -// over: -// - valid pointers of large allocations -// (that is the set contained by the AVL tree) -// - those supplied by the user to free and realloc functions -// is part of the model -assume val cmp - : f:Impl.Common.cmp data{ - forall (x y: data). I64.v (f x y) == 0 ==> fst x == fst y - } - unfold type f_malloc = (x: node) -> Steel (ref node) emp (fun r -> vptr r) diff --git a/src/ArrayAlignment.fst b/src/ArrayAlignment.fst new file mode 100644 index 00000000..b47e28a5 --- /dev/null +++ b/src/ArrayAlignment.fst @@ -0,0 +1,36 @@ +module ArrayAlignment + +module U32 = FStar.UInt32 +module U8 = FStar.UInt8 +module A = Steel.Array + +let array = Steel.ST.Array.array +let ptr = Steel.ST.Array.ptr + +noextract +unfold let same_base_array (#a: Type) (arr1 arr2: array a) + = + A.base (A.ptr_of arr1) == A.base (A.ptr_of arr2) + +/// src/ArrayAlignment.fst contains axiomatization of array alignments. 
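The alignment lemma below is a plain modular-arithmetic fact stated at the pointer level. As a minimal runnable C sketch of one instance of it (illustrative only; is_aligned and lemma_instance are hypothetical helpers, not project code):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool is_aligned(const uint8_t *p, uint32_t v) {
      return (uintptr_t) p % v == 0;
    }

    /* Hypotheses: arr1 is v1-bytes aligned, v2 divides v1, and the offset
       of arr2 within the same allocation is a multiple of v2.
       Conclusion: arr2 is v2-bytes aligned, since then both
       (uintptr_t) arr1 % v2 == 0 and offset % v2 == 0 hold. */
    static bool lemma_instance(const uint8_t *arr1, size_t offset,
                               uint32_t v1, uint32_t v2) {
      const uint8_t *arr2 = arr1 + offset;
      if (!is_aligned(arr1, v1) || v1 % v2 != 0 || offset % v2 != 0)
        return true; /* hypotheses not met: nothing to check */
      return is_aligned(arr2, v2); /* true whenever the hypotheses hold */
    }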
+// abstract property that the underlying pointer is v-bytes aligned
+assume val array_u8_alignment (arr: array U8.t) (v: U32.t{U32.v v > 0}): prop
+
+// no model for the memory considered as the "root" array in a tree-like representation
+// thus, this *axiom* is required
+// to convey that if arr (of type uint8_t*) is v1-bytes aligned
+// and v1 is a multiple of v2,
+// then, for all k,
+// arr[k * v2] is v2-bytes aligned
+assume val array_u8_alignment_lemma
+  (arr1 arr2: array U8.t)
+  (v1 v2: (v:U32.t{U32.v v > 0}))
+  : Lemma
+  (requires
+    same_base_array arr1 arr2 /\
+    array_u8_alignment arr1 v1 /\
+    (U32.v v1) % (U32.v v2) == 0 /\
+    (A.offset (A.ptr_of arr2) - A.offset (A.ptr_of arr1)) % (U32.v v2) == 0)
+  (ensures
+    array_u8_alignment arr2 v2)
diff --git a/src/ExternUtils.fst b/src/ExternUtils.fst
index 6a80c6eb..03076123 100644
--- a/src/ExternUtils.fst
+++ b/src/ExternUtils.fst
@@ -13,12 +13,15 @@ module A = Steel.Array
 
 open Utils2
 
+/// src/ExternUtils.fst contains axiomatizations of various compiler builtins or basic C code.
+///
 let check (_:unit)
   =
   assert (not (nth_is_zero U64.one (U32.uint_to_t 0)));
   assert (nth_is_zero U64.one (U32.uint_to_t 1));
   ()
 
+// bitwise-operations compiler builtin
 assume val ffs64 (x: U64.t) (bound: G.erased U32.t)
   : Pure U32.t
   (requires
@@ -34,6 +37,7 @@ assume val ffs64 (x: U64.t) (bound: G.erased U32.t)
 
 // count leading zeroes,
 // returns r, the number of leftmost bits set to zero
+// bitwise-operations compiler builtin
 assume val clz (x: U64.t)
   : Pure U32.t
   (requires U64.v x > 0)
@@ -43,6 +47,18 @@ assume val clz (x: U64.t)
     not (nth_is_zero x (U32.uint_to_t (63 - U32.v r)))
   )
 
+open FStar.Mul
+
+// compiler builtin
+assume val builtin_mul_overflow(x y: US.t)
+  : Pure US.t
+  (requires True)
+  (ensures fun r ->
+    US.fits (US.v x * US.v y) /\
+    US.v r == US.v x * US.v y
+  )
+
+// explicit_bzero zeroing function wrapper
 assume val apply_zeroing_u8
   (ptr: array U8.t)
   (n: US.t)
@@ -54,34 +70,11 @@ assume val apply_zeroing_u8
   )
   (ensures fun h0 _ h1 ->
     A.length ptr >= US.v n /\
+    // the first n bytes of ptr are zeroes after execution of the function
     zf_u8 (Seq.slice (A.asel ptr h1) 0 (US.v n))
   )
 
-//let apply_zeroing_u8_cond
-//  (ptr: array U8.t)
-//  (n: US.t)
-//  : Steel unit
-//  (A.varray ptr)
-//  (fun _ -> A.varray ptr)
-//  (requires fun h0 ->
-//    A.length ptr >= US.v n
-//  )
-//  (ensures fun h0 _ h1 ->
-//    A.length ptr >= US.v n /\
-//    (not enable_zeroing_free ==>
-//      A.asel ptr h1 == A.asel ptr h0
-//    ) /\
-//    (enable_zeroing_free ==>
-//      zf_u8 (Seq.slice (A.asel ptr h1) 0 (US.v n))
-//    )
-//  )
-//  =
-//  if enable_zeroing_free then (
-//    apply_zeroing_u8 ptr n
-//  ) else (
-//    return ()
-//  )
-
+// check whether an array is zeroed
 assume val check_zeroing_u8
   (ptr: array U8.t)
   (n: US.t)
@@ -96,6 +89,7 @@ assume val check_zeroing_u8
     (b ==> zf_u8 (Seq.slice (A.asel ptr h1) 0 (US.v n)))
   )
 
+// memcpy function wrapper
 assume val memcpy_u8
   (dest src: array U8.t) (n: US.t)
   : Steel (array U8.t)
@@ -113,13 +107,3 @@ assume val memcpy_u8
       Seq.slice (A.asel src h0) 0 (US.v n) /\
     A.asel src h1 == A.asel src h0
   )
-
-open FStar.Mul
-
-assume val builtin_mul_overflow(x y: US.t)
-  : Pure US.t
-  (requires True)
-  (ensures fun r ->
-    US.fits (US.v x * US.v y) /\
-    US.v r == US.v x * US.v y
-  )
diff --git a/src/FatalError.fst b/src/FatalError.fst
new file mode 100644
index 00000000..5be62887
--- /dev/null
+++ b/src/FatalError.fst
@@ -0,0 +1,58 @@
+module FatalError
+
+open Steel.Effect.Atomic
+open Steel.Effect
+module U8 = FStar.UInt8
+module A = Steel.Array
+module R =
diff --git a/src/FatalError.fst b/src/FatalError.fst
new file mode 100644
index 00000000..5be62887
--- /dev/null
+++ b/src/FatalError.fst
@@ -0,0 +1,58 @@
+module FatalError
+
+open Steel.Effect.Atomic
+open Steel.Effect
+module U8 = FStar.UInt8
+module A = Steel.Array
+module R = Steel.Reference
+
+module G = FStar.Ghost
+
+open Config
+open Utils2
+open NullOrVarray
+
+/// src/FatalError.fst contains context-specific wrappers of the C fatal_error function.
+
+open Impl.Trees.Cast.M
+open Impl.Trees.Types
+
+// caller: LargeAlloc.trees_malloc2_aux
+assume val die_from_avl_node_malloc_failure (x: node) (ptr: array U8.t)
+  : Steel (R.ref node)
+  (if A.is_null ptr then emp else A.varray ptr)
+  (fun r -> R.vptr r)
+  (requires fun _ ->
+    A.is_null ptr
+  )
+  (ensures fun _ r h1 ->
+    R.sel r h1 == x /\
+    not (R.is_null r) /\
+    (G.reveal p) r
+  )
+
+// caller: LargeAlloc.trees_free2_aux
+assume val die_from_avl_node_free_failure (ptr: array U8.t)
+  : SteelT unit
+  (A.varray ptr)
+  (fun _ -> emp)
+
+// callers: StarMalloc.{malloc,calloc}
+assume val die_from_malloc_zeroing_check_failure (ptr: array U8.t)
+  : Steel unit
+  (A.varray ptr)
+  (fun _ -> emp)
+  (requires fun _ -> enable_zeroing_malloc)
+  (ensures fun _ _ _ -> True)
+
+// caller: StarMalloc.realloc
+assume val die_from_realloc_invalid_previous_alloc (ptr: array U8.t)
+  : SteelT unit
+  (A.varray ptr)
+  (fun _ -> emp)
+
+// caller: StarMalloc.realloc
+assume val die_from_realloc_free_failure (ptr: array U8.t)
+  : SteelT unit
+  (A.varray ptr)
+  (fun _ -> emp)
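Each die_from_* wrapper extracts to a C function that forwards a context-specific message to the C fatal_error helper. The implementation of fatal_error itself is not part of this diff; a minimal sketch of what such a helper presumably looks like:

#include <stdio.h>
#include <stdlib.h>

/* Hedged sketch of fatal_error (the real definition lives in the
   repository's C support code, not shown here). It must not return,
   which is what justifies the Steel signatures above consuming
   `A.varray ptr` and producing `emp`. */
_Noreturn void fatal_error(const char *msg) {
  fputs(msg, stderr);
  fputc('\n', stderr);
  abort();
}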
diff --git a/src/LargeAlloc.fst b/src/LargeAlloc.fst
index b3cf116d..05e6e002 100644
--- a/src/LargeAlloc.fst
+++ b/src/LargeAlloc.fst
@@ -1,6 +1,5 @@
 module LargeAlloc
 
-open FStar.Ghost
 open Steel.Effect.Atomic
 open Steel.Effect
 module A = Steel.Array
@@ -18,23 +17,23 @@ module G = FStar.Ghost
 open Impl.Mono
 open Map.M
 open Impl.Core
-open Impl.Trees.Types
 open Prelude
 open Constants
 open Utils2
 open NullOrVarray
 
-#set-options "--ide_id_info_off"
-
 #push-options "--fuel 0 --ifuel 0"
 
 open Steel.Reference
 
-assume val mmap_ptr_metadata (_:unit)
-  : SteelT (ref t)
-  emp
-  (fun r -> vptr r)
+open Mman2
+
+open Impl.Trees.Types
+
+inline_for_extraction noextract
+unfold
+let linked_tree = Impl.Core.linked_tree #data
 
 // is well-formed (AVL + content)
 let is_wf (x: wdm data) : prop
@@ -64,7 +63,7 @@ type mmap_md =
 let init_mmap_md (_:unit)
   : SteelTop mmap_md false (fun _ -> emp) (fun _ _ _ -> True)
   =
-  let ptr = mmap_ptr_metadata () in
+  let ptr = mmap_ptr_metadata_init () in
   let tree = create_leaf () in
   write ptr tree;
  (**) intro_vrefine (linked_tree p tree) is_wf;
@@ -114,9 +113,8 @@ let trees_malloc2_aux (x: node)
   let ptr = SizeClass.allocate_size_class metadata_slabs.scs in
   if A.is_null ptr then (
-    // this should trigger a fatal error
-    sladmit ();
-    return null
+    let r = FatalError.die_from_avl_node_malloc_failure x ptr in
+    return r
   ) else (
     change_equal_slprop
       (if (A.is_null ptr) then emp else A.varray ptr)
@@ -131,8 +129,8 @@ let trees_malloc2_aux (x: node)
       <= (US.v metadata_max * U32.v page_size) * US.v nb_size_classes * US.v nb_arenas);
     up_fits_propagation ptr metadata_slabs.scs.slab_region;
-    let r' = array_u8__to__ref_node ptr in
-    array_u8__to__ref_node_bijectivity ptr;
+    let r' = Impl.Trees.Cast.M.array_u8__to__ref_node ptr in
+    Impl.Trees.Cast.M.array_u8__to__ref_node_bijectivity ptr;
     write r' x;
     return r'
   )
@@ -189,7 +187,7 @@ let trees_free2_aux (r: ref node)
   (ensures fun _ _ _-> True)
   =
   vptr_not_null r;
-  let ptr = ref_node__to__array_u8 r in
+  let ptr = Impl.Trees.Cast.M.ref_node__to__array_u8 r in
   let diff = A.ptrdiff ptr (A.split_l metadata_slabs.slab_region 0sz) in
   let diff_sz = UP.ptrdifft_to_sizet diff in
   assert (US.v diff_sz = A.offset (A.ptr_of ptr) - A.offset (A.ptr_of metadata_slabs.scs.slab_region));
@@ -201,8 +199,10 @@ let trees_free2_aux (r: ref node)
     emp;
     return ()
   ) else (
-    // this should trigger a fatal error
-    sladmit ();
+    change_equal_slprop
+      (if b then emp else A.varray ptr)
+      (A.varray ptr);
+    FatalError.die_from_avl_node_free_failure ptr;
     return ()
   )
@@ -229,11 +229,6 @@ let trees_free2 (r: ref node)
   in
   return r
 
-// machine representation
-inline_for_extraction noextract
-unfold
-let linked_tree = Impl.Core.linked_tree #data
-
 noextract inline_for_extraction
 let mmap = Mman.mmap_u8
@@ -257,9 +252,6 @@ let find = Map.M.find
 
 open Config
 
-inline_for_extraction noextract
-let mmap_actual_size = PtrdiffWrapper.mmap_actual_size
-
 #push-options "--fuel 1 --ifuel 1 --z3rlimit 100"
 [@@ __steel_reduce__]
 let v_ind_tree
@@ -279,7 +271,11 @@ let v_ind_tree
 
 open PtrdiffWrapper
 
-#push-options "--fuel 1 --ifuel 1 --z3rlimit 100"
+#push-options "--print_implicits"
+
+#restart-solver
+
+#push-options "--fuel 1 --ifuel 1 --z3rlimit 200"
 inline_for_extraction noextract
 let large_malloc_aux
   (metadata_ptr: ref t)
@@ -325,7 +321,9 @@ let large_malloc_aux
     (linked_tree p t)
     (linked_tree p md_v);
   (**) let h0 = get () in
-  (**) Spec.height_lte_size (v_linked_tree p md_v h0);
+  (**) let tree : G.erased (x:wdm data{is_wf x}) = G.hide (v_linked_tree p md_v h0) in
+  assert (Spec.forall_keys tree (fun x -> US.v (snd x) <> 0));
+  (**) Spec.height_lte_size tree;
   let ptr = mmap size in
   if (A.is_null ptr) then (
     (**) intro_vrefine (linked_tree p md_v) is_wf;
@@ -344,8 +342,8 @@ let large_malloc_aux
   ) else (
     let h0 = get () in
     let md_v' = insert false md_v (ptr, size') in
-    Spec.lemma_insert false (spec_convert cmp) (v_linked_tree p md_v h0) (ptr, size');
-    Spec.lemma_insert2 (spec_convert cmp) (v_linked_tree p md_v h0) (ptr, size') (fun x -> US.v (snd x) <> 0);
+    Spec.lemma_insert false (spec_convert cmp) tree (ptr, size');
+    Spec.lemma_insert2 #data (spec_convert cmp) tree (ptr, size') (fun x -> US.v (snd x) <> 0);
     write metadata_ptr md_v';
     (**) intro_vrefine (linked_tree p md_v') is_wf;
     (**) intro_vdep (vptr metadata_ptr) (linked_wf_tree md_v') linked_wf_tree;
@@ -435,7 +433,7 @@ let large_free_aux
     let h0 = get () in
     let md_v' = delete md_v (ptr, size) in
     Spec.lemma_delete (spec_convert cmp) (v_linked_tree p md_v h0) (ptr, size);
-    Spec.lemma_delete2 (spec_convert cmp) (v_linked_tree p md_v h0) (ptr, size) (fun x -> US.v (snd x) <> 0);
+    Spec.lemma_delete2 #data (spec_convert cmp) (v_linked_tree p md_v h0) (ptr, size) (fun x -> US.v (snd x) <> 0);
     write metadata_ptr md_v';
     (**) intro_vrefine (linked_tree p md_v') is_wf;
     (**) intro_vdep (vptr metadata_ptr) (linked_wf_tree md_v') linked_wf_tree;
@@ -609,6 +607,4 @@ let large_getsize (ptr: array U8.t)
   return r
 
 (*)
-- convert AVL lib to use size_t instead of u64
-- find: some improvements ahead? (better spec)
 - use a large vdep between avl and mmap'ed allocations?
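The trees_free2_aux change above replaces an sladmit with an explicit fatal-error path, guarded by a purely arithmetic validity check before the AVL node's slot is handed back to the slab allocator. A C sketch of that check (names are illustrative, not the extracted identifiers):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* A node pointer is only accepted if it lies inside the dedicated
   metadata slab region; otherwise the new code path dies via
   FatalError instead of admitting the proof obligation. */
static bool node_in_region(const uint8_t *ptr,
                           const uint8_t *region_start,
                           size_t region_size) {
  ptrdiff_t diff = ptr - region_start; /* A.ptrdiff in the F* code */
  return diff >= 0 && (size_t) diff < region_size;
}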
diff --git a/src/LargeAlloc.fsti b/src/LargeAlloc.fsti
new file mode 100644
index 00000000..e6fbda74
--- /dev/null
+++ b/src/LargeAlloc.fsti
@@ -0,0 +1,57 @@
+module LargeAlloc
+
+open Steel.Effect.Atomic
+open Steel.Effect
+module A = Steel.Array
+
+module US = FStar.SizeT
+module U32 = FStar.UInt32
+module U8 = FStar.UInt8
+
+open Constants
+open Utils2
+open NullOrVarray
+
+inline_for_extraction noextract
+let mmap_actual_size = PtrdiffWrapper.mmap_actual_size
+
+val large_malloc (size: US.t)
+  : Steel (array U8.t)
+  emp
+  (fun ptr -> null_or_varray ptr)
+  (requires fun _ ->
+    US.v size > 0 /\
+    US.v size > U32.v page_size /\
+    US.fits (US.v size + U32.v page_size)
+  )
+  (ensures fun _ ptr h1 ->
+    let s : (t_of (null_or_varray ptr))
+      = h1 (null_or_varray ptr) in
+    not (A.is_null ptr) ==> (
+      US.fits (US.v size + U32.v page_size) /\
+      (let size' = mmap_actual_size size in
+      A.length ptr == US.v size' /\
+      A.is_full_array ptr /\
+      array_u8_alignment ptr page_size /\
+      zf_u8 s
+    ))
+  )
+
+val large_free (ptr: array U8.t)
+  : Steel bool
+  (A.varray ptr)
+  (fun b -> if b then emp else A.varray ptr)
+  (requires fun _ -> True)
+  (ensures fun _ _ _ -> True)
+
+val large_getsize (ptr: array U8.t)
+  : Steel US.t
+  (A.varray ptr) (fun _ -> A.varray ptr)
+  (requires fun _ -> True)
+  (ensures fun h0 r h1 ->
+    A.asel ptr h1 == A.asel ptr h0 /\
+    (US.v r > 0 ==>
+      A.length ptr == US.v r /\
+      US.v r > U32.v page_size
+    )
+  )
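mmap_actual_size (re-exported here from PtrdiffWrapper) is the page-granularity size that large_malloc actually maps; the `US.fits (US.v size + U32.v page_size)` precondition guards exactly the rounding addition against overflow. A hedged C sketch of such a rounding helper (assuming a 4 KiB page; the real definition lives in PtrdiffWrapper):

#include <stddef.h>

#define PAGE_SIZE 4096u /* assumption: 4 KiB pages */

/* round `size` up to the next multiple of the page size;
   the caller must ensure size + PAGE_SIZE does not overflow */
static size_t mmap_actual_size(size_t size) {
  return (size + PAGE_SIZE - 1u) & ~(size_t)(PAGE_SIZE - 1u);
}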
diff --git a/src/MemoryTrap.fsti b/src/MemoryTrap.fst
similarity index 92%
rename from src/MemoryTrap.fsti
rename to src/MemoryTrap.fst
index 2d08d8f9..120e5f50 100644
--- a/src/MemoryTrap.fsti
+++ b/src/MemoryTrap.fst
@@ -23,22 +23,22 @@ open Config
 /// this predicate is abstract, does not expose any
 /// primitive to use it, and can only be introduced
 /// through the mmap_trap function below
-val trap_array (arr: array U8.t) : vprop
+assume val trap_array (arr: array U8.t) : vprop
 
 /// Introduction function for the abstract `trap_array`
 /// predicate above. Under the hood, this function will
 /// be implemented in C as a mmap(PROT_NONE)
-val mmap_strict_trap
+assume val mmap_strict_trap
   (arr: array U8.t)
   (len: US.t{US.v len == A.length arr /\ US.v len > 0})
   : SteelT unit
   (A.varray arr)
   (fun _ -> trap_array arr)
 
-///
+
 /// Introduction function for the abstract `trap_array`
 /// predicate above. Under the hood, this function will
 /// be implemented in C as a madvise(MADV_DONTNEED)
-val mmap_trap
+assume val mmap_trap
   (arr: array U8.t)
   (len: US.t{US.v len == A.length arr /\ US.v len > 0})
   : SteelT unit
@@ -48,7 +48,7 @@
 /// Elimination function for the abstract `trap_array`
 /// predicate above. Under the hood, this function will
 /// be implemented in C as a mmap(PROT_READ|PROT_WRITE)
-val mmap_strict_untrap
+assume val mmap_strict_untrap
   (arr: array U8.t)
   (len: US.t{US.v len == A.length arr /\ US.v len > 0})
   : SteelT unit
@@ -58,7 +58,7 @@
 /// Elimination function for the abstract `trap_array`
 /// predicate above. Under the hood, this function will
 /// be implemented in C as a noop.
-val mmap_untrap
+assume val mmap_untrap
   (arr: array U8.t)
   (len: US.t{US.v len == A.length arr /\ US.v len > 0})
   : SteelT unit
diff --git a/src/Mman.fst b/src/Mman.fst
index bc0d92d6..7c69e450 100644
--- a/src/Mman.fst
+++ b/src/Mman.fst
@@ -16,6 +16,8 @@ open Constants
 open Config
 open Utils2
 
+/// Memory management axiomatizations
+
 /// 1) Initialization of the allocator
 // all functions with a _init suffix
 // are only meant to be used at initialization
@@ -104,9 +106,6 @@ assume val mmap_sizes_init (len: US.t)
     A.is_full_array r
   )
 
-// used in src/LargeAlloc.fst
-// mmap_ptr_metadata
-
 /// 2) Large allocations wrappers
 
 open NullOrVarray
@@ -116,7 +115,7 @@ open PtrdiffWrapper
 // POSIX spec: mmap returns a page-aligned array of bytes;
 // thus, mmap_u8 returns a page-aligned array of bytes;
 // hence the postcondition array_u8_alignment page_size
-//noextract
+/// mmap syscall wrapper
 assume val mmap_u8
   (size: US.t)
   : Steel (array U8.t)
@@ -134,8 +133,7 @@
     )
   )
 
-// TODO: should the underlying munmap fail in a stricter manner?
-//noextract
+/// munmap syscall wrapper
 assume val munmap_u8 (ptr: array U8.t) (size: US.t)
   : Steel unit
   (A.varray ptr)
diff --git a/src/Mman2.fst b/src/Mman2.fst
new file mode 100644
index 00000000..56539b66
--- /dev/null
+++ b/src/Mman2.fst
@@ -0,0 +1,15 @@
+module Mman2
+
+open Steel.Effect.Atomic
+open Steel.Effect
+module R = Steel.Reference
+
+open Impl.Trees.Types
+module G = FStar.Ghost
+
+/// Memory management axiomatizations
+
+assume val mmap_ptr_metadata_init (_:unit)
+  : SteelT (R.ref t)
+  emp
+  (fun r -> R.vptr r)
diff --git a/src/SlotsFree.fst b/src/SlotsFree.fst
index 9593c3d9..037779db 100644
--- a/src/SlotsFree.fst
+++ b/src/SlotsFree.fst
@@ -566,7 +566,9 @@ let u32_bounded
 
 #restart-solver
 
-assume val deallocate_zeroing
+open ExternUtils
+
+val deallocate_zeroing
   (size_class: sc)
   (ptr: array U8.t)
   : Steel unit
@@ -578,6 +580,14 @@
     enable_zeroing_free ==> zf_u8 (A.asel ptr h1)
   )
 
+let deallocate_zeroing size_class ptr
+  =
+  if enable_zeroing_free then (
+    apply_zeroing_u8 ptr (US.uint32_to_sizet size_class)
+  ) else (
+    return ()
+  )
+
 //TODO: check for spec
 //CAUTION
 #push-options "--z3rlimit 100 --compat_pre_typed_indexed_effects"
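Taken together, the SlotsFree and StarMalloc changes implement a zero-on-free / check-on-malloc protocol. A C sketch of that protocol (the `on_free`/`on_malloc` helpers are hypothetical; apply_zeroing_u8, check_zeroing_u8 and fatal_error are existing extern utilities of this codebase):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

extern void apply_zeroing_u8(uint8_t *ptr, size_t n);
extern bool check_zeroing_u8(uint8_t *ptr, size_t n);
extern void fatal_error(const char *msg);

/* enable_zeroing_free: slots are scrubbed when they are released */
static void on_free(uint8_t *slot, size_t size_class) {
  apply_zeroing_u8(slot, size_class);
}

/* enable_zeroing_malloc: malloc re-checks the invariant and treats
   a non-zero byte as heap corruption (fatal, not recoverable) */
static uint8_t *on_malloc(uint8_t *slot, size_t size) {
  if (!check_zeroing_u8(slot, size))
    fatal_error("zeroing check failed");
  return slot;
}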
diff --git a/src/StarMalloc.fst b/src/StarMalloc.fst
index d7deff0c..10056811 100644
--- a/src/StarMalloc.fst
+++ b/src/StarMalloc.fst
@@ -52,11 +52,6 @@ val malloc (arena_id:US.t{US.v arena_id < US.v nb_arenas}) (size: US.t)
   )
 #pop-options
 
-assume val malloc_zeroing_die (ptr: array U8.t)
-  : SteelT unit
-  (A.varray ptr)
-  (fun _ -> emp)
-
 module G = FStar.Ghost
 
 //TODO: [@ CConst]
@@ -78,7 +73,7 @@ let malloc arena_id size =
       intro_live_null_or_varray ptr;
       return ptr
     ) else (
-      malloc_zeroing_die ptr;
+      FatalError.die_from_malloc_zeroing_check_failure ptr;
       intro_null_null_or_varray #U8.t
     )
   )
@@ -144,7 +139,8 @@ let aligned_alloc arena_id alignment size
      intro_live_null_or_varray ptr;
      return ptr
    ) else (
-      malloc_zeroing_die ptr;
+      // TODO: distinct case from malloc?
+      FatalError.die_from_malloc_zeroing_check_failure ptr;
      intro_null_null_or_varray #U8.t
    )
  )
@@ -313,15 +309,36 @@ let getsize (ptr: array U8.t)
 
 module G = FStar.Ghost
 
 noextract
-let return_status = x:nat{x < 3}
+let return_status = x:nat{x < 8}
+
+let realloc_small_optim_lemma
+  (old_size: US.t)
+  (new_size: US.t)
+  : Lemma
+  (requires enable_sc_fast_selection /\
+    US.v old_size <= U32.v page_size /\
+    US.v new_size <= US.v threshold /\
+    (let old_idx = sc_selection (US.sizet_to_uint32 old_size) in
+    let old_sc = L.index sc_list (US.v old_idx) in
+    let new_idx = if enable_slab_canaries_malloc
+      then sc_selection (US.sizet_to_uint32 (US.add new_size 2sz))
+      else sc_selection (US.sizet_to_uint32 new_size) in
+    (enable_slab_canaries_malloc ==>
+      US.v old_size == U32.v old_sc - 2) /\
+    (not enable_slab_canaries_malloc ==>
+      US.v old_size == U32.v old_sc) /\
+    old_idx = new_idx)
+  )
+  (ensures
+    US.v new_size <= US.v old_size
+  )
+  =
+  ()
+
 // in case of failure, this vprop describes
 // memory that is still allocated but not returned
-// TODO: doc this better, realloc conforms to C DR 400
-// realloc(NULL,0) only gives NULL on alloc failure
-// (errno is ENOMEM)
-// realloc(ptr,0) only gives NULL on alloc failure, ptr unchanged
-// (errno is ENOMEM)
+// TODO: realloc(NULL, 0) semantics could follow POSIX or C23; deallocation via realloc(ptr, 0) is deprecated
 // TODO: errno
 let realloc_vp (status: return_status)
@@ -329,20 +346,28 @@
   (ptr: array U8.t)
   (new_ptr: array U8.t)
   : vprop
   = match status with
-  // success
+  // realloc(NULL, new_size): allocation, new_ptr should be returned
   | 0 -> emp
-  // new allocation fails, cannot memcpy,
-  // returning previous allocation
-  | 1 -> null_or_varray ptr
-  // new_allocation succeeds, freeing old allocation fails
-  // returning null
-  | 2 -> null_or_varray ptr `star` null_or_varray new_ptr
+  // realloc(ptr, 0): deallocation success, NULL should be returned
+  | 1 -> emp
+  // realloc(ptr, 0): deallocation failure, fatal_error (invalid free)
+  | 2 -> emp
+  // realloc(invalid pointer, ...): failure, fatal_error (invalid realloc)
+  | 3 -> emp
+  // realloc(valid pointer, ...): optim case (success), ptr should be returned
+  | 4 -> emp
+  // realloc(valid pointer, ...): allocation failure, NULL should be returned
+  | 5 -> null_or_varray ptr
+  // realloc(valid pointer, ...): success, new_ptr should be returned
+  | 6 -> emp
+  // realloc(valid pointer, ...): deallocation failure, fatal_error (invalid free)
+  | 7 -> null_or_varray new_ptr
 
 #restart-solver
 
 #push-options "--fuel 1 --ifuel 1"
-#push-options "--z3rlimit 100 --compat_pre_typed_indexed_effects"
-val realloc (arena_id:US.t{US.v arena_id < US.v nb_arenas})
+inline_for_extraction noextract
+val realloc_standard_case (arena_id:US.t{US.v arena_id < US.v nb_arenas})
   (ptr: array U8.t) (new_size: US.t)
   : Steel (array U8.t & G.erased (return_status & array U8.t))
   (
     null_or_varray ptr `star`
     (A.varray (A.split_l sc_all.slab_region 0sz) `star`
     A.varray (A.split_r sc_all.slab_region slab_region_size))
   )
   (fun r ->
     null_or_varray (fst r) `star`
     (realloc_vp (fst (G.reveal (snd r))) ptr (snd (G.reveal (snd r))) `star`
     A.varray (A.split_l sc_all.slab_region 0sz) `star`
     A.varray (A.split_r sc_all.slab_region slab_region_size))
   )
   (requires fun _ ->
     within_size_classes_pred ptr /\
-    US.fits (US.v new_size + U32.v page_size)
+    US.fits (US.v new_size + U32.v page_size) /\
+    not (A.is_null ptr) /\
+    US.v new_size > 0
   )
   (ensures fun h0 r h1 ->
     let s0 : t_of (null_or_varray ptr)
       = h0 (null_or_varray ptr) in
     let s1 : t_of (null_or_varray (fst r))
       = h1 (null_or_varray (fst r)) in
     // success
     not (A.is_null (fst r)) ==> (
       A.length (fst r) >= US.v new_size /\
       //array_u8_alignment (fst r) 16ul /\
       (not (A.is_null ptr) ==> (
         (enable_slab_canaries_malloc ==> A.length ptr >= 2) /\
         (let size = spec_getsize (A.length ptr) in
         Seq.slice s1 0 (min size (US.v new_size))
         ==
         Seq.slice s0 0 (min size (US.v new_size))
         )
       ))
-    )
+    ) /\
+    True
   )
-  // On failure, returns a null pointer.
-  // The original pointer ptr remains valid and may need to be deallocated with free or realloc.
-  //TODO: add corresponding postcond
+#pop-options
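The eight return_status cases mirror the contract that C callers observe. A small caller-side usage example, assuming this allocator's convention that realloc(ptr, 0) frees and returns NULL (case 1):

#include <stdlib.h>
#include <string.h>

void realloc_cases_demo(void) {
  char *p = realloc(NULL, 64);   /* case 0: behaves like malloc */
  if (p == NULL) return;
  memset(p, 0x2a, 64);

  char *q = realloc(p, 128);     /* cases 4/5/6 */
  if (q == NULL) {
    free(p);                     /* case 5: p is still valid, unchanged */
    return;
  }
  /* cases 4/6: contents preserved up to min(64, 128) bytes */
  q = realloc(q, 0);             /* case 1: frees, returns NULL */
}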
 
-let realloc_small_optim_lemma
-  (old_size: US.t)
-  (new_size: US.t)
-  : Lemma
-  (requires enable_sc_fast_selection /\
-    US.v old_size <= U32.v page_size /\
-    US.v new_size <= US.v threshold /\
-    (let old_idx = sc_selection (US.sizet_to_uint32 old_size) in
-    let old_sc = L.index sc_list (US.v old_idx) in
-    let new_idx = if enable_slab_canaries_malloc
+#restart-solver
+
+#push-options "--fuel 0 --ifuel 0 --z3rlimit 200"
+let realloc_standard_case arena_id ptr new_size
+  =
+  elim_live_null_or_varray ptr;
+  let old_size = getsize ptr in
+  let old_allocation_is_small : bool = US.lte old_size (u32_to_sz page_size) in
+  let new_allocation_is_small : bool = US.lte new_size threshold in
+  let same_case : bool = old_allocation_is_small = new_allocation_is_small in
+  let small_case_optim_condition = enable_sc_fast_selection && old_allocation_is_small && same_case && (
+    let old_sc = sc_selection (US.sizet_to_uint32 old_size) in
+    let new_sc = if enable_slab_canaries_malloc
       then sc_selection (US.sizet_to_uint32 (US.add new_size 2sz))
       else sc_selection (US.sizet_to_uint32 new_size) in
-    (enable_slab_canaries_malloc ==>
-      US.v old_size == U32.v old_sc - 2) /\
-    (not enable_slab_canaries_malloc ==>
-      US.v old_size == U32.v old_sc) /\
-    old_idx = new_idx)
+    old_sc = new_sc) in
+  let large_case_optim_condition = (not old_allocation_is_small) && same_case && (
+    US.lte new_size old_size
+  ) in
+  // not a valid pointer from the allocator point of view, fail
+  if (old_size = 0sz) then (
+    // 3) invalid pointer, fatal_error
+    FatalError.die_from_realloc_invalid_previous_alloc ptr;
+    let r = intro_null_null_or_varray #U8.t in
+    change_equal_slprop
+      emp
+      (realloc_vp 3 ptr (A.null #U8.t));
+    return (r, G.hide (3, A.null #U8.t))
+  ) else (
+    // most common case
+    if small_case_optim_condition then realloc_small_optim_lemma old_size new_size else ();
+    assert (small_case_optim_condition ==> A.length ptr >= US.v new_size);
+    assert (large_case_optim_condition ==> A.length ptr >= US.v new_size);
+
+    if (small_case_optim_condition || large_case_optim_condition) then (
+      // 4) optimization
+      (**) intro_live_null_or_varray ptr;
+      (**) change_equal_slprop
+        emp
+        (realloc_vp 4 (A.null #U8.t) (A.null #U8.t));
+      return (ptr, G.hide (4, A.null #U8.t))
+    ) else (
+      // reallocation: the classical malloc/memcpy/free sequence of operations
+      let new_ptr = malloc arena_id new_size in
+      if (A.is_null new_ptr) then (
+        // If the function fails to allocate the requested block
+        // of memory, a null pointer is returned, and the memory
+        // block pointed to by argument ptr is not deallocated
+        // (it is still valid, and with its contents unchanged).
+        // 5) allocation failure
+        (**) elim_null_null_or_varray new_ptr;
+        (**) intro_live_null_or_varray ptr;
+        change_equal_slprop
+          (null_or_varray ptr)
+          (realloc_vp 5 ptr (A.null #U8.t));
+        let r = intro_null_null_or_varray #U8.t in
+        return (r, G.hide (5, A.null #U8.t))
+      ) else (
+        // The content of the memory block is preserved up to the
+        // lesser of the new and old sizes, even if the block is
+        // moved to a new location. If the new size is larger, the
+        // value of the newly allocated portion is indeterminate.
+        (**) elim_live_null_or_varray new_ptr;
+        assert (A.length new_ptr >= US.v new_size);
+        let min_of_sizes =
+          if US.lte new_size old_size
+          then new_size
+          else old_size in
+        let _ = memcpy_u8 new_ptr ptr min_of_sizes in
+        (**) intro_live_null_or_varray new_ptr;
+        (**) intro_live_null_or_varray ptr;
+        let b = free ptr in
+        if b then (
+          // 6) full reallocation success
+          change_equal_slprop
+            (if b then emp else A.varray ptr) emp;
+          change_equal_slprop
+            emp
+            (realloc_vp 6 ptr (A.null #U8.t));
+          return (new_ptr, G.hide (6, A.null #U8.t))
+        ) else (
+          // 7) deallocation failure, fatal_error
+          change_equal_slprop
+            (if b then emp else A.varray ptr)
+            (A.varray ptr);
+          FatalError.die_from_realloc_free_failure ptr;
+          let r = intro_null_null_or_varray #U8.t in
+          return (r, G.hide (7, new_ptr))
+        )
+      )
+    )
+  )
+#pop-options
+
+#push-options "--fuel 1 --ifuel 1"
+val realloc (arena_id:US.t{US.v arena_id < US.v nb_arenas})
+  (ptr: array U8.t) (new_size: US.t)
+  : Steel (array U8.t & G.erased (return_status & array U8.t))
+  (
+    null_or_varray ptr `star`
+    (A.varray (A.split_l sc_all.slab_region 0sz) `star`
+    A.varray (A.split_r sc_all.slab_region slab_region_size))
+  )
+  (fun r ->
+    null_or_varray (fst r) `star`
+    (realloc_vp (fst (G.reveal (snd r))) ptr (snd (G.reveal (snd r))) `star`
+    A.varray (A.split_l sc_all.slab_region 0sz) `star`
+    A.varray (A.split_r sc_all.slab_region slab_region_size))
+  )
+  (requires fun _ ->
+    within_size_classes_pred ptr /\
+    US.fits (US.v new_size + U32.v page_size)
+  )
+  (ensures fun h0 r h1 ->
+    let s0 : t_of (null_or_varray ptr)
+      = h0 (null_or_varray ptr) in
+    let s1 : t_of (null_or_varray (fst r))
+      = h1 (null_or_varray (fst r)) in
+    // success
+    not (A.is_null (fst r)) ==> (
+      A.length (fst r) >= US.v new_size /\
+      //array_u8_alignment (fst r) 16ul /\
+      (not (A.is_null ptr) ==> (
+        (enable_slab_canaries_malloc ==> A.length ptr >= 2) /\
+        (let size = spec_getsize (A.length ptr) in
+        Seq.slice s1 0 (min size (US.v new_size))
+        ==
+        Seq.slice s0 0 (min size (US.v new_size))
+        )
+      ))
+    ) /\
+    // optimization case
+    //(G.reveal (fst (snd r)) = 3 ==> fst r == ptr)
+    True
+  )
 
 let realloc arena_id ptr new_size
   =
   if A.is_null ptr then (
     // 1) realloc(NULL, new_size)
     // In case that ptr is a null pointer, the function behaves
     // like malloc, assigning a new block of size bytes and
     // returning a pointer to its beginning.
-    elim_null_null_or_varray ptr;
+    (**) elim_null_null_or_varray ptr;
     let new_ptr = malloc arena_id new_size in
-    change_equal_slprop
+    (**) change_equal_slprop
       emp
       (realloc_vp 0 (A.null #U8.t) (A.null #U8.t));
     return (new_ptr, G.hide (0, A.null #U8.t))
   ) else (if (new_size = 0sz) then (
     // 2) realloc(ptr, 0sz)
     // freeing
+    // TODO: POSIX or C23: realloc(ptr, 0) need not free memory; add a corresponding config switch
     let b = free ptr in
     if b then (
       (**) change_equal_slprop
         (if b then emp else A.varray ptr) emp;
       (**) change_equal_slprop
         emp
-        (realloc_vp 0 ptr (A.null #U8.t));
+        (realloc_vp 1 ptr (A.null #U8.t));
       let r = intro_null_null_or_varray #U8.t in
-      return (r, G.hide (0, A.null #U8.t))
+      return (r, G.hide (1, A.null #U8.t))
     ) else (
-      //TODO: add a die(), internal error
-      //change_equal_slprop
-      //  (if b then emp else A.varray ptr)
-      //  (A.varray ptr);
-      //intro_live_null_or_varray ptr;
-      //change_equal_slprop
-      //  (null_or_varray ptr)
-      //  (realloc_vp 1 ptr A.null #U8.t);
+      (**) change_equal_slprop
+        (if b then emp else A.varray ptr)
+        (A.varray ptr);
+      FatalError.die_from_realloc_free_failure ptr;
       let r = intro_null_null_or_varray #U8.t in
-      sladmit ();
-      return (r, G.hide (1, A.null #U8.t))
+      (**) change_equal_slprop
+        emp
+        (realloc_vp 2 (A.null #U8.t) (A.null #U8.t));
+      return (r, G.hide (2, A.null #U8.t))
     )
   ) else (
-    elim_live_null_or_varray ptr;
-    let old_size = getsize ptr in
-    let old_allocation_is_small : bool = US.lte old_size (u32_to_sz page_size) in
-    let new_allocation_is_small : bool = US.lte new_size threshold in
-    let same_case : bool = old_allocation_is_small = new_allocation_is_small in
-    let small_case_optim_condition = enable_sc_fast_selection && old_allocation_is_small && same_case && (
-      let old_sc = sc_selection (US.sizet_to_uint32 old_size) in
-      let new_sc = if enable_slab_canaries_malloc
-        then sc_selection (US.sizet_to_uint32 (US.add new_size 2sz))
-        else sc_selection (US.sizet_to_uint32 new_size) in
-      old_sc = new_sc) in
-    let large_case_optim_condition = (not old_allocation_is_small) && same_case && (
-      US.lte new_size old_size
-    ) in
-    // not a valid pointer from the allocator point of view, fail
-    if (old_size = 0sz) then (
-      // 3) invalid pointer, fail
-      //TODO: add a die(), internal error
-      sladmit ();
-      //change_equal_slprop
-      //  (null_or_varray ptr `star` null_or_varray new_ptr)
-      //  (realloc_vp 2 ptr new_ptr);
-      //let r = intro_null_null_or_varray #U8.t in
-      //return (r, G.hide (2, new_ptr))
-      return (A.null #U8.t, G.hide (0, A.null #U8.t))
-    ) else (
-      // most common case
-      if small_case_optim_condition then realloc_small_optim_lemma old_size new_size else ();
-      assert (small_case_optim_condition ==> A.length ptr >= US.v new_size);
-      assert (large_case_optim_condition ==> A.length ptr >= US.v new_size);
-      if (small_case_optim_condition || large_case_optim_condition) then (
-        // optimization
-        (**) intro_live_null_or_varray ptr;
-        (**) change_equal_slprop
-          emp
-          (realloc_vp 0 (A.null #U8.t) (A.null #U8.t));
-        return (ptr, G.hide (0, A.null #U8.t))
-      ) else (
-        // reallocation classical malloc/memcpy/free sequence of operations
-        let new_ptr = malloc arena_id new_size in
-        if (A.is_null new_ptr) then (
-          // If the function fails to allocate the requested block
-          // of memory, a null pointer is returned, and the memory
-          // block pointed to by argument ptr is not deallocated
-          // (it is still valid, and with its contents unchanged).
-          (**) elim_null_null_or_varray new_ptr;
-          (**) intro_live_null_or_varray ptr;
-          change_equal_slprop
-            (null_or_varray ptr)
-            (realloc_vp 1 ptr (A.null #U8.t));
-          let r = intro_null_null_or_varray #U8.t in
-          return (r, G.hide (1, A.null #U8.t))
-        ) else (
-          // The content of the memory block is preserved up to the
-          // lesser of the new and old sizes, even if the block is
-          // moved to a new location. If the new size is larger, the
-          // value of the newly allocated portion is indeterminate.
-          (**) elim_live_null_or_varray new_ptr;
-          assert (A.length new_ptr >= US.v new_size);
-          let min_of_sizes =
-            if US.lte new_size old_size
-            then new_size
-            else old_size in
-          let _ = memcpy_u8 new_ptr ptr min_of_sizes in
-          (**) intro_live_null_or_varray new_ptr;
-          (**) intro_live_null_or_varray ptr;
-          let b = free ptr in
-          if b then (
-            change_equal_slprop
-              (if b then emp else A.varray ptr) emp;
-            change_equal_slprop
-              emp
-              (realloc_vp 0 ptr (A.null #U8.t));
-            return (new_ptr, G.hide (0, A.null #U8.t))
-          ) else (
-            //TODO: add a die(), internal error
-            change_equal_slprop
-              (if b then emp else A.varray ptr)
-              (A.varray ptr);
-            intro_live_null_or_varray ptr;
-            change_equal_slprop
-              (null_or_varray ptr `star` null_or_varray new_ptr)
-              (realloc_vp 2 ptr new_ptr);
-            let r = intro_null_null_or_varray #U8.t in
-            return (r, G.hide (2, new_ptr))
-          )
-        )
-      )
-    )
+    realloc_standard_case arena_id ptr new_size
   ))
-#pop-options
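Regarding the realloc(ptr, 0) TODO above: C23 makes this call undefined, while this implementation (like glibc) frees the block and returns NULL, which is why a config switch is being considered. Portable callers are better served by an explicit free, e.g.:

#include <stdlib.h>

void shrink_to_zero(void **p) {
  free(*p);   /* rather than relying on realloc(*p, 0) semantics */
  *p = NULL;
}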
 
 #push-options "--fuel 1 --ifuel 1 --z3rlimit 200"
 //TODO: there should be defensive checks and no precondition
diff --git a/src/Utils2.fsti b/src/Utils2.fst
similarity index 96%
rename from src/Utils2.fsti
rename to src/Utils2.fst
index 1fce8db2..d0852687 100644
--- a/src/Utils2.fsti
+++ b/src/Utils2.fst
@@ -27,28 +27,8 @@ unfold let same_base_array (#a: Type) (arr1 arr2: array a)
 noextract
 unfold let slab_metadata = r:array U64.t{A.length r = 4}
 
-// abstract property that the underlying pointer v-bytes aligned
-//assume
-val array_u8_alignment (arr: array U8.t) (v: U32.t{U32.v v > 0}): prop
-
-// no model for the memory considered as the "root" array in a tree-like representation
-// thus, this *axiom* is required
-// to convey that if arr (of type uint8_t*) is v1-bytes aligned,
-// that v1 is a multiple of v2,
-// then forall k,
-// then arr[k * v2] is v2-bytes aligned
-//assume
-val array_u8_alignment_lemma
-  (arr1 arr2: array U8.t)
-  (v1 v2: (v:U32.t{U32.v v > 0}))
-  : Lemma
-  (requires
-    same_base_array arr1 arr2 /\
-    array_u8_alignment arr1 v1 /\
-    (U32.v v1) % (U32.v v2) == 0 /\
-    (A.offset (A.ptr_of arr2) - A.offset (A.ptr_of arr1)) % (U32.v v2) == 0)
-  (ensures
-    array_u8_alignment arr2 v2)
+let array_u8_alignment = ArrayAlignment.array_u8_alignment
+let array_u8_alignment_lemma = ArrayAlignment.array_u8_alignment_lemma
 
 #push-options "--z3rlimit 30"
 let nb_slots (size_class: sc)