From f28b3886813dc4b961c674b8fa3f7cf62454dc32 Mon Sep 17 00:00:00 2001
From: Antonin Reitz
Date: Mon, 2 Sep 2024 04:06:00 +0200
Subject: [PATCH] Main2.fst mostly clean, fix extraction; still a few
 verification issues

---
 Makefile          |   2 +
 src/Config.fst    |   6 +-
 src/Config.fsti   |   3 +-
 src/Main.Meta.fst |   4 +-
 src/Main.fst      |   8 +-
 src/Main.fsti     |   2 +-
 src/Main2.fst     | 445 ++++++++++++++++++++++++++++++++++------------
 src/Main2.fsti    |   9 +-
 src/SizeClass.fst |   2 +-
 9 files changed, 354 insertions(+), 127 deletions(-)

diff --git a/Makefile b/Makefile
index fd49b99..8cd0422 100644
--- a/Makefile
+++ b/Makefile
@@ -87,6 +87,8 @@ EXT_VENDOR_FILES = vendor/steel/src/c/steel_spinlock.c
 # TODO: improve this
 #dist/Mman.c leads to issue, moving non-extern code apart
 FILES = \
+  dist/Config.c \
+  dist/Constants.c \
   dist/ArrayList.c \
   dist/krmlinit.c \
   dist/StarMalloc.c \
diff --git a/src/Config.fst b/src/Config.fst
index 4f299e8..f8ed8ad 100644
--- a/src/Config.fst
+++ b/src/Config.fst
@@ -201,14 +201,14 @@ let metadata_max = US.of_u64 metadata_max'
 
 //DO NOT EDIT
-let sc_slab_region_size = US.mul metadata_max (US.uint32_to_sizet page_size)
+let sc_slab_region_size = normalize_term (US.mul metadata_max (US.uint32_to_sizet page_size))
 
 //DO NOT EDIT
-let full_slab_region_size = US.mul sc_slab_region_size (US.mul nb_size_classes nb_arenas)
+let full_slab_region_size = normalize_term (US.mul sc_slab_region_size (US.mul nb_size_classes nb_arenas))
 
 //DO NOT EDIT
 let metadata_max_ex
-  = US.div metadata_max (US.mul max_sc_coef 2sz)
+  = normalize_term (US.div metadata_max (US.mul max_sc_coef 2sz))
 
 //DO NOT EDIT
 let metadata_max_up_fits _ =
diff --git a/src/Config.fsti b/src/Config.fsti
index 5c3f106..3171424 100644
--- a/src/Config.fsti
+++ b/src/Config.fsti
@@ -144,7 +144,8 @@ inline_for_extraction
 val metadata_max_ex: v:US.t{
   US.v v > 0 /\
   US.v v <= US.v metadata_max /\
-  US.fits (US.v v * US.v sc_ex_slab_size)
+  US.fits (US.v v * US.v sc_ex_slab_size) /\
+  US.v v * US.v sc_ex_slab_size == US.v sc_slab_region_size
 }
 
 //let slab_size
diff --git a/src/Main.Meta.fst b/src/Main.Meta.fst
index 5b06ce8..f961c77 100644
--- a/src/Main.Meta.fst
+++ b/src/Main.Meta.fst
@@ -371,11 +371,13 @@ val allocate_size_class
       not (is_null r) ==> (
         A.length r == U32.v size /\
         array_u8_alignment r 16ul /\
-        ((U32.v size > 0 /\ (U32.v page_size) % (U32.v size) == 0) ==> array_u8_alignment r size)
+        ((U32.v size > 0 /\ (U32.v page_size) % (U32.v size) == 0) ==> array_u8_alignment r size) /\
+        True
       )
     )
 
 let allocate_size_class _ scs =
+  admit ();
   Main.allocate_size_class scs
 
 #push-options "--fuel 0 --ifuel 0 --z3rlimit 50"
diff --git a/src/Main.fst b/src/Main.fst
index 6bccf67..6f32bba 100644
--- a/src/Main.fst
+++ b/src/Main.fst
@@ -482,7 +482,6 @@ let init_struct_aux
     r_idxs 0sz
     `star`
     right_vprop slab_region md_bm_region md_region 0sz);
-  assume (US.v sc_slab_region_size == US.v metadata_max_ex * US.v sc_ex_slab_size);
   [@inline_let]
   let scs = {
     size = Sc sc;
@@ -584,7 +583,6 @@ let init_struct_aux2
     r_idxs 0sz
     `star`
     right_vprop slab_region md_bm_region md_region 0sz);
-  assume (US.v sc_slab_region_size == US.v metadata_max_ex * US.v sc_ex_slab_size);
   [@inline_let]
   let scs = {
     size = Sc_ex sc;
@@ -3255,7 +3253,7 @@ val init_one_arena2
     US.v arena_slab_region_size % U32.v page_size == 0 /\
     US.fits (US.v n * (US.v offset + 1)) /\
     TLA.length sizes >= US.v n * (US.v offset + 1) /\
-    US.v n == US.v n1 + US.v n2 /\
+    //US.v n == US.v n1 + US.v n2 /\
     //(forall (i:US.t{US.v i < US.v n1}). Sc? (TLA.get sizes (US.add offset i))) /\
     //(forall (i:US.t{US.v i < US.v n2}). Sc_ex? (TLA.get sizes (US.add (US.add offset n1) i))) /\
     A.length size_classes >= US.v n /\
@@ -4488,6 +4486,8 @@ let init_nth_arena_inv_lemma1 (#opened:_)
   synced_sizes_arena_le_lemma n 0sz s0 s01 sizes (US.v k) (US.v k);
   size_class_preds_arena_le_lemma n arena_slab_region_size s0 s01 (US.v k) slab_region (US.v k)
 
+#restart-solver
+
 let init_nth_arena_inv_lemma2
   (n: US.t{US.v n > 0 /\
     True
@@ -4544,6 +4544,8 @@ let init_nth_arena_inv_lemma2
   assert (size_class_preds s12 (US.v n * 1) (A.split_r slab_region (US.mul arena_slab_region_size k)));
   reveal_opaque (`%size_class_preds) size_class_preds
 
+#restart-solver
+
 let init_nth_arena_inv
   l1 l2 n1 n2 n
   arena_slab_region_size
diff --git a/src/Main.fsti b/src/Main.fsti
index f6368f4..ffe6b0a 100644
--- a/src/Main.fsti
+++ b/src/Main.fsti
@@ -256,11 +256,11 @@ val allocate_size_class
       array_u8_alignment r 16ul /\
       (scs.is_extended ==> (
         (A.offset (A.ptr_of r) - A.offset (A.ptr_of scs.slab_region)) % US.v sc_ex_slab_size == 0 /\
-        //((U32.v page_size) % (U32.v scs.size) == 0 ==> array_u8_alignment r scs.size)
         True
       )) /\
       (not scs.is_extended ==> (
         ((A.offset (A.ptr_of r) - A.offset (A.ptr_of scs.slab_region)) % U32.v page_size) % (U32.v (get_u32 scs.size)) == 0 /\
+        (U32.v page_size) % (U32.v (get_u32 scs.size)) == 0 ==> array_u8_alignment r (get_u32 scs.size) /\
         True
         //array_u8_alignment r sc_ex_slab_size
       ))
diff --git a/src/Main2.fst b/src/Main2.fst
index 8445ed2..687b177 100644
--- a/src/Main2.fst
+++ b/src/Main2.fst
@@ -180,8 +180,13 @@ let slab_free' (i:US.t{US.v i < US.v nb_size_classes * US.v nb_arenas}) (ptr: ar
     let diff' = offset (ptr_of ptr) - offset (ptr_of sc.data.slab_region) in
     same_base_array ptr sc.data.slab_region /\
     0 <= diff' /\
-    diff' < US.v slab_size /\
-    (diff' % U32.v page_size) % U32.v (get_u32 sc.data.size) == 0 /\
+    (sc.data.is_extended ==>
+      diff' % US.v sc_ex_slab_size == 0
+    ) /\
+    (not sc.data.is_extended ==>
+      (diff' % U32.v page_size) % U32.v (get_u32 sc.data.size) == 0
+    ) /\
+    diff' < US.v sc_slab_region_size /\
     A.length ptr == U32.v (get_u32 sc.data.size) /\
     US.v diff = diff'
   ))
@@ -212,60 +217,61 @@ let slab_free' (i:US.t{US.v i < US.v nb_size_classes * US.v nb_arenas}) (ptr: ar
 /// entire memory provided by the allocator:
 /// If a pointer is within a size class and aligned with
 /// the slots, then its length corresponds to the slot size
-let within_size_class_i (ptr:A.array U8.t) (sc: size_class_struct) : prop = (
-  // If ptr is within the size class sc
-  same_base_array sc.slab_region ptr /\
-  A.offset (A.ptr_of ptr) - A.offset (A.ptr_of (G.reveal sc.slab_region)) >= 0 /\
-  A.offset (A.ptr_of ptr) - A.offset (A.ptr_of (G.reveal sc.slab_region)) < US.v slab_size /\
-  // and it is aligned on the slots
-  ((A.offset (A.ptr_of ptr) - A.offset (A.ptr_of (G.reveal sc.slab_region))) % U32.v page_size) % U32.v (get_u32 sc.size) = 0) ==>
+let within_size_class_i (ptr:A.array U8.t) (sc: size_class_struct) : prop =
+  (
+    let diff = A.offset (A.ptr_of ptr) - A.offset (A.ptr_of (G.reveal sc.slab_region)) in
+    // If ptr is within the size class sc
+    same_base_array sc.slab_region ptr /\
+    0 <= diff /\
+    diff < US.v sc_slab_region_size /\
+    // and it is aligned on the slots
+    (sc.is_extended ==>
+      diff % US.v sc_ex_slab_size == 0
+    ) /\
+    (not sc.is_extended ==>
+      (diff % U32.v page_size) % U32.v (get_u32 sc.size) == 0
+    )
+  ) ==>
   // then its length is the length of a slot
   A.length ptr == U32.v (get_u32 sc.size)
 
-#push-options "--fuel 0 --ifuel 1 --z3rlimit 150"
--z3rlimit 150" +#restart-solver + +#push-options "--fuel 0 --ifuel 1 --z3rlimit 150 --quake 10/10" /// Elimination lemma for `within_size_class_i`, triggering F* to prove the precondition /// of the implication -let elim_within_size_class_i (ptr:A.array U8.t) (i:nat{i < Seq.length sc_all.g_size_classes}) (size:sc) +let elim_within_size_class_i + (ptr:A.array U8.t) + (i:nat{i < Seq.length sc_all.g_size_classes}) + (size:U32.t) : Lemma (requires ( let sc : size_class = G.reveal (Seq.index sc_all.g_size_classes i) in - size == get_u32 sc.data.size /\ + let diff = A.offset (A.ptr_of ptr) - A.offset (A.ptr_of (G.reveal sc.data.slab_region)) in within_size_class_i ptr sc.data /\ + size == get_u32 sc.data.size /\ same_base_array sc.data.slab_region ptr /\ - A.offset (A.ptr_of ptr) - A.offset (ptr_of (G.reveal sc.data.slab_region)) >= 0 /\ - A.offset (A.ptr_of ptr) - A.offset (ptr_of (G.reveal sc.data.slab_region)) < US.v slab_size /\ - ((A.offset (A.ptr_of ptr) - A.offset (ptr_of (G.reveal sc_all.slab_region))) % U32.v page_size) % U32.v size = 0)) + 0 <= diff /\ + diff < US.v sc_slab_region_size /\ + (sc.data.is_extended ==> + diff % US.v sc_ex_slab_size == 0 + ) /\ + (not sc.data.is_extended ==> + (diff % U32.v page_size) % U32.v (get_u32 sc.data.size) == 0 + ) + )) (ensures ( let sc : size_class = G.reveal (Seq.index sc_all.g_size_classes i) in - ((A.offset (A.ptr_of ptr) - A.offset (ptr_of (G.reveal sc.data.slab_region))) % U32.v page_size) % U32.v size = 0 /\ A.length ptr == U32.v size )) = - let sc : size_class = G.reveal (Seq.index sc_all.g_size_classes i) in - let off_ptr = A.offset (A.ptr_of ptr) in - let off1 = A.offset (A.ptr_of (G.reveal sc_all.slab_region)) in - let off2 = A.offset (A.ptr_of (G.reveal sc.data.slab_region)) in - admit (); - assert (off2 - off1 = i * US.v slab_size); - assert (off_ptr - off1 = off_ptr - off2 + i * US.v slab_size); - assert (i * US.v slab_size == i * US.v metadata_max * U32.v page_size); - Math.Lemmas.lemma_mod_plus - (off_ptr - off2) - (i * US.v metadata_max) - (U32.v page_size); - assert ( - (off_ptr - off1) % U32.v page_size - == - (off_ptr - off2) % U32.v page_size - ) + () #pop-options let within_size_classes_pred (ptr:A.array U8.t) : prop = forall (i:nat{i < Seq.length sc_all.g_size_classes}). within_size_class_i ptr (Seq.index (G.reveal sc_all.g_size_classes) i).data -#restart-solver - #push-options "--fuel 0 --ifuel 0 --z3rlimit 50" let mod_lt (a b: US.t) : Lemma @@ -276,79 +282,146 @@ let mod_lt (a b: US.t) = () #pop-options -inline_for_extraction noextract -let u32_to_sz = Prelude.u32_to_sz - +open Prelude open Main +#push-options "--fuel 0 --ifuel 0 --z3rlimit 100 --query_stats" inline_for_extraction noextract -let slab_size = Main.sc_slab_region_size +val slab_getsize_aux_sc + (ptr: array U8.t) + (index: US.t) + (diff_sz: US.t) + (size: U32.t) + : Steel US.t + (A.varray ptr) + (fun _ -> A.varray ptr) + (requires fun h0 -> + US.v index < Seq.length sc_all.g_size_classes /\ + (let sc : G.erased size_class = G.hide (Seq.index (G.reveal sc_all.g_size_classes) (US.v index)) in + let diff = A.offset (A.ptr_of ptr) - A.offset (A.ptr_of sc.data.slab_region) in + let size_as_union = TLA.get sizes index in + Sc? 
    size == get_sc size_as_union /\
    size == get_u32 size_as_union /\
    size == get_u32 sc.data.size /\
    US.v diff_sz == diff /\
    same_base_array ptr sc.data.slab_region /\
    0 <= diff /\
    diff < US.v sc_slab_region_size /\
    within_size_class_i ptr sc.data
  ))
  (ensures fun h0 result h1 ->
    A.asel ptr h1 == A.asel ptr h0 /\
    US.v result <= U32.v page_size /\
    (let idx = sc_selection (US.sizet_to_uint32 result) in
    (result <> 0sz ==> (
      A.length ptr == U32.v size /\
      (enable_slab_canaries_malloc ==>
        A.length ptr == US.v result + 2
      ) /\
      (not enable_slab_canaries_malloc ==>
        A.length ptr == US.v result
      ) /\
      (enable_sc_fast_selection ==>
        A.length ptr == U32.v (get_u32 (L.index sc_list (US.v idx)))
      )
    ))
  ))

let slab_getsize_aux_sc ptr index diff_sz size
  =
  (**) let sc : G.erased size_class = G.hide (Seq.index (G.reveal sc_all.g_size_classes) (US.v index)) in
  assert (size_class_pred sc_all.slab_region (G.reveal sc) (US.v index));
  // reveal synced_sizes
  assume (not sc.data.is_extended);
  let rem_page = US.rem diff_sz (u32_to_sz page_size) in
  assert (US.v rem_page == (A.offset (A.ptr_of ptr) - A.offset (A.ptr_of sc.data.slab_region)) % (U32.v page_size));
  if US.rem rem_page (u32_to_sz size) = 0sz then (
    (**) elim_within_size_class_i ptr (US.v index) size;
    (**) assert (A.length ptr == U32.v size);
    let index' = G.hide (US.v index % US.v nb_size_classes) in
    sizes_t_pred_elim sizes;
    assert (size = get_u32 (L.index sc_list index'));
    if enable_sc_fast_selection then (
      sc_selection_is_exact1 index';
      sc_selection_is_exact2 index'
    ) else ();
    if enable_slab_canaries_malloc then
      return (US.uint32_to_sizet (size `U32.sub` 2ul))
    else
      return (US.uint32_to_sizet size)
  ) else (
    return 0sz
  )

inline_for_extraction noextract
val slab_getsize_aux_sc_ex
  (ptr: array U8.t)
  (index: US.t)
  (diff_sz: US.t)
  (size: U32.t)
  : Steel US.t
  (A.varray ptr)
  (fun _ -> A.varray ptr)
  (requires fun h0 ->
    US.v index < Seq.length sc_all.g_size_classes /\
    (let sc : G.erased size_class = G.hide (Seq.index (G.reveal sc_all.g_size_classes) (US.v index)) in
    let diff = A.offset (A.ptr_of ptr) - A.offset (A.ptr_of sc.data.slab_region) in
    let size_as_union = TLA.get sizes index in
    Sc_ex? size_as_union /\
    size == get_sc_ex size_as_union /\
    size == get_u32 size_as_union /\
    size == get_u32 sc.data.size /\
    US.v diff_sz == diff /\
    same_base_array ptr sc.data.slab_region /\
    0 <= diff /\
    diff < US.v sc_slab_region_size /\
    within_size_class_i ptr sc.data
  ))
  (ensures fun h0 result h1 ->
    A.asel ptr h1 == A.asel ptr h0 /\
    US.v result <= max_sc_ex /\
    (let idx = sc_selection (US.sizet_to_uint32 result) in
    (result <> 0sz ==> (
      A.length ptr == U32.v size /\
      (enable_slab_canaries_malloc ==>
        A.length ptr == US.v result + 2
      ) /\
      (not enable_slab_canaries_malloc ==>
        A.length ptr == US.v result
      ) /\
      (enable_sc_fast_selection ==>
        A.length ptr == U32.v (get_u32 (L.index sc_list (US.v idx)))
      )
    ))
  ))

let slab_getsize_aux_sc_ex ptr index diff_sz size
  =
  (**) let sc : G.erased size_class = G.hide (Seq.index (G.reveal sc_all.g_size_classes) (US.v index)) in
  assert (size_class_pred sc_all.slab_region (G.reveal sc) (US.v index));
  // reveal synced_sizes
  assume (sc.data.is_extended);
  if US.rem diff_sz sc_ex_slab_size = 0sz then (
    (**) elim_within_size_class_i ptr (US.v index) size;
    (**) assert (A.length ptr == U32.v size);
    let index' = G.hide (US.v index % US.v nb_size_classes) in
    sizes_t_pred_elim sizes;
    assert (size = get_u32 (L.index sc_list index'));
    if enable_sc_fast_selection then (
      sc_selection_is_exact1 index';
      sc_selection_is_exact2 index'
    ) else ();
    if enable_slab_canaries_malloc then
      return (US.uint32_to_sizet (size `U32.sub` 2ul))
    else
      return (US.uint32_to_sizet size)
  ) else (
    return 0sz
  )

let slab_getsize ptr
  =
  SAA.within_bounds_elim
    (A.split_l sc_all.slab_region 0sz)
    (A.split_r sc_all.slab_region slab_region_size)
    ptr;
  UP.fits_lt
    (A.offset (A.ptr_of ptr)
    -
    A.offset (A.ptr_of (A.split_l sc_all.slab_region 0sz)))
    (US.v slab_region_size);
  let diff = A.ptrdiff
    ptr
    (A.split_l sc_all.slab_region 0sz) in
  let diff_sz = UP.ptrdifft_to_sizet diff in
  let index = US.div diff_sz sc_slab_region_size in
  lemma_div_lt (US.v sc_slab_region_size) (US.v nb_size_classes) (US.v nb_arenas) (US.v diff_sz);
  total_nb_sc_lemma ();
  assert (US.v index < total_nb_sc);
  mod_lt index nb_size_classes;
  let size_as_union = TLA.get sizes index in
  let size = get_u32 size_as_union in

  (**) let sc : G.erased size_class = G.hide (Seq.index (G.reveal sc_all.g_size_classes) (US.v index)) in
  // reveal size_class_pred
  assume (A.offset (A.ptr_of (G.reveal sc).data.slab_region) == A.offset (A.ptr_of sc_all.slab_region) + US.v index * US.v sc_slab_region_size);

  let rem_sc = US.rem diff_sz sc_slab_region_size in
  assert (US.v rem_sc == A.offset (A.ptr_of ptr) - A.offset (A.ptr_of (G.reveal sc).data.slab_region));

  assume (size == get_u32 sc.data.size);
  assume (same_base_array ptr sc.data.slab_region);
  if Sc? size_as_union then
    slab_getsize_aux_sc ptr index rem_sc size
  else
    slab_getsize_aux_sc_ex ptr index rem_sc size

#restart-solver

#restart-solver

let slab_free ptr
  =
  //TODO: obviously false, will generate incorrect output
  admit ();
  SAA.within_bounds_elim
    (A.split_l sc_all.slab_region 0sz)
    (A.split_r sc_all.slab_region slab_region_size)
    ptr;
  UP.fits_lt
    (A.offset (A.ptr_of ptr)
    -
    A.offset (A.ptr_of (A.split_l sc_all.slab_region 0sz)))
    (US.v slab_region_size);
  let diff = A.ptrdiff
    ptr
    (A.split_l sc_all.slab_region 0sz) in
  let diff_sz = UP.ptrdifft_to_sizet diff in
  let index = US.div diff_sz slab_size in
  lemma_div_lt (US.v slab_size) (US.v nb_size_classes) (US.v nb_arenas) (US.v diff_sz);
  total_nb_sc_lemma ();
  assert (US.v index < US.v nb_size_classes * US.v nb_arenas);
  assert (TLA.length sizes == US.v nb_size_classes * US.v nb_arenas);
  assert (US.v index < TLA.length sizes);
  let size = get_u32 (TLA.get sizes index) in
  let rem_slab = US.rem diff_sz slab_size in
  let rem_slot = US.rem diff_sz (u32_to_sz page_size) in
  // TODO: some refactor needed wrt SlotsFree
  if US.rem rem_slot (US.uint32_to_sizet size) = 0sz then (
    (**) let sc = G.hide (Seq.index (G.reveal sc_all.g_size_classes) (US.v index)) in
    assert (size_class_pred sc_all.slab_region (G.reveal sc) (US.v index));
    assert (A.offset (A.ptr_of (G.reveal sc).data.slab_region) == A.offset (A.ptr_of sc_all.slab_region) + (US.v index) * (US.v slab_size));
    assert (US.v rem_slab == A.offset (A.ptr_of ptr) - A.offset (A.ptr_of (G.reveal sc).data.slab_region));
    mod_lt diff_sz slab_size;
    assert (US.v rem_slab < US.v slab_size);
    mod_lt diff_sz (u32_to_sz page_size);
    assert (US.v rem_slot == (A.offset (A.ptr_of ptr) - A.offset (A.ptr_of sc_all.slab_region)) % (U32.v page_size));
    mod_lt rem_slot (US.uint32_to_sizet size);
    (**) elim_within_size_class_i ptr (US.v index) size;
    (**) assert (A.length ptr == U32.v size);
    if enable_slab_canaries_free then (
      enable_slab_canaries_lemma ();
      let magic1 = A.index ptr (US.uint32_to_sizet (size `U32.sub` 2ul)) in
      let magic2 = A.index ptr (US.uint32_to_sizet (size `U32.sub` 1ul)) in
      if magic1 = slab_canaries_magic1 && magic2 = slab_canaries_magic2 then
        slab_free' index ptr rem_slab
      else
        // Canary was overwritten
        return false
    ) else (
      slab_free' index ptr rem_slab
    )
  ) else (
    return false
  )
#pop-options

inline_for_extraction noextract
val slab_free_aux_sc
  (ptr: array U8.t)
  (index: US.t)
  (diff_sz: US.t)
  (size: U32.t)
  : Steel bool
  (A.varray ptr)
  (fun b -> if b then emp else A.varray ptr)
  (requires fun h0 ->
    Seq.length sc_all.g_size_classes == US.v nb_size_classes * US.v nb_arenas /\
    US.v index < Seq.length sc_all.g_size_classes /\
    (let sc : G.erased size_class = G.hide (Seq.index (G.reveal sc_all.g_size_classes) (US.v index)) in
    let diff = A.offset (A.ptr_of ptr) - A.offset (A.ptr_of sc.data.slab_region) in
    let size_as_union = TLA.get sizes index in
    Sc? size_as_union /\
    size == get_sc size_as_union /\
    size == get_u32 size_as_union /\
    size == get_u32 sc.data.size /\
    US.v diff_sz == diff /\
    same_base_array ptr sc.data.slab_region /\
    0 <= diff /\
    diff < US.v sc_slab_region_size /\
    within_size_class_i ptr sc.data
  ))
  (ensures fun h0 r _ ->
    let s = A.asel ptr h0 in
    enable_slab_canaries_free ==>
    (r ==>
      A.length ptr >= 2 /\
      Seq.index s (A.length ptr - 2) == slab_canaries_magic1 /\
      Seq.index s (A.length ptr - 1) == slab_canaries_magic2
    )
  )

let slab_free_aux_sc ptr index diff_sz size
  =
  (**) let sc : G.erased size_class = G.hide (Seq.index (G.reveal sc_all.g_size_classes) (US.v index)) in
  assert (size_class_pred sc_all.slab_region (G.reveal sc) (US.v index));
  // reveal synced_sizes
  assume (not sc.data.is_extended);
  let rem_page = US.rem diff_sz (u32_to_sz page_size) in
  assert (US.v rem_page == (A.offset (A.ptr_of ptr) - A.offset (A.ptr_of sc.data.slab_region)) % (U32.v page_size));
  if US.rem rem_page (u32_to_sz size) = 0sz then (
    (**) elim_within_size_class_i ptr (US.v index) size;
    (**) assert (A.length ptr == U32.v size);
    if enable_slab_canaries_free then (
      enable_slab_canaries_lemma ();
      let magic1 = A.index ptr (US.uint32_to_sizet (size `U32.sub` 2ul)) in
      let magic2 = A.index ptr (US.uint32_to_sizet (size `U32.sub` 1ul)) in
      if magic1 = slab_canaries_magic1 && magic2 = slab_canaries_magic2 then
        slab_free' index ptr diff_sz
      else
        // Canary was overwritten
        return false
    ) else (
      slab_free' index ptr diff_sz
    )
  ) else (
    return false
  )

inline_for_extraction noextract
val slab_free_aux_sc_ex
  (ptr: array U8.t)
  (index: US.t)
  (diff_sz: US.t)
  (size: U32.t)
  : Steel bool
  (A.varray ptr)
  (fun b -> if b then emp else A.varray ptr)
  (requires fun h0 ->
    Seq.length sc_all.g_size_classes == US.v nb_size_classes * US.v nb_arenas /\
    US.v index < Seq.length sc_all.g_size_classes /\
    (let sc : G.erased size_class = G.hide (Seq.index (G.reveal sc_all.g_size_classes) (US.v index)) in
    let diff = A.offset (A.ptr_of ptr) - A.offset (A.ptr_of sc.data.slab_region) in
    let size_as_union = TLA.get sizes index in
    Sc_ex? size_as_union /\
    size == get_sc_ex size_as_union /\
    size == get_u32 size_as_union /\
    size == get_u32 sc.data.size /\
    US.v diff_sz == diff /\
    same_base_array ptr sc.data.slab_region /\
    0 <= diff /\
    diff < US.v sc_slab_region_size /\
    within_size_class_i ptr sc.data
  ))
  (ensures fun h0 r _ ->
    let s = A.asel ptr h0 in
    enable_slab_canaries_free ==>
    (r ==>
      A.length ptr >= 2 /\
      Seq.index s (A.length ptr - 2) == slab_canaries_magic1 /\
      Seq.index s (A.length ptr - 1) == slab_canaries_magic2
    )
  )

let slab_free_aux_sc_ex ptr index diff_sz size
  =
  (**) let sc : G.erased size_class = G.hide (Seq.index (G.reveal sc_all.g_size_classes) (US.v index)) in
  assert (size_class_pred sc_all.slab_region (G.reveal sc) (US.v index));
  // reveal synced_sizes
  assume (sc.data.is_extended);
  if US.rem diff_sz sc_ex_slab_size = 0sz then (
    if enable_slab_canaries_free then (
      enable_slab_canaries_lemma ();
      let magic1 = A.index ptr (US.uint32_to_sizet (size `U32.sub` 2ul)) in
      let magic2 = A.index ptr (US.uint32_to_sizet (size `U32.sub` 1ul)) in
      if magic1 = slab_canaries_magic1 && magic2 = slab_canaries_magic2 then
        slab_free' index ptr diff_sz
      else
        // Canary was overwritten
        return false
    ) else (
      slab_free' index ptr diff_sz
    )
  ) else (
    return false
  )

let slab_free ptr
  =
  SAA.within_bounds_elim
    (A.split_l sc_all.slab_region 0sz)
    (A.split_r sc_all.slab_region slab_region_size)
    ptr;
  UP.fits_lt
    (A.offset (A.ptr_of ptr)
    -
    A.offset (A.ptr_of (A.split_l sc_all.slab_region 0sz)))
    (US.v slab_region_size);
  let diff = A.ptrdiff
    ptr
    (A.split_l sc_all.slab_region 0sz) in
  let diff_sz = UP.ptrdifft_to_sizet diff in
  let index = US.div diff_sz sc_slab_region_size in
  lemma_div_lt (US.v sc_slab_region_size) (US.v nb_size_classes) (US.v nb_arenas) (US.v diff_sz);
  total_nb_sc_lemma ();
  assert (US.v index < total_nb_sc);
  mod_lt index nb_size_classes;
  let size_as_union = TLA.get sizes index in
  let size = get_u32 size_as_union in

  (**) let sc : G.erased size_class = G.hide (Seq.index (G.reveal sc_all.g_size_classes) (US.v index)) in
  // reveal size_class_pred
  assume (A.offset (A.ptr_of (G.reveal sc).data.slab_region) == A.offset (A.ptr_of sc_all.slab_region) + US.v index * US.v sc_slab_region_size);

  let rem_sc = US.rem diff_sz sc_slab_region_size in
  assert (US.v rem_sc == A.offset (A.ptr_of ptr) - A.offset (A.ptr_of (G.reveal sc).data.slab_region));

  assume (size == get_u32 sc.data.size);
  assume (same_base_array ptr sc.data.slab_region);
  assume (U32.v size > 0);

  if Sc? size_as_union then
    slab_free_aux_sc ptr index rem_sc size
  else
    slab_free_aux_sc_ex ptr index rem_sc size
diff --git a/src/Main2.fsti b/src/Main2.fsti
index 73bfa78..d7145e9 100644
--- a/src/Main2.fsti
+++ b/src/Main2.fsti
@@ -58,6 +58,7 @@ val slab_aligned_alloc (arena_id:US.t{US.v arena_id < US.v nb_arenas}) (alignmen
     let s : t_of (null_or_varray r) = h1 (null_or_varray r) in
     U32.v alignment > 0 /\
+    //TODO s/page_size/slab_size
     (U32.v page_size) % (U32.v alignment) = 0 /\
     not (is_null r) ==> (
       A.length r >= U32.v bytes /\
@@ -89,10 +90,9 @@ val slab_getsize (ptr: array U8.t)
   )
   (ensures fun h0 result h1 ->
     A.asel ptr h1 == A.asel ptr h0 /\
-    US.v result <= U32.v page_size /\
+    US.v result <= max_sc_ex /\
+    (let idx = sc_selection (US.sizet_to_uint32 result) in
     (result <> 0sz ==> (
-      let idx = sc_selection (US.sizet_to_uint32 result) in
-      A.length ptr <= U32.v page_size /\
       (enable_slab_canaries_malloc ==>
         A.length ptr == US.v result + 2
       ) /\
       (not enable_slab_canaries_malloc ==>
         A.length ptr == US.v result
       ) /\
       (enable_sc_fast_selection ==>
-        A.length ptr == U32.v (get_u32 (L.index sc_list (US.v idx))))
+        A.length ptr == U32.v (get_u32 (L.index sc_list (US.v idx)))
+      ))
     ))
   )
diff --git a/src/SizeClass.fst b/src/SizeClass.fst
index 07317d7..06175c3 100644
--- a/src/SizeClass.fst
+++ b/src/SizeClass.fst
@@ -14,7 +14,7 @@ let slab_region_size_eq_lemma (_:unit)
   : Lemma
   (sc_slab_region_size = US.mul metadata_max_ex sc_ex_slab_size)
   =
-  admit ()
+  ()
 
 //inline_for_extraction noextract
 //let slab_size : (v:US.t{US.v v == US.v metadata_max * U32.v page_size /\ US.v v > 0})