From eb79b84cb922c6e64e843bc51781eebe52b7b173 Mon Sep 17 00:00:00 2001 From: Steven Eker Date: Mon, 9 Dec 2024 12:10:57 -0800 Subject: [PATCH 1/4] Further allocator refactoring (#1173) Dead code eliminated: youngspace_size() arena_size() union memory_block_header and associated functionality replaced. The semispace id is now stored in the last byte of the hyperblock which simplifies address computations. --- include/runtime/alloc.h | 1 - include/runtime/arena.h | 71 +++++++++++++++++------------------------ runtime/alloc/arena.cpp | 17 ++++------ runtime/lto/alloc.cpp | 4 --- 4 files changed, 35 insertions(+), 58 deletions(-) diff --git a/include/runtime/alloc.h b/include/runtime/alloc.h index d94ed0c5b..9208e5ee8 100644 --- a/include/runtime/alloc.h +++ b/include/runtime/alloc.h @@ -15,7 +15,6 @@ extern "C" { char youngspace_collection_id(void); char oldspace_collection_id(void); -size_t youngspace_size(void); // allocates exactly requested bytes into the young generation void *kore_alloc(size_t requested); diff --git a/include/runtime/arena.h b/include/runtime/arena.h index 3f71445e4..bac3968a3 100644 --- a/include/runtime/arena.h +++ b/include/runtime/arena.h @@ -27,32 +27,21 @@ class arena { void *kore_arena_alloc(size_t requested); // Returns the address of the first byte that belongs in the given arena. - // Returns 0 if nothing has been allocated ever in that arena. - char *arena_start_ptr() const { - return current_addr_ptr ? current_addr_ptr + sizeof(memory_block_header) - : nullptr; - } + // Returns nullptr if nothing has been allocated ever in that arena. + char *arena_start_ptr() const { return current_addr_ptr; } // Returns a pointer to a location holding the address of last allocated // byte in the given arena plus 1. - // This address is 0 if nothing has been allocated ever in that arena. + // This address is nullptr if nothing has been allocated ever in that arena. 
char **arena_end_ptr() { return &allocation_ptr; } - // return the total number of allocatable bytes currently in the arena in its - // active semispace. - size_t arena_size() const { - update_num_blocks(); - return BLOCK_SIZE * std::max(num_blocks, num_collection_blocks); - } - // Clears the current allocation space by setting its start back to its first // block. It is used during garbage collection to effectively collect all of the - // arena. + // arena. Resets the tripwire. void arena_clear(); - // Resizes the last allocation as long as the resize does not require a new - // block allocation. - // Returns the address of the byte following the last newlly allocated byte. + // Resizes the last allocation. + // Returns the address of the byte following the last newly allocated byte. void *arena_resize_last_alloc(ssize_t increase) { return (allocation_ptr += increase); } @@ -71,10 +60,8 @@ class arena { void arena_swap_and_clear(); // Given two pointers to objects allocated in the same arena, return the number - // of bytes they are separated by within the virtual block of memory represented - // by the blocks of that arena. This difference will include blocks containing - // sentinel bytes. Undefined behavior will result if the pointers belong to - // different arenas. + // of bytes they are apart. Undefined behavior will result if the pointers + // don't belong to the same arena static ssize_t ptr_diff(char *ptr1, char *ptr2) { return ptr1 - ptr2; } // Given a starting pointer to an address allocated in an arena and a size in @@ -84,11 +71,11 @@ class arena { // 1st argument: the starting pointer // 2nd argument: the size in bytes to add to the starting pointer // 3rd argument: the address of last allocated byte in the arena plus 1 - // Return value: the address allocated in the arena after size bytes from the - // starting pointer, or 0 if this is equal to the 3rd argument. 
+ // Return value: starting pointer + size unless this points to unallocated space + // in which case nullptr is returned static char *move_ptr(char *ptr, size_t size, char const *arena_end_ptr) { char *next_ptr = ptr + size; - return (next_ptr == arena_end_ptr) ? 0 : next_ptr; + return (next_ptr == arena_end_ptr) ? nullptr : next_ptr; } // Returns the ID of the semispace where the given address was allocated. @@ -97,15 +84,6 @@ class arena { static char get_arena_semispace_id_of_object(void *ptr); private: - union memory_block_header { - // - // Currently the header just holds the semispace id. But we need it to be a - // multiple of sizeof(char*) for alignment purposes so we add a dummy char*. - // - char semispace; - char *alignment_dummy; - }; - // // We update the number of 1MB blocks actually written to, only when we need this value, // or before a garbage collection rather than trying to determine when we write to a fresh block. @@ -121,13 +99,6 @@ class arena { } void initialize_semispace(); - - static memory_block_header *mem_block_header(void *ptr) { - uintptr_t address = reinterpret_cast(ptr); - return reinterpret_cast( - (address - 1) & ~(HYPERBLOCK_SIZE - 1)); - } - // // Current semispace where allocations are being made. // @@ -146,6 +117,19 @@ class arena { = 0; // notional number of BLOCK_SIZE blocks in collection semispace }; +inline char arena::get_arena_semispace_id_of_object(void *ptr) { + // + // We don't have to deal with the "1 past the end of block" case because + // a valid pointer will always point into our hyperblock - we will never return + // an allocation anywhere near the end of our hyperblock. + // + // Set the low bits to 1 to get the address of the last byte in the hyperblock. + // + uintptr_t end_address + = reinterpret_cast(ptr) | (HYPERBLOCK_SIZE - 1); + return *reinterpret_cast(end_address); +} + // Macro to define a new arena with the given ID. Supports IDs ranging from 0 to // 127. 
#define REGISTER_ARENA(name, id) static thread_local arena name(id) @@ -169,8 +153,11 @@ inline void *arena::kore_arena_alloc(size_t requested) { // collect when allowed. // time_for_collection = true; - tripwire = current_addr_ptr - + HYPERBLOCK_SIZE; // won't trigger again until arena swap + // + // We move the tripwire to 1 past the end of our hyperblock so that we have + // a well defined comparison that will always be false until the next arena swap. + // + tripwire = current_addr_ptr + HYPERBLOCK_SIZE; } void *result = allocation_ptr; allocation_ptr += requested; diff --git a/runtime/alloc/arena.cpp b/runtime/alloc/arena.cpp index 9fbde8bf9..c657dada4 100644 --- a/runtime/alloc/arena.cpp +++ b/runtime/alloc/arena.cpp @@ -11,11 +11,6 @@ extern size_t const VAR_BLOCK_SIZE = BLOCK_SIZE; -__attribute__((always_inline)) char -arena::get_arena_semispace_id_of_object(void *ptr) { - return mem_block_header(ptr)->semispace; -} - #ifdef __MACH__ // // thread_local disabled for Apple // @@ -46,19 +41,19 @@ void arena::initialize_semispace() { } // // We allocated 2 * HYPERBLOCK_SIZE worth of address space but we're only going to use 1, aligned on a - // HYPERBLOCK_SIZE boundry. This is so we can get the start of the hyperblock by masking any address within it. + // HYPERBLOCK_SIZE boundary. This is so we can get the end of the hyperblock by setting the low bits of any + // address within the space to 1. // We don't worry about unused address space either side of our aligned address space because there will be no // memory mapped to it. // current_addr_ptr = reinterpret_cast( std::align(HYPERBLOCK_SIZE, HYPERBLOCK_SIZE, addr, request)); // - // We put a memory_block_header at the beginning so we can identify the semispace a pointer belongs to - // id by masking off the low bits to access this memory_block_header. 
+ // We put a semispace id in the last byte of the hyperblock so we can identify which semispace an address + // belongs to by setting the low bits to 1 to access this id. // - auto *header = reinterpret_cast(current_addr_ptr); - header->semispace = allocation_semispace_id; - allocation_ptr = current_addr_ptr + sizeof(arena::memory_block_header); + current_addr_ptr[HYPERBLOCK_SIZE - 1] = allocation_semispace_id; + allocation_ptr = current_addr_ptr; // // We set the tripwire for this space so we get trigger a garbage collection when we pass BLOCK_SIZE of memory // allocated from this space. diff --git a/runtime/lto/alloc.cpp b/runtime/lto/alloc.cpp index 0cd79a3f8..2928ad62f 100644 --- a/runtime/lto/alloc.cpp +++ b/runtime/lto/alloc.cpp @@ -39,10 +39,6 @@ char oldspace_collection_id() { return oldspace.get_arena_collection_semispace_id(); } -size_t youngspace_size(void) { - return youngspace.arena_size(); -} - void kore_alloc_swap(bool swap_old) { youngspace.arena_swap_and_clear(); if (swap_old) { From c362cf06d3b1b5d7c029c8d32ba33b796bbd5931 Mon Sep 17 00:00:00 2001 From: Roberto Rosmaninho Date: Mon, 9 Dec 2024 17:44:03 -0300 Subject: [PATCH 2/4] Add relevant info for MacOS building process (#1176) --- INSTALL.md | 64 +++++++++++++++++++++++++++++++++++------ cmake/FixHomebrew.cmake | 10 +++++++ 2 files changed, 65 insertions(+), 9 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index 0d0707e99..c933f0c19 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -30,7 +30,7 @@ python3 -m pip install pybind11 lit ``` ## macOS / Homebrew - +In order to install the dependencies on macOS, you must have Homebrew installed and on your `PATH`. 
```shell brew update brew install \ boost \ cmake \ coreutils \ flex \ fmt \ git \ gmp \ + grep \ jemalloc \ + libffi libyaml \ llvm@15 \ maven \ opam \ pkg-config \ python3 \ z3 +``` + +To ensure that the backend can use pybind11 correctly, we must create a virtual +environment and install the `pybind11` package: +```shell +python3 -m venv venv +source venv/bin/activate python3 -m pip install pybind11 lit ``` +Guarantee that you have the JDK installed and on your `PATH`: +```shell +export PATH="/opt/homebrew/opt/openjdk/bin:$PATH" +``` + +Some tests rely on GNU Grep options, which are not available on macOS by +default. To ensure that the tests run correctly, you should add the path of +GNU Grep to your `PATH` in your shell profile: +```shell +export PATH=/opt/homebrew/Cellar/grep/3.11/libexec/gnubin/:$PATH +``` + # Building +## Environment Variables + +If you're building on macOS, type the following command or permanently +add it to your `env` (`.zshrc`, `.bashrc`, etc.), so that the Homebrew +installation of LLVM gets picked up correctly. We recommend adding it to +your shell profile. +```shell +export LLVM_DIR=$($(brew --prefix llvm@15)/bin/llvm-config --cmakedir) +``` + +If you don't usually use the `clang` from your Homebrew installation as +your default compiler, you can set the following CMake flag to use these +`clang` and `clang++`: +```shell +-DCMAKE_C_COMPILER="$(brew --prefix llvm@15)/bin/clang" \ +-DCMAKE_CXX_COMPILER="$(brew --prefix llvm@15)/bin/clang++" +``` +Once again, we recommend adding them and other llvm binaries to your +`PATH` in your shell profile: +```shell +export PATH="$(brew --prefix llvm@15)/bin:$PATH" +``` + +Some tests rely on GNU Grep options, which are not available on macOS by +default. 
To ensure that the tests run correctly, you can create an alias +for GNU Grep: +```shell +alias grep=ggrep +``` + Once the system dependencies have been installed, the backend can be built locally with: ```shell @@ -66,12 +117,6 @@ cmake .. \ make -j$(nproc) install ``` -If you're building on macOS, add the following option to your CMake invocation -so that the Homebrew installation of LLVM gets picked up correctly. -```shell --DLLVM_DIR=$($(brew --prefix llvm@15)/bin/llvm-config --cmakedir) -``` - Additionally, to build the pattern-matching compiler, run: ```shell cd matching @@ -91,7 +136,8 @@ To run the integration tests, run: ```shell lit test ``` -from the root source directory. +from the root source directory. You can use `-v` to see which test is being executed +and the output of failing tests. There is also a unit test suite for backend internals; Add the following option to your CMake invocation to enable it: @@ -113,7 +159,7 @@ and conform to best practices. ```shell # Ubuntu -apt install shellcheck clang-format-15 iwyu +apt install shellcheck clang-format-15 iwyu # macOS brew install shellcheck clang-format iwyu diff --git a/cmake/FixHomebrew.cmake b/cmake/FixHomebrew.cmake index 84a82a24d..37595a2c1 100644 --- a/cmake/FixHomebrew.cmake +++ b/cmake/FixHomebrew.cmake @@ -20,5 +20,15 @@ if(APPLE) include_directories(AFTER SYSTEM "${BREW_PREFIX}/include") link_directories(AFTER "${BREW_PREFIX}/lib") set(ENV{PKG_CONFIG_PATH} "${BREW_PREFIX}/opt/libffi/lib/pkgconfig") + + # Use LLD as the linker + # This is necessary as the default linker used by CMake on macOS is + # ld64, which currently has some incompatibilities with Homebrew and XCode15. + # See: https://github.com/orgs/Homebrew/discussions/4794#discussioncomment-7044468 + # Adding this flag avoids the following errors: + # ld: warning: duplicate -rpath ... ignored + # ld: warning: ignoring duplicate libraries ... 
+ add_link_options("-fuse-ld=lld") + endif() # USE_NIX endif() # APPLE From 0bc565f96b373943235f6d6d7860fa713677c957 Mon Sep 17 00:00:00 2001 From: Steven Eker Date: Tue, 10 Dec 2024 09:10:48 -0800 Subject: [PATCH 3/4] Change arena end ptr (#1177) * arena_end_ptr() now returns char *; changes percolated through collector * young_alloc_ptr() and old_alloc_ptr() inlined * evacuate() made a member function of class arena --- include/runtime/arena.h | 10 ++++++---- runtime/alloc/arena.cpp | 1 + runtime/collect/collect.cpp | 22 +++++++++++----------- runtime/lto/alloc.cpp | 10 +--------- 4 files changed, 19 insertions(+), 24 deletions(-) diff --git a/include/runtime/arena.h b/include/runtime/arena.h index bac3968a3..4bb5ac315 100644 --- a/include/runtime/arena.h +++ b/include/runtime/arena.h @@ -22,6 +22,8 @@ class arena { initialize_semispace(); } + char *evacuate(char *scan_ptr); + // Allocates the requested number of bytes as a contiguous region and returns a // pointer to the first allocated byte. void *kore_arena_alloc(size_t requested); @@ -33,7 +35,7 @@ class arena { // Returns a pointer to a location holding the address of last allocated // byte in the given arena plus 1. // This address is nullptr if nothing has been allocated ever in that arena. - char **arena_end_ptr() { return &allocation_ptr; } + char *arena_end_ptr() { return allocation_ptr; } // Clears the current allocation space by setting its start back to its first // block. It is used during garbage collection to effectively collect all of the @@ -73,9 +75,9 @@ class arena { // 3rd argument: the address of last allocated byte in the arena plus 1 // Return value: starting pointer + size unless this points to unallocated space // in which case nullptr is returned - static char *move_ptr(char *ptr, size_t size, char const *arena_end_ptr) { + static char *move_ptr(char *ptr, size_t size, char const *end_ptr) { char *next_ptr = ptr + size; - return (next_ptr == arena_end_ptr) ? 
nullptr : next_ptr; + return (next_ptr == end_ptr) ? nullptr : next_ptr; } // Returns the ID of the semispace where the given address was allocated. @@ -132,7 +134,7 @@ inline char arena::get_arena_semispace_id_of_object(void *ptr) { // Macro to define a new arena with the given ID. Supports IDs ranging from 0 to // 127. -#define REGISTER_ARENA(name, id) static thread_local arena name(id) +#define REGISTER_ARENA(name, id) thread_local arena name(id) #ifdef __MACH__ // diff --git a/runtime/alloc/arena.cpp b/runtime/alloc/arena.cpp index c657dada4..d10bcaad9 100644 --- a/runtime/alloc/arena.cpp +++ b/runtime/alloc/arena.cpp @@ -7,6 +7,7 @@ #include "runtime/alloc.h" #include "runtime/arena.h" +#include "runtime/collect.h" #include "runtime/header.h" extern size_t const VAR_BLOCK_SIZE = BLOCK_SIZE; diff --git a/runtime/collect/collect.cpp b/runtime/collect/collect.cpp index c439fdd0e..25a580ccb 100644 --- a/runtime/collect/collect.cpp +++ b/runtime/collect/collect.cpp @@ -10,9 +10,9 @@ #include extern "C" { +extern thread_local arena youngspace; +extern thread_local arena oldspace; -char **young_alloc_ptr(void); -char **old_alloc_ptr(void); char *youngspace_ptr(void); char *oldspace_ptr(void); @@ -245,7 +245,7 @@ static void migrate_root(void *curr_block, layoutitem *args, unsigned i) { } } -static char *evacuate(char *scan_ptr, char **alloc_ptr) { +char *arena::evacuate(char *scan_ptr) { auto *curr_block = (block *)scan_ptr; uint64_t const hdr = curr_block->h.hdr; uint16_t layout_int = layout_hdr(hdr); @@ -255,7 +255,7 @@ static char *evacuate(char *scan_ptr, char **alloc_ptr) { migrate_child(curr_block, layout_data->args, i, false); } } - return arena::move_ptr(scan_ptr, get_size(hdr, layout_int), *alloc_ptr); + return move_ptr(scan_ptr, get_size(hdr, layout_int), arena_end_ptr()); } // Contains the decision logic for collecting the old generation. 
@@ -293,7 +293,7 @@ void kore_collect( if (!last_alloc_ptr) { last_alloc_ptr = youngspace_ptr(); } - char *current_alloc_ptr = *young_alloc_ptr(); + char *current_alloc_ptr = youngspace.arena_end_ptr(); #endif kore_alloc_swap(collect_old); #ifdef GC_DBG @@ -301,16 +301,16 @@ void kore_collect( numBytesLiveAtCollection[i] = 0; } #endif - char *previous_oldspace_alloc_ptr = *old_alloc_ptr(); + char *previous_oldspace_alloc_ptr = oldspace.arena_end_ptr(); for (int i = 0; i < nroots; i++) { migrate_root(roots, type_info, i); } migrate_static_roots(); char *scan_ptr = youngspace_ptr(); - if (scan_ptr != *young_alloc_ptr()) { + if (scan_ptr != youngspace.arena_end_ptr()) { MEM_LOG("Evacuating young generation\n"); while (scan_ptr) { - scan_ptr = evacuate(scan_ptr, young_alloc_ptr()); + scan_ptr = youngspace.evacuate(scan_ptr); } } if (collect_old || !previous_oldspace_alloc_ptr) { @@ -318,10 +318,10 @@ void kore_collect( } else { scan_ptr = previous_oldspace_alloc_ptr; } - if (scan_ptr != *old_alloc_ptr()) { + if (scan_ptr != oldspace.arena_end_ptr()) { MEM_LOG("Evacuating old generation\n"); while (scan_ptr) { - scan_ptr = evacuate(scan_ptr, old_alloc_ptr()); + scan_ptr = oldspace.evacuate(scan_ptr); } } #ifdef GC_DBG @@ -329,7 +329,7 @@ void kore_collect( = arena::ptr_diff(current_alloc_ptr, last_alloc_ptr); assert(numBytesAllocedSinceLastCollection >= 0); fwrite(&numBytesAllocedSinceLastCollection, sizeof(ssize_t), 1, stderr); - last_alloc_ptr = *young_alloc_ptr(); + last_alloc_ptr = youngspace.arena_end_ptr(); fwrite( numBytesLiveAtCollection, sizeof(numBytesLiveAtCollection[0]), sizeof(numBytesLiveAtCollection) / sizeof(numBytesLiveAtCollection[0]), diff --git a/runtime/lto/alloc.cpp b/runtime/lto/alloc.cpp index 2928ad62f..aaafc932d 100644 --- a/runtime/lto/alloc.cpp +++ b/runtime/lto/alloc.cpp @@ -23,14 +23,6 @@ char *oldspace_ptr() { return oldspace.arena_start_ptr(); } -char **young_alloc_ptr() { - return youngspace.arena_end_ptr(); -} - -char **old_alloc_ptr() 
{ - return oldspace.arena_end_ptr(); -} - char youngspace_collection_id() { return youngspace.get_arena_collection_semispace_id(); } @@ -81,7 +73,7 @@ kore_resize_last_alloc(void *oldptr, size_t newrequest, size_t last_size) { newrequest = (newrequest + 7) & ~7; last_size = (last_size + 7) & ~7; - if (oldptr != *(youngspace.arena_end_ptr()) - last_size) { + if (oldptr != youngspace.arena_end_ptr() - last_size) { MEM_LOG( "May only reallocate last allocation. Tried to reallocate %p to %zd\n", oldptr, newrequest); From f0985353f8d4e1364ecd4a924d1395a4eb76b186 Mon Sep 17 00:00:00 2001 From: Dwight Guth Date: Tue, 10 Dec 2024 13:00:42 -0600 Subject: [PATCH 4/4] do not garbage collect empty map/list/set (#1168) We move the empty map/list/set allocations to be allocated using `new` and never deallocated or relocated in order to assist in the thread safety of the llvm backend. When we make each allocation arena thread-local, this change will be required, otherwise garbage collection in one thread will corrupt collections in other threads. 
--------- Co-authored-by: F-WRunTime --- .github/workflows/release.yml | 4 ++-- .github/workflows/test.yml | 4 ++-- include/runtime/header.h | 7 +++++-- runtime/collect/collect.cpp | 2 ++ runtime/collect/migrate_collection.cpp | 3 +++ runtime/collect/migrate_static_roots.cpp | 8 -------- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9aa84de92..698da9054 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -86,8 +86,8 @@ jobs: include: - runner: [self-hosted, linux, normal] os: ubuntu-24.04 - - runner: MacM1 - os: self-macos-12 + - runner: [self-hosted, self-macos-latest] + os: self-macos-latest runs-on: ${{ matrix.runner }} steps: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index dc5104876..b220f0055 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -17,8 +17,8 @@ jobs: include: - runner: [self-hosted, linux, normal] os: ubuntu-24.04 - - runner: MacM1 - os: self-macos-12 + - runner: [self-hosted, self-macos-latest] + os: self-macos-latest runs-on: ${{ matrix.runner }} steps: - name: 'Check out code' diff --git a/include/runtime/header.h b/include/runtime/header.h index d82cae004..3846e4ef3 100644 --- a/include/runtime/header.h +++ b/include/runtime/header.h @@ -91,7 +91,10 @@ struct kore_alloc_heap { template static void *allocate(size_t size, Tags...) 
{ if (during_gc()) { - return ::operator new(size); + auto *result = (string *)::operator new(size + sizeof(blockheader)); + init_with_len(result, size); + result->h.hdr |= NOT_YOUNG_OBJECT_BIT; + return result->data; } bool enabled = gc_enabled; gc_enabled = false; @@ -103,7 +106,7 @@ struct kore_alloc_heap { static void deallocate(size_t size, void *data) { if (during_gc()) { - ::operator delete(data); + ::operator delete((char *)data - sizeof(blockheader)); } } }; diff --git a/runtime/collect/collect.cpp b/runtime/collect/collect.cpp index 25a580ccb..f49123950 100644 --- a/runtime/collect/collect.cpp +++ b/runtime/collect/collect.cpp @@ -274,9 +274,11 @@ static bool should_collect_old_gen() { } void init_static_objects(void) { + is_gc = true; map m = map(); list l = list(); set s = set(); + is_gc = false; set_kore_memory_functions_for_gmp(); } diff --git a/runtime/collect/migrate_collection.cpp b/runtime/collect/migrate_collection.cpp index c6e644e0e..39b1a0cdf 100644 --- a/runtime/collect/migrate_collection.cpp +++ b/runtime/collect/migrate_collection.cpp @@ -6,6 +6,9 @@ void migrate_collection_node(void **node_ptr) { string *curr_block = STRUCT_BASE(string, data, *node_ptr); + if (!is_heap_block(curr_block)) { + return; + } if (youngspace_collection_id() != arena::get_arena_semispace_id_of_object((void *)curr_block) && oldspace_collection_id() diff --git a/runtime/collect/migrate_static_roots.cpp b/runtime/collect/migrate_static_roots.cpp index 3474e83ee..4a47f8e0d 100644 --- a/runtime/collect/migrate_static_roots.cpp +++ b/runtime/collect/migrate_static_roots.cpp @@ -10,14 +10,6 @@ extern thread_local bool kllvm_rand_state_initialized; extern "C" { void migrate_static_roots() { - auto &l1 = list_impl::empty_root(); - migrate_collection_node((void **)&l1); - auto &l2 = list_impl::empty_tail(); - migrate_collection_node((void **)&l2); - auto &s = set_impl::empty(); - migrate_collection_node((void **)&s); - auto &m = map_impl::empty(); - 
migrate_collection_node((void **)&m); if (kllvm_rand_state_initialized) { auto &rand = kllvm_rand_state->_mp_seed->_mp_d; string *limbs = STRUCT_BASE(string, data, rand);