diff --git a/.clabot b/.clabot
index 2b96339d1..fa8e8b23c 100644
--- a/.clabot
+++ b/.clabot
@@ -15,7 +15,7 @@
   "xvzcf": "Cryspen",
   "msprotz": "Microsoft",
   "R1kM": "Inria",
-  "samuel-lee-msft": "Microsoft
+  "samuel-lee-msft": "Microsoft"
 },
 "contributors": [
   "franziskuskiefer",
diff --git a/.gitignore b/.gitignore
index bbfe8fd8c..e8d79b9e0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,3 +27,5 @@ rust/.c
 .idea
 cmake-build-debug
+# ctags
+tags
diff --git a/CMakeLists.txt b/CMakeLists.txt
index dcdd2f684..cbab1cded 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -49,14 +49,19 @@ endif()
 set(hacl_VERSION_TWEAK "")
 
-# Load global config from exteral file.
-# This file must be generated before running cmake with ./mach.py --configure
+# Load global config from external file.
+# For a Clang build, this file can be generated before running cmake with
+# ./mach.py --configure
 # If the build is invoked through ./mach.py, a separate configuration is not
 # needed.
-# If the file is not present, i.e. cmake was invoked directly, we copy the default
-# config from config/default_config.cmake
+# We copy a default config from config/default_config.cmake or
+# config/default_config_msvc.cmake
 if(NOT EXISTS ${PROJECT_SOURCE_DIR}/build/config.cmake)
-  configure_file(${PROJECT_SOURCE_DIR}/config/default_config.cmake ${PROJECT_SOURCE_DIR}/build/config.cmake COPYONLY)
+  if(USE_MSVC)
+    configure_file(${PROJECT_SOURCE_DIR}/config/default_config_msvc.cmake ${PROJECT_SOURCE_DIR}/build/config.cmake COPYONLY)
+  else()
+    configure_file(${PROJECT_SOURCE_DIR}/config/default_config.cmake ${PROJECT_SOURCE_DIR}/build/config.cmake COPYONLY)
+  endif()
 endif()
 
 # Now include the config.
@@ -85,8 +90,11 @@ set(CMAKE_C_STANDARD_REQUIRED True)
 include(build/config.cmake)
 
 # Configure different targets
-# TODO: Set flags for MSVC
-if(NOT MSVC)
+if(MSVC)
+  add_compile_options(
+    $<$<CONFIG:Release>:/Ob3>
+  )
+else()
   add_compile_options(
     # -Wall
     # -Wextra
@@ -99,12 +107,20 @@
   )
 endif()
 
-if(WIN32 AND NOT MSVC)
-  # Enable everywhere for windows as long as libintvector.h is not included correctly.
-  add_compile_options(
-    -mavx
-    -mavx2
-  )
+if(WIN32)
+  # Enable AVX and AVX2 everywhere for Windows as long as libintvector.h is not included correctly.
+  if(MSVC)
+    add_compile_options(
+      $<$<COMPILE_LANGUAGE:C,CXX>:/arch:AVX>
+      $<$<COMPILE_LANGUAGE:C,CXX>:/arch:AVX2>
+    )
+  else()
+    # On Windows with clang-cl (our default) we take the Linux assembly
+    add_compile_options(
+      -mavx
+      -mavx2
+    )
+  endif()
 endif()
 
 # Set include paths
@@ -168,6 +184,11 @@ if(ENABLE_UBSAN)
   add_link_options(-fsanitize=undefined)
 endif()
 
+if(ENABLE_MSAN)
+  add_compile_options(-fsanitize=memory -fsanitize-memory-track-origins)
+  add_link_options(-fsanitize=memory)
+endif()
+
 # Sources are written by mach.py into the following lists
 # - SOURCES_std: All regular files
 # - SOURCES_vec128: Files that require vec128 hardware
@@ -335,7 +356,7 @@ configure_file(config/Config.h.in config.h)
 #
 # Dynamic library
 add_library(hacl SHARED ${SOURCES_std} ${VALE_OBJECTS})
 if(NOT MSVC)
-  target_compile_options(hacl PRIVATE -Wsign-conversion -Wconversion -Wall -Wextra -pedantic)
+  target_compile_options(hacl PRIVATE -Wall -Wextra -pedantic)
 endif()
 
 if(TOOLCHAIN_CAN_COMPILE_VEC128 AND HACL_VEC128_O)
@@ -359,6 +380,13 @@ if(TOOLCHAIN_CAN_COMPILE_VEC256 AND HACL_VEC256_O)
   target_sources(hacl_static PRIVATE $<TARGET_OBJECTS:hacl_vec256>)
 endif()
 
+if(BUILD_LIBCRUX)
+  add_library(libcrux_static STATIC ${LIBCRUX_SOURCES})
+  if(NOT MSVC)
+    target_compile_options(libcrux_static PRIVATE -Wall -Wextra -pedantic -Wshadow -Wunused-function)
+  endif()
+endif()
+
 # Install
 #
 # This allows package maintainers to control the install destination by setting
 # the appropriate cache variables.
@@ -465,10 +493,11 @@ if(ENABLE_TESTS)
       target_compile_options(${TEST_NAME} PRIVATE /std:c++20)
     endif(MSVC)
 
-    add_dependencies(${TEST_NAME} hacl hacl_cpu_features)
+    add_dependencies(${TEST_NAME} hacl libcrux_static hacl_cpu_features)
     target_link_libraries(${TEST_NAME} PRIVATE
       gtest_main
       hacl_static
+      libcrux_static
      hacl_cpu_features
       nlohmann_json::nlohmann_json
     )
@@ -482,6 +511,43 @@ if(ENABLE_TESTS)
         ${PROJECT_SOURCE_DIR}/tests/${TEST_NAME} $<TARGET_FILE_DIR:${TEST_NAME}>)
     endif()
   endforeach()
+
+  if(BUILD_LIBCRUX)
+    foreach(TEST_FILE IN LISTS LIBCRUX_TEST_SOURCES)
+      get_filename_component(TEST_NAME ${TEST_FILE} NAME_WE)
+      add_executable(${TEST_NAME}
+        ${TEST_FILE}
+      )
+
+      # Coverage
+      if(ENABLE_COVERAGE)
+        target_compile_options(${TEST_NAME} PRIVATE -fprofile-instr-generate -fcoverage-mapping)
+        target_link_options(${TEST_NAME} PRIVATE -fprofile-instr-generate -fcoverage-mapping)
+      endif()
+
+      if(MSVC)
+        # MSVC needs a modern C++ for designated initializers.
+        target_compile_options(${TEST_NAME} PRIVATE /std:c++20)
+      endif(MSVC)
+
+      add_dependencies(${TEST_NAME} hacl libcrux_static hacl_cpu_features)
+      target_link_libraries(${TEST_NAME} PRIVATE
+        gtest_main
+        hacl_cpu_features
+        nlohmann_json::nlohmann_json
+        libcrux_static
+        hacl_static
+      )
+
+      if(EXISTS ${PROJECT_SOURCE_DIR}/tests/${TEST_NAME})
+        # Copy test input files. They must be in a directory with the same
+        # name as the test and get copied to the build directory.
+        add_custom_command(TARGET ${TEST_NAME} POST_BUILD
+          COMMAND ${CMAKE_COMMAND} -E copy_directory
+          ${PROJECT_SOURCE_DIR}/tests/${TEST_NAME} $<TARGET_FILE_DIR:${TEST_NAME}>)
+      endif()
+    endforeach()
+  endif()
 endif()
 
 # Benchmarks
@@ -504,7 +570,8 @@ if(ENABLE_BENCHMARKS)
   FetchContent_Populate(benchmark
     GIT_REPOSITORY https://github.com/google/benchmark.git
     # The latest release 1.7.1 is broken due to https://github.com/google/benchmark/pull/1517
-    GIT_TAG b177433f3ee2513b1075140c723d73ab8901790f
+    # But also: need the fix for https://github.com/google/benchmark/pull/1669
+    GIT_TAG bc946b919cac6f25a199a526da571638cfde109f
   )
 
   add_subdirectory(${benchmark_SOURCE_DIR} ${benchmark_BINARY_DIR})
@@ -542,9 +609,10 @@ if(ENABLE_BENCHMARKS)
       target_compile_options(${BENCH_NAME} PRIVATE /std:c++20)
     endif(NOT MSVC)
 
-    add_dependencies(${BENCH_NAME} hacl hacl_cpu_features)
+    add_dependencies(${BENCH_NAME} hacl libcrux_static hacl_cpu_features)
     target_link_libraries(${BENCH_NAME} PRIVATE
       hacl_static
+      libcrux_static
       ecckiila
       blake2
       digestif
@@ -552,4 +620,32 @@
       benchmark::benchmark
     )
   endforeach()
+
+  if(BUILD_LIBCRUX)
+    foreach(BENCH_FILE IN LISTS LIBCRUX_BENCHMARK_SOURCES)
+      get_filename_component(BENCH_NAME ${BENCH_FILE} NAME_WE)
+      set(BENCH_NAME ${BENCH_NAME}_benchmark)
+      add_executable(${BENCH_NAME}
+        ${BENCH_FILE}
+      )
+
+      # Use modern C++
+      if(NOT MSVC)
+        target_compile_options(${BENCH_NAME} PRIVATE -std=c++17)
+      else()
+        # MSVC needs a modern C++ for designated initializers.
+        target_compile_options(${BENCH_NAME} PRIVATE /std:c++20)
+      endif(NOT MSVC)
+
+      target_compile_definitions(${BENCH_NAME} PUBLIC NO_OPENSSL)
+
+      add_dependencies(${BENCH_NAME} hacl libcrux_static hacl_cpu_features)
+      target_link_libraries(${BENCH_NAME} PRIVATE
+        hacl_cpu_features
+        benchmark::benchmark
+        libcrux_static
+        hacl_static
+      )
+    endforeach()
+  endif()
 endif(ENABLE_BENCHMARKS)
diff --git a/CMakePresets.json b/CMakePresets.json
new file mode 100644
index 000000000..2c296848e
--- /dev/null
+++ b/CMakePresets.json
@@ -0,0 +1,110 @@
+{
+  "version": 6,
+  "cmakeMinimumRequired": {
+    "major": 3,
+    "minor": 19,
+    "patch": 0
+  },
+  "configurePresets": [
+    {
+      "name": "default",
+      "displayName": "Default Config",
+      "hidden": true,
+      "binaryDir": "${sourceDir}/build/${presetName}",
+      "generator": "Ninja Multi-Config",
+      "cacheVariables": {
+        "CMAKE_INSTALL_PREFIX": "${sourceDir}/install/${presetName}",
+        "BUILD_LIBCRUX": "ON",
+        "ENABLE_TESTS": "ON",
+        "ENABLE_BENCHMARKS": "ON"
+      },
+      "architecture": {
+        "value": "x64",
+        "strategy": "external"
+      },
+      "condition": {
+        "type": "equals",
+        "lhs": "${hostSystemName}",
+        "rhs": "Windows"
+      },
+      "vendor": {
+        "microsoft.com/VisualStudioSettings/CMake/1.0": {
+          "hostOS": [ "Windows" ]
+        }
+      }
+    },
+    {
+      "name": "ninja.msvc",
+      "inherits": "default",
+      "displayName": "x64 Ninja Multi-Config MSVC",
+      "description": "x64 MSVC build using Ninja Multi-Config generator",
+      "cacheVariables": {
+        "USE_MSVC": "ON"
+      }
+    },
+    {
+      "name": "ninja.clang",
+      "inherits": "default",
+      "displayName": "x64 Ninja Multi-Config Clang",
+      "description": "x64 Clang build using Ninja Multi-Config generator"
+    }
+  ],
+  "buildPresets": [
+    {
+      "name": "ninja.msvc.Release",
+      "description": "x64 MSVC build using Ninja Multi-Config generator - Release",
+      "displayName": "Release",
+      "configurePreset": "ninja.msvc",
+      "configuration": "Release"
+    },
+    {
+      "name": "ninja.msvc.Debug",
+      "description": "x64 MSVC build using Ninja Multi-Config generator - Debug",
+      "displayName": "Debug",
+      "configurePreset": "ninja.msvc",
+
"configuration": "Debug" + }, + { + "name": "ninja.msvc.MinSizeRel", + "description": "x64 MSVC build using Ninja Multi-Config generator - MinSizeRel", + "displayName": "MinSizeRel", + "configurePreset": "ninja.msvc", + "configuration": "MinSizeRel" + }, + { + "name": "ninja.msvc.RelWithDebInfo", + "description": "x64 MSVC build using Ninja Multi-Config generator - RelWithDebInfo", + "displayName": "RelWithDebInfo", + "configurePreset": "ninja.msvc", + "configuration": "RelWithDebInfo" + }, + { + "name": "ninja.clang.Release", + "description": "x64 Clang build using Ninja Multi-Config generator - Release", + "displayName": "Release", + "configurePreset": "ninja.clang", + "configuration": "Release" + }, + { + "name": "ninja.clang.Debug", + "description": "x64 Clang build using Ninja Multi-Config generator - Debug", + "displayName": "Debug", + "configurePreset": "ninja.clang", + "configuration": "Debug" + }, + { + "name": "ninja.clang.MinSizeRel", + "description": "x64 Clang build using Ninja Multi-Config generator - MinSizeRel", + "displayName": "MinSizeRel", + "configurePreset": "ninja.clang", + "configuration": "MinSizeRel" + }, + { + "name": "ninja.clang.RelWithDebInfo", + "description": "x64 Clang build using Ninja Multi-Config generator - RelWithDebInfo", + "displayName": "RelWithDebInfo", + "configurePreset": "ninja.clang", + "configuration": "RelWithDebInfo" + } + ] +} diff --git a/README.md b/README.md index de08ca48f..2fb75989d 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ We need the following dependencies ... - [cmake] (3.17 or newer) - [ninja] (1.10 or newer) - [python] (3.6 or newer) -- [clang] (7 or newer) or [gcc] (7 or newer) +- [clang] (7 or newer), [gcc] (7 or newer), or MSVC on Windows (tested with 19.39.33519) Depending on your system you can install them as follows (click to expand) ... @@ -39,7 +39,7 @@ $ sudo pacman -S gcc
Fedora - + ```sh $ sudo dnf install cmake ninja-build python3 @@ -51,7 +51,7 @@ $ sudo dnf install gcc
Ubuntu - + ```sh $ sudo apt install cmake ninja-build python3 @@ -73,15 +73,111 @@ $ brew install gcc ```
+
+  Windows
+
+```powershell
+> winget install vswhere
+> winget install python
+> winget install Ninja-build.Ninja
+```
+
+To use WinGet to install Visual Studio 2022 with all the required components, first create an installation configuration file `vsconfig` (the command below assumes it was saved as `C:\vsconfig`) with the following contents:
+
+```json
+{
+  "version": "1.0",
+  "components": [
+    "Microsoft.Component.MSBuild",
+    "Microsoft.VisualStudio.Component.CoreEditor",
+    "Microsoft.VisualStudio.Component.NuGet",
+    "Microsoft.VisualStudio.Component.Roslyn.Compiler",
+    "Microsoft.VisualStudio.Component.TextTemplating",
+    "Microsoft.VisualStudio.Component.VC.CoreIde",
+    "Microsoft.VisualStudio.Component.VC.Redist.14.Latest",
+    "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
+    "Microsoft.VisualStudio.Component.VC.Llvm.Clang",
+    "Microsoft.VisualStudio.ComponentGroup.ArchitectureTools.Native",
+    "Microsoft.VisualStudio.ComponentGroup.WebToolsExtensions.CMake",
+    "Microsoft.VisualStudio.Component.VC.CMake.Project",
+    "Microsoft.VisualStudio.Component.Windows11SDK.22621",
+    "Microsoft.VisualStudio.Component.Windows11Sdk.WindowsPerformanceToolkit",
+    "Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core",
+    "Microsoft.VisualStudio.Workload.CoreEditor",
+    "Microsoft.VisualStudio.Workload.NativeDesktop"
+  ]
+}
+```
+
+Then, for instance, to install the Community edition of Visual Studio 2022, run the following command:
+
+```powershell
+> winget install --source winget --exact --id Microsoft.VisualStudio.2022.Community --override "--passive --config C:\vsconfig"
+```
+
+
 
 ## Build (and test) HACL Packages
 
 You can run ...
 
 ```sh
-$ ./mach build --test
+$ python mach build --test
 ```
+
+... to build HACL Packages and run the tests. All actions are driven by [mach]. See `python mach --help` for details.
+
+## Detailed build instructions
+
+### Using `mach`
+
+When switching between MSVC and Clang builds, invalidate the CMake cache by deleting `build\.cache` and `build\CMakeCache.txt`.
+
+
+ x64 Release distribution for Windows using Clang + +From a Developer Command Prompt for VS 2022. +```powershell +python mach build --release --benchmark --no-openssl ``` +
+ +
+ x64 Release distribution for Windows using MSVC + +From a Developer Command Prompt for VS 2022. +```powershell +python mach build --release --benchmark --msvc --no-openssl +``` +
+
+
+### Using CMake
+
+CMake presets have the advantage of providing a consistent build experience across VS, VS Code, and the CLI ([CMake Presets integration in Visual Studio and Visual Studio Code](https://devblogs.microsoft.com/cppblog/cmake-presets-integration-in-visual-studio-and-visual-studio-code/)). For instance, when loading the `hacl-packages` folder in Visual Studio 2022, configuration and build presets can be selected from drop-down lists in the toolbar. The presets provided use the Ninja Multi-Config generator. The examples below show how to build with these presets from the CLI.
+
+ x64 Release distribution for Windows using Clang + +From a Developer Command Prompt for VS 2022. +```powershell +cmake --preset ninja.clang +# The build will be in build\ninja.clang\Release +cmake --build --preset ninja.clang.Release +``` +
+ +
+ x64 Release distribution for Windows using MSVC + +From a Developer Command Prompt for VS 2022. +```powershell +cmake --preset ninja.msvc +# The build will be in build\ninja.msvc\Release +cmake --build --preset ninja.msvc.Release +``` +
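+
+### Using the libcrux Kyber768 API
+
+Builds configured with `BUILD_LIBCRUX=ON` produce a `libcrux_static` library whose Kyber768 KEM entry points are declared in `Libcrux_Kem_Kyber_Kyber768.h` and exercised by the new `tests/kyber.cc` and `benchmarks/kyber.cc` (see below). The round-trip sketch that follows is illustrative only: it mirrors the constants, signatures, and pointer-to-array calling convention used in `benchmarks/kyber.cc`, and the fixed randomness merely stands in for a cryptographically secure RNG (the benchmarks draw theirs from `generate_random` in `util.h`).
+
+```cpp
+#include <cassert>
+#include <cstdint>
+#include <cstring>
+
+#include "Libcrux_Kem_Kyber_Kyber768.h"
+
+int
+main()
+{
+  // Illustration only: fixed randomness. Key generation consumes 64 bytes,
+  // encapsulation 32. Use a secure RNG in real code.
+  uint8_t keygen_randomness[64] = { 0x37 };
+  uint8_t encaps_randomness[32] = { 0x38 };
+
+  uint8_t public_key[KYBER768_PUBLICKEYBYTES];
+  uint8_t secret_key[KYBER768_SECRETKEYBYTES];
+  Libcrux_Kyber768_GenerateKeyPair(public_key, secret_key, keygen_randomness);
+
+  // Encapsulate against the public key ...
+  uint8_t ciphertext[KYBER768_CIPHERTEXTBYTES];
+  uint8_t shared_secret[KYBER768_SHAREDSECRETBYTES];
+  Libcrux_Kyber768_Encapsulate(
+    ciphertext, shared_secret, &public_key, encaps_randomness);
+
+  // ... and decapsulate with the secret key; both sides must agree.
+  uint8_t shared_secret_decaps[KYBER768_SHAREDSECRETBYTES];
+  Libcrux_Kyber768_Decapsulate(shared_secret_decaps, &ciphertext, &secret_key);
+
+  assert(std::memcmp(shared_secret,
+                     shared_secret_decaps,
+                     KYBER768_SHAREDSECRETBYTES) == 0);
+  return 0;
+}
+```
+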
-... to build HACL Packages and run the tests. All actions are driven by [mach]. See `./mach --help` for details.
 
 ## Platform support
@@ -163,7 +259,7 @@ packages.
 
 Testing is done with [gtest] and requires a C++11 compiler (or C++20 MSVC).
 
-### Measure Test Coverage
+### Measure Test Coverage (on Linux using LLVM)
 
 Test coverage in HACL Packages can be measured with ...
 
@@ -173,13 +269,13 @@
 ./tools/coverage.sh
 ```
 
-Note that only clang is supported as a compiler and you may get an error ...
+Note that only Clang is supported as a compiler and you may get an error ...
 
 ```
 cc: error: unrecognized command-line option ‘-fprofile-instr-generate’; did you mean ‘-fprofile-generate’?
 ```
 
-... when your default compiler is not clang.
+... when your default compiler is not Clang.
 In this case, try to set the `CC` and `CXX` environment variables accordingly ...
 
 ```sh
diff --git a/_build.sh b/_build.sh
index 183e0302f..5dd654e3a 100755
--- a/_build.sh
+++ b/_build.sh
@@ -6,5 +6,5 @@ printf " ! USE ./mach FOR MORE OPTIONS !\n\n"
 
 mkdir build
 cp config/default_config.cmake build/config.cmake
-cmake -B build -G"Ninja Multi-Config"
+cmake -B build -G"Ninja Multi-Config" -DBUILD_LIBCRUX=ON
 ninja -f build-Release.ninja -C build
diff --git a/benchmarks/kyber.cc b/benchmarks/kyber.cc
new file mode 100644
index 000000000..50e16108b
--- /dev/null
+++ b/benchmarks/kyber.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2022 Cryspen Sarl
+ *
+ * Licensed under the Apache License, Version 2.0 or MIT.
+ * - http://www.apache.org/licenses/LICENSE-2.0
+ * - http://opensource.org/licenses/MIT
+ */
+
+#include "Libcrux_Kem_Kyber_Kyber768.h"
+#include "util.h"
+
+static void
+kyber768_key_generation(benchmark::State& state)
+{
+  uint8_t randomness[64];
+  generate_random(randomness, 64);
+
+  uint8_t public_key[KYBER768_PUBLICKEYBYTES];
+  uint8_t secret_key[KYBER768_SECRETKEYBYTES];
+
+  for (auto _ : state) {
+    Libcrux_Kyber768_GenerateKeyPair(public_key, secret_key, randomness);
+  }
+}
+
+static void
+kyber768_encapsulation(benchmark::State& state)
+{
+  // Key generation consumes 64 bytes of randomness, encapsulation 32.
+  uint8_t randomness[64];
+
+  uint8_t public_key[KYBER768_PUBLICKEYBYTES];
+  uint8_t secret_key[KYBER768_SECRETKEYBYTES];
+
+  uint8_t ciphertext[KYBER768_CIPHERTEXTBYTES];
+  uint8_t sharedSecret[KYBER768_SHAREDSECRETBYTES];
+
+  generate_random(randomness, 64);
+  Libcrux_Kyber768_GenerateKeyPair(public_key, secret_key, randomness);
+
+  generate_random(randomness, 32);
+  for (auto _ : state) {
+    Libcrux_Kyber768_Encapsulate(
+      ciphertext, sharedSecret, &public_key, randomness);
+  }
+}
+
+static void
+kyber768_decapsulation(benchmark::State& state)
+{
+  uint8_t randomness[64];
+
+  uint8_t public_key[KYBER768_PUBLICKEYBYTES];
+  uint8_t secret_key[KYBER768_SECRETKEYBYTES];
+
+  uint8_t ciphertext[KYBER768_CIPHERTEXTBYTES];
+  uint8_t sharedSecret[KYBER768_SHAREDSECRETBYTES];
+
+  generate_random(randomness, 64);
+  Libcrux_Kyber768_GenerateKeyPair(public_key, secret_key, randomness);
+
+  generate_random(randomness, 32);
+  Libcrux_Kyber768_Encapsulate(
+    ciphertext, sharedSecret, &public_key, randomness);
+
+  for (auto _ : state) {
+    Libcrux_Kyber768_Decapsulate(sharedSecret, &ciphertext, &secret_key);
+  }
+}
+
+BENCHMARK(kyber768_key_generation)->Setup(DoSetup);
+BENCHMARK(kyber768_encapsulation)->Setup(DoSetup);
+BENCHMARK(kyber768_decapsulation)->Setup(DoSetup);
+
+BENCHMARK_MAIN();
diff --git a/benchmarks/sha3.cc b/benchmarks/sha3.cc
index de744d8f8..1deb82c85 100644
--- a/benchmarks/sha3.cc
+++ b/benchmarks/sha3.cc
@@ -1,14 +1,36 @@
 #include "util.h"
 
 #include
"Hacl_Hash_SHA3.h" +#include "Hacl_Hash_SHA3_Scalar.h" + +#include "util.h" + +#ifdef HACL_CAN_COMPILE_VEC256 +#include "Hacl_Hash_SHA3_Simd256.h" +#endif const bytes input(1000, 0x37); -static bytes digest224(28, 0); -static bytes digest256(32, 0); -static bytes digest384(48, 0); -static bytes digest512(64, 0); -static bytes digest_shake(42, 0); +static bytes digest224_0(28, 0); +static bytes digest224_1(28, 0); +static bytes digest224_2(28, 0); +static bytes digest224_3(28, 0); +static bytes digest256_0(32, 0); +static bytes digest256_1(32, 0); +static bytes digest256_2(32, 0); +static bytes digest256_3(32, 0); +static bytes digest384_0(48, 0); +static bytes digest384_1(48, 0); +static bytes digest384_2(48, 0); +static bytes digest384_3(48, 0); +static bytes digest512_0(64, 0); +static bytes digest512_1(64, 0); +static bytes digest512_2(64, 0); +static bytes digest512_3(64, 0); +static bytes digest_shake_0(42, 0); +static bytes digest_shake_1(42, 0); +static bytes digest_shake_2(42, 0); +static bytes digest_shake_3(42, 0); const size_t chunk_len = 135; @@ -27,9 +49,10 @@ static void Hacl_Sha3_224(benchmark::State& state) { for (auto _ : state) { - Hacl_Hash_SHA3_sha3_224(digest224.data(), (uint8_t*)input.data(), input.size()); + Hacl_Hash_SHA3_sha3_224( + digest224_0.data(), (uint8_t*)input.data(), input.size()); } - if (digest224 != expected_digest_sha3_224) { + if (digest224_0 != expected_digest_sha3_224) { state.SkipWithError("Incorrect digest."); return; } @@ -37,12 +60,59 @@ Hacl_Sha3_224(benchmark::State& state) BENCHMARK(Hacl_Sha3_224)->Setup(DoSetup); +static void +Hacl_Sha3_224_Scalar(benchmark::State& state) +{ + for (auto _ : state) { + Hacl_Hash_SHA3_Scalar_sha3_224( + digest224_0.data(), (uint8_t*)input.data(), input.size()); + } + if (digest224_0 != expected_digest_sha3_224) { + state.SkipWithError("Incorrect digest."); + return; + } +} + +BENCHMARK(Hacl_Sha3_224_Scalar)->Setup(DoSetup); + +#ifdef HACL_CAN_COMPILE_VEC256 +static void +Hacl_Sha3_224_Simd256(benchmark::State& state) +{ + if (!vec256_support()) { + state.SkipWithError("No vec256 support"); + return; + } + + for (auto _ : state) { + Hacl_Hash_SHA3_Simd256_sha3_224(digest224_0.data(), + digest224_1.data(), + digest224_2.data(), + digest224_3.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + input.size()); + } + if (digest224_0 != expected_digest_sha3_224 || + digest224_1 != expected_digest_sha3_224 || + digest224_2 != expected_digest_sha3_224 || + digest224_3 != expected_digest_sha3_224) { + state.SkipWithError("Incorrect digest."); + return; + } +} + +BENCHMARK(Hacl_Sha3_224_Simd256)->Setup(DoSetup); +#endif + #ifndef NO_OPENSSL BENCHMARK_CAPTURE(OpenSSL_hash_oneshot, sha3_224, EVP_sha3_224(), input, - digest224.size(), + digest224_0.size(), expected_digest_sha3_224) ->Setup(DoSetup); #endif @@ -51,9 +121,10 @@ static void Hacl_Sha3_256(benchmark::State& state) { for (auto _ : state) { - Hacl_Hash_SHA3_sha3_256(digest256.data(), (uint8_t*)input.data(), input.size()); + Hacl_Hash_SHA3_sha3_256( + digest256_0.data(), (uint8_t*)input.data(), input.size()); } - if (digest256 != expected_digest_sha3_256) { + if (digest256_0 != expected_digest_sha3_256) { state.SkipWithError("Incorrect digest."); return; } @@ -88,12 +159,59 @@ Digestif_sha3_256(benchmark::State& state) BENCHMARK(Digestif_sha3_256)->Setup(DoSetup); +static void +Hacl_Sha3_256_Scalar(benchmark::State& state) +{ + for (auto _ : state) { + Hacl_Hash_SHA3_Scalar_sha3_256( + 
digest256_0.data(), (uint8_t*)input.data(), input.size()); + } + if (digest256_0 != expected_digest_sha3_256) { + state.SkipWithError("Incorrect digest."); + return; + } +} + +BENCHMARK(Hacl_Sha3_256_Scalar)->Setup(DoSetup); + +#ifdef HACL_CAN_COMPILE_VEC256 +static void +Hacl_Sha3_256_Simd256(benchmark::State& state) +{ + if (!vec256_support()) { + state.SkipWithError("No vec256 support"); + return; + } + + for (auto _ : state) { + Hacl_Hash_SHA3_Simd256_sha3_256(digest256_0.data(), + digest256_1.data(), + digest256_2.data(), + digest256_3.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + input.size()); + } + if (digest256_0 != expected_digest_sha3_256 || + digest256_1 != expected_digest_sha3_256 || + digest256_2 != expected_digest_sha3_256 || + digest256_3 != expected_digest_sha3_256) { + state.SkipWithError("Incorrect digest."); + return; + } +} + +BENCHMARK(Hacl_Sha3_256_Simd256)->Setup(DoSetup); +#endif + #ifndef NO_OPENSSL BENCHMARK_CAPTURE(OpenSSL_hash_oneshot, sha3_256, EVP_sha3_256(), input, - digest256.size(), + digest256_0.size(), expected_digest_sha3_256) ->Setup(DoSetup); #endif @@ -102,9 +220,10 @@ static void Hacl_Sha3_384(benchmark::State& state) { for (auto _ : state) { - Hacl_Hash_SHA3_sha3_384(digest384.data(), (uint8_t*)input.data(), input.size()); + Hacl_Hash_SHA3_sha3_384( + digest384_0.data(), (uint8_t*)input.data(), input.size()); } - if (digest384 != expected_digest_sha3_384) { + if (digest384_0 != expected_digest_sha3_384) { state.SkipWithError("Incorrect digest."); return; } @@ -112,12 +231,59 @@ Hacl_Sha3_384(benchmark::State& state) BENCHMARK(Hacl_Sha3_384)->Setup(DoSetup); +static void +Hacl_Sha3_384_Scalar(benchmark::State& state) +{ + for (auto _ : state) { + Hacl_Hash_SHA3_Scalar_sha3_384( + digest384_0.data(), (uint8_t*)input.data(), input.size()); + } + if (digest384_0 != expected_digest_sha3_384) { + state.SkipWithError("Incorrect digest."); + return; + } +} + +BENCHMARK(Hacl_Sha3_384_Scalar)->Setup(DoSetup); + +#ifdef HACL_CAN_COMPILE_VEC256 +static void +Hacl_Sha3_384_Simd256(benchmark::State& state) +{ + if (!vec256_support()) { + state.SkipWithError("No vec256 support"); + return; + } + + for (auto _ : state) { + Hacl_Hash_SHA3_Simd256_sha3_384(digest384_0.data(), + digest384_1.data(), + digest384_2.data(), + digest384_3.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + input.size()); + } + if (digest384_0 != expected_digest_sha3_384 || + digest384_1 != expected_digest_sha3_384 || + digest384_2 != expected_digest_sha3_384 || + digest384_3 != expected_digest_sha3_384) { + state.SkipWithError("Incorrect digest."); + return; + } +} + +BENCHMARK(Hacl_Sha3_384_Simd256)->Setup(DoSetup); +#endif + #ifndef NO_OPENSSL BENCHMARK_CAPTURE(OpenSSL_hash_oneshot, sha3_384, EVP_sha3_384(), input, - digest384.size(), + digest384_0.size(), expected_digest_sha3_384) ->Setup(DoSetup); #endif @@ -126,9 +292,10 @@ static void Hacl_Sha3_512(benchmark::State& state) { for (auto _ : state) { - Hacl_Hash_SHA3_sha3_512(digest512.data(), (uint8_t*)input.data(), input.size()); + Hacl_Hash_SHA3_sha3_512( + digest512_0.data(), (uint8_t*)input.data(), input.size()); } - if (digest512 != expected_digest_sha3_512) { + if (digest512_0 != expected_digest_sha3_512) { state.SkipWithError("Incorrect digest."); return; } @@ -161,12 +328,59 @@ Digestif_sha3_512(benchmark::State& state) BENCHMARK(Digestif_sha3_512)->Setup(DoSetup); +static void 
+Hacl_Sha3_512_Scalar(benchmark::State& state) +{ + for (auto _ : state) { + Hacl_Hash_SHA3_Scalar_sha3_512( + digest512_0.data(), (uint8_t*)input.data(), input.size()); + } + if (digest512_0 != expected_digest_sha3_512) { + state.SkipWithError("Incorrect digest."); + return; + } +} + +BENCHMARK(Hacl_Sha3_512_Scalar)->Setup(DoSetup); + +#ifdef HACL_CAN_COMPILE_VEC256 +static void +Hacl_Sha3_512_Simd256(benchmark::State& state) +{ + if (!vec256_support()) { + state.SkipWithError("No vec256 support"); + return; + } + + for (auto _ : state) { + Hacl_Hash_SHA3_Simd256_sha3_512(digest512_0.data(), + digest512_1.data(), + digest512_2.data(), + digest512_3.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + input.size()); + } + if (digest512_0 != expected_digest_sha3_512 || + digest512_1 != expected_digest_sha3_512 || + digest512_2 != expected_digest_sha3_512 || + digest512_3 != expected_digest_sha3_512) { + state.SkipWithError("Incorrect digest."); + return; + } +} + +BENCHMARK(Hacl_Sha3_512_Simd256)->Setup(DoSetup); +#endif + #ifndef NO_OPENSSL BENCHMARK_CAPTURE(OpenSSL_hash_oneshot, sha3_512, EVP_sha3_512(), input, - digest512.size(), + digest512_0.size(), expected_digest_sha3_512) ->Setup(DoSetup); #endif @@ -188,11 +402,11 @@ Hacl_Sha3_256_Streaming(benchmark::State& state) } // Finish - Hacl_Hash_SHA3_digest(sha_state, digest256.data()); + Hacl_Hash_SHA3_digest(sha_state, digest256_0.data()); Hacl_Hash_SHA3_free(sha_state); } - if (digest256 != expected_digest_sha3_256) { + if (digest256_0 != expected_digest_sha3_256) { state.SkipWithError("Incorrect digest."); return; } @@ -206,7 +420,7 @@ BENCHMARK_CAPTURE(OpenSSL_hash_streaming, EVP_sha3_224(), input, chunk_len, - digest224.size(), + digest224_0.size(), expected_digest_sha3_224) ->Setup(DoSetup); @@ -215,7 +429,7 @@ BENCHMARK_CAPTURE(OpenSSL_hash_streaming, EVP_sha3_256(), input, chunk_len, - digest256.size(), + digest256_0.size(), expected_digest_sha3_256) ->Setup(DoSetup); @@ -224,7 +438,7 @@ BENCHMARK_CAPTURE(OpenSSL_hash_streaming, EVP_sha3_384(), input, chunk_len, - digest384.size(), + digest384_0.size(), expected_digest_sha3_384) ->Setup(DoSetup); @@ -233,7 +447,7 @@ BENCHMARK_CAPTURE(OpenSSL_hash_streaming, EVP_sha3_512(), input, chunk_len, - digest512.size(), + digest512_0.size(), expected_digest_sha3_512) ->Setup(DoSetup); #endif @@ -242,22 +456,104 @@ static void Hacl_Sha3_shake128(benchmark::State& state) { for (auto _ : state) { - Hacl_Hash_SHA3_shake128_hacl( - input.size(), (uint8_t*)input.data(), digest_shake.size(), digest_shake.data()); + Hacl_Hash_SHA3_shake128_hacl(input.size(), + (uint8_t*)input.data(), + digest_shake_0.size(), + digest_shake_0.data()); } } BENCHMARK(Hacl_Sha3_shake128)->Setup(DoSetup); +static void +Hacl_Sha3_shake128_Scalar(benchmark::State& state) +{ + for (auto _ : state) { + Hacl_Hash_SHA3_Scalar_shake128(digest_shake_0.data(), + digest_shake_0.size(), + (uint8_t*)input.data(), + input.size()); + } +} + +BENCHMARK(Hacl_Sha3_shake128_Scalar)->Setup(DoSetup); + +#ifdef HACL_CAN_COMPILE_VEC256 +static void +Hacl_Sha3_shake128_Simd256(benchmark::State& state) +{ + if (!vec256_support()) { + state.SkipWithError("No vec256 support"); + return; + } + + for (auto _ : state) { + Hacl_Hash_SHA3_Simd256_shake128(digest_shake_0.data(), + digest_shake_1.data(), + digest_shake_2.data(), + digest_shake_3.data(), + digest_shake_0.size(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + 
input.size()); + } +} + +BENCHMARK(Hacl_Sha3_shake128_Simd256)->Setup(DoSetup); +#endif + static void Hacl_Sha3_shake256(benchmark::State& state) { for (auto _ : state) { - Hacl_Hash_SHA3_shake256_hacl( - input.size(), (uint8_t*)input.data(), digest_shake.size(), digest_shake.data()); + Hacl_Hash_SHA3_shake256_hacl(input.size(), + (uint8_t*)input.data(), + digest_shake_0.size(), + digest_shake_0.data()); } } BENCHMARK(Hacl_Sha3_shake256)->Setup(DoSetup); +static void +Hacl_Sha3_shake256_Scalar(benchmark::State& state) +{ + for (auto _ : state) { + Hacl_Hash_SHA3_Scalar_shake256(digest_shake_0.data(), + digest_shake_0.size(), + (uint8_t*)input.data(), + input.size()); + } +} + +BENCHMARK(Hacl_Sha3_shake256_Scalar)->Setup(DoSetup); + +#ifdef HACL_CAN_COMPILE_VEC256 +static void +Hacl_Sha3_shake256_Simd256(benchmark::State& state) +{ + if (!vec256_support()) { + state.SkipWithError("No vec256 support"); + return; + } + + for (auto _ : state) { + Hacl_Hash_SHA3_Simd256_shake256(digest_shake_0.data(), + digest_shake_1.data(), + digest_shake_2.data(), + digest_shake_3.data(), + digest_shake_0.size(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + (uint8_t*)input.data(), + input.size()); + } +} + +BENCHMARK(Hacl_Sha3_shake256_Simd256)->Setup(DoSetup); +#endif + BENCHMARK_MAIN(); diff --git a/config/config.json b/config/config.json index 7dd4cae35..32017d868 100644 --- a/config/config.json +++ b/config/config.json @@ -6,6 +6,10 @@ "vale_include_paths": [ "vale/include" ], + "libcrux_include_paths": [ + "libcrux/include", + "libcrux/eurydice" + ], "hacl_sources": { "nacl": [ { @@ -325,6 +329,38 @@ "file": "Hacl_Frodo_KEM.c", "features": "std" } + ], + "sha3-mb": [ + { + "file": "Hacl_Hash_SHA3_Scalar.c", + "features": "std" + }, + { + "file": "Hacl_Hash_SHA3_Simd256.c", + "features": "vec256" + } + ] + }, + "libcrux_sources": { + "kyber": [ + { + "file": "Libcrux_Kem_Kyber_Kyber768.c" + }, + { + "file": "libcrux_kyber512.c" + }, + { + "file": "libcrux_kyber768.c" + }, + { + "file": "libcrux_kyber1024.c" + }, + { + "file": "libcrux_hacl_glue.c" + }, + { + "file": "core.c" + } ] }, "vale_sources": { @@ -499,6 +535,11 @@ "aead.cc" ] }, + "libcrux_tests": { + "kyber": [ + "kyber.cc" + ] + }, "benchmarks": { "blake2": [ "blake.cc" @@ -545,5 +586,10 @@ "rsapss": [ "rsapss.cc" ] + }, + "libcrux_benchmarks": { + "kyber": [ + "kyber.cc" + ] } } diff --git a/config/default_config.cmake b/config/default_config.cmake index 9a05896e9..5efc99c76 100644 --- a/config/default_config.cmake +++ b/config/default_config.cmake @@ -6,8 +6,8 @@ set(SOURCES_std ${PROJECT_SOURCE_DIR}/src/Hacl_HMAC_DRBG.c ${PROJECT_SOURCE_DIR}/src/Hacl_HMAC.c ${PROJECT_SOURCE_DIR}/src/Hacl_Hash_SHA2.c - ${PROJECT_SOURCE_DIR}/src/Hacl_Hash_Blake2b.c ${PROJECT_SOURCE_DIR}/src/Hacl_Hash_Blake2s.c + ${PROJECT_SOURCE_DIR}/src/Hacl_Hash_Blake2b.c ${PROJECT_SOURCE_DIR}/src/Hacl_Ed25519.c ${PROJECT_SOURCE_DIR}/src/Hacl_EC_Ed25519.c ${PROJECT_SOURCE_DIR}/src/Hacl_Hash_Base.c @@ -52,18 +52,20 @@ set(SOURCES_std ${PROJECT_SOURCE_DIR}/src/EverCrypt_Chacha20Poly1305.c ${PROJECT_SOURCE_DIR}/src/EverCrypt_Poly1305.c ${PROJECT_SOURCE_DIR}/src/EverCrypt_AEAD.c + ${PROJECT_SOURCE_DIR}/src/Hacl_Hash_SHA3_Scalar.c ) set(SOURCES_vec256 ${PROJECT_SOURCE_DIR}/src/Hacl_Hash_Blake2b_Simd256.c ${PROJECT_SOURCE_DIR}/src/Hacl_AEAD_Chacha20Poly1305_Simd256.c - ${PROJECT_SOURCE_DIR}/src/Hacl_MAC_Poly1305_Simd256.c ${PROJECT_SOURCE_DIR}/src/Hacl_Chacha20_Vec256.c + ${PROJECT_SOURCE_DIR}/src/Hacl_MAC_Poly1305_Simd256.c 
${PROJECT_SOURCE_DIR}/src/Hacl_SHA2_Vec256.c ${PROJECT_SOURCE_DIR}/src/Hacl_HKDF_Blake2b_256.c ${PROJECT_SOURCE_DIR}/src/Hacl_HMAC_Blake2b_256.c ${PROJECT_SOURCE_DIR}/src/Hacl_HPKE_Curve51_CP256_SHA256.c ${PROJECT_SOURCE_DIR}/src/Hacl_HPKE_Curve51_CP256_SHA512.c ${PROJECT_SOURCE_DIR}/src/Hacl_HPKE_P256_CP256_SHA256.c + ${PROJECT_SOURCE_DIR}/src/Hacl_Hash_SHA3_Simd256.c ) set(SOURCES_vec128 ${PROJECT_SOURCE_DIR}/src/Hacl_Hash_Blake2s_Simd128.c @@ -71,8 +73,8 @@ set(SOURCES_vec128 ${PROJECT_SOURCE_DIR}/src/Hacl_Bignum64.c ${PROJECT_SOURCE_DIR}/src/Hacl_GenericField64.c ${PROJECT_SOURCE_DIR}/src/Hacl_AEAD_Chacha20Poly1305_Simd128.c - ${PROJECT_SOURCE_DIR}/src/Hacl_MAC_Poly1305_Simd128.c ${PROJECT_SOURCE_DIR}/src/Hacl_Chacha20_Vec128.c + ${PROJECT_SOURCE_DIR}/src/Hacl_MAC_Poly1305_Simd128.c ${PROJECT_SOURCE_DIR}/src/Hacl_SHA2_Vec128.c ${PROJECT_SOURCE_DIR}/src/Hacl_HKDF_Blake2s_128.c ${PROJECT_SOURCE_DIR}/src/Hacl_HMAC_Blake2s_128.c @@ -110,16 +112,16 @@ set(INCLUDES ${PROJECT_SOURCE_DIR}/karamel/krmllib/dist/minimal/LowStar_Endianness.h ${PROJECT_SOURCE_DIR}/include/Hacl_Salsa20.h ${PROJECT_SOURCE_DIR}/include/Hacl_MAC_Poly1305.h + ${PROJECT_SOURCE_DIR}/include/Hacl_Streaming_Types.h ${PROJECT_SOURCE_DIR}/include/Hacl_Krmllib.h ${PROJECT_SOURCE_DIR}/include/Hacl_Curve25519_51.h ${PROJECT_SOURCE_DIR}/include/internal/Hacl_Krmllib.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Krmllib.h ${PROJECT_SOURCE_DIR}/include/Hacl_HMAC_DRBG.h - ${PROJECT_SOURCE_DIR}/include/Hacl_Streaming_Types.h ${PROJECT_SOURCE_DIR}/include/Hacl_HMAC.h ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_SHA2.h - ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2b.h ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2s.h + ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2b.h ${PROJECT_SOURCE_DIR}/include/internal/Hacl_Ed25519.h ${PROJECT_SOURCE_DIR}/include/internal/Hacl_Hash_SHA2.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_SHA2.h @@ -136,14 +138,16 @@ set(INCLUDES ${PROJECT_SOURCE_DIR}/include/Hacl_EC_Ed25519.h ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Base.h ${PROJECT_SOURCE_DIR}/include/internal/Hacl_Hash_Blake2b.h - ${PROJECT_SOURCE_DIR}/include/internal/Hacl_Hash_Blake2s.h ${PROJECT_SOURCE_DIR}/include/internal/Hacl_Impl_Blake2_Constants.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_Blake2b.h - ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_Blake2s.h ${PROJECT_SOURCE_DIR}/include/lib_memzero0.h - ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2b_Simd256.h + ${PROJECT_SOURCE_DIR}/include/internal/Hacl_Hash_Blake2s.h + ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_Blake2s.h + ${PROJECT_SOURCE_DIR}/include/internal/Hacl_Hash_Blake2b_Simd256.h + ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_Blake2b_Simd256.h ${PROJECT_SOURCE_DIR}/include/libintvector.h - ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2s_Simd128.h + ${PROJECT_SOURCE_DIR}/include/internal/Hacl_Hash_Blake2s_Simd128.h + ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_Blake2s_Simd128.h ${PROJECT_SOURCE_DIR}/include/Hacl_Bignum256_32.h ${PROJECT_SOURCE_DIR}/include/Hacl_Bignum.h ${PROJECT_SOURCE_DIR}/include/internal/Hacl_Bignum.h @@ -157,16 +161,16 @@ set(INCLUDES ${PROJECT_SOURCE_DIR}/include/Hacl_GenericField64.h ${PROJECT_SOURCE_DIR}/include/Hacl_AEAD_Chacha20Poly1305.h ${PROJECT_SOURCE_DIR}/include/Hacl_Chacha20.h + ${PROJECT_SOURCE_DIR}/include/internal/Hacl_MAC_Poly1305.h + ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_MAC_Poly1305.h ${PROJECT_SOURCE_DIR}/include/Hacl_Chacha20_Vec32.h ${PROJECT_SOURCE_DIR}/include/internal/Hacl_Chacha20.h 
${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Chacha20.h ${PROJECT_SOURCE_DIR}/include/Hacl_AEAD_Chacha20Poly1305_Simd128.h - ${PROJECT_SOURCE_DIR}/include/Hacl_MAC_Poly1305_Simd128.h ${PROJECT_SOURCE_DIR}/include/Hacl_Chacha20_Vec128.h ${PROJECT_SOURCE_DIR}/include/internal/Hacl_MAC_Poly1305_Simd128.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_MAC_Poly1305_Simd128.h ${PROJECT_SOURCE_DIR}/include/Hacl_AEAD_Chacha20Poly1305_Simd256.h - ${PROJECT_SOURCE_DIR}/include/Hacl_MAC_Poly1305_Simd256.h ${PROJECT_SOURCE_DIR}/include/Hacl_Chacha20_Vec256.h ${PROJECT_SOURCE_DIR}/include/internal/Hacl_MAC_Poly1305_Simd256.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_MAC_Poly1305_Simd256.h @@ -193,11 +197,15 @@ set(INCLUDES ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_SHA1.h ${PROJECT_SOURCE_DIR}/include/internal/Hacl_Hash_MD5.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_MD5.h + ${PROJECT_SOURCE_DIR}/include/internal/Hacl_HMAC.h + ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_HMAC.h ${PROJECT_SOURCE_DIR}/include/Hacl_HKDF.h ${PROJECT_SOURCE_DIR}/include/Hacl_HKDF_Blake2s_128.h ${PROJECT_SOURCE_DIR}/include/Hacl_HMAC_Blake2s_128.h + ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2s_Simd128.h ${PROJECT_SOURCE_DIR}/include/Hacl_HKDF_Blake2b_256.h ${PROJECT_SOURCE_DIR}/include/Hacl_HMAC_Blake2b_256.h + ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2b_Simd256.h ${PROJECT_SOURCE_DIR}/include/Hacl_RSAPSS.h ${PROJECT_SOURCE_DIR}/include/Hacl_HPKE_Curve64_CP128_SHA256.h ${PROJECT_SOURCE_DIR}/include/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h @@ -238,7 +246,12 @@ set(INCLUDES ${PROJECT_SOURCE_DIR}/include/EverCrypt_Cipher.h ${PROJECT_SOURCE_DIR}/include/EverCrypt_Chacha20Poly1305.h ${PROJECT_SOURCE_DIR}/include/EverCrypt_Poly1305.h + ${PROJECT_SOURCE_DIR}/include/Hacl_MAC_Poly1305_Simd256.h + ${PROJECT_SOURCE_DIR}/include/Hacl_MAC_Poly1305_Simd128.h ${PROJECT_SOURCE_DIR}/include/EverCrypt_AEAD.h + ${PROJECT_SOURCE_DIR}/include/internal/Hacl_Hash_SHA3_Scalar.h + ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_SHA3_Scalar.h + ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_SHA3_Simd256.h ) set(PUBLIC_INCLUDES ${PROJECT_SOURCE_DIR}/include/Hacl_NaCl.h @@ -249,15 +262,15 @@ set(PUBLIC_INCLUDES ${PROJECT_SOURCE_DIR}/karamel/krmllib/dist/minimal/LowStar_Endianness.h ${PROJECT_SOURCE_DIR}/include/Hacl_Salsa20.h ${PROJECT_SOURCE_DIR}/include/Hacl_MAC_Poly1305.h + ${PROJECT_SOURCE_DIR}/include/Hacl_Streaming_Types.h ${PROJECT_SOURCE_DIR}/include/Hacl_Krmllib.h ${PROJECT_SOURCE_DIR}/include/Hacl_Curve25519_51.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Krmllib.h ${PROJECT_SOURCE_DIR}/include/Hacl_HMAC_DRBG.h - ${PROJECT_SOURCE_DIR}/include/Hacl_Streaming_Types.h ${PROJECT_SOURCE_DIR}/include/Hacl_HMAC.h ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_SHA2.h - ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2b.h ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2s.h + ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2b.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_SHA2.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Curve25519_51.h ${PROJECT_SOURCE_DIR}/include/lib_intrinsics.h @@ -268,11 +281,11 @@ set(PUBLIC_INCLUDES ${PROJECT_SOURCE_DIR}/include/Hacl_EC_Ed25519.h ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Base.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_Blake2b.h - ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_Blake2s.h ${PROJECT_SOURCE_DIR}/include/lib_memzero0.h - ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2b_Simd256.h + ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_Blake2s.h + 
${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_Blake2b_Simd256.h ${PROJECT_SOURCE_DIR}/include/libintvector.h - ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2s_Simd128.h + ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_Blake2s_Simd128.h ${PROJECT_SOURCE_DIR}/include/Hacl_Bignum256_32.h ${PROJECT_SOURCE_DIR}/include/Hacl_Bignum.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Bignum.h @@ -285,14 +298,13 @@ set(PUBLIC_INCLUDES ${PROJECT_SOURCE_DIR}/include/Hacl_GenericField64.h ${PROJECT_SOURCE_DIR}/include/Hacl_AEAD_Chacha20Poly1305.h ${PROJECT_SOURCE_DIR}/include/Hacl_Chacha20.h + ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_MAC_Poly1305.h ${PROJECT_SOURCE_DIR}/include/Hacl_Chacha20_Vec32.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Chacha20.h ${PROJECT_SOURCE_DIR}/include/Hacl_AEAD_Chacha20Poly1305_Simd128.h - ${PROJECT_SOURCE_DIR}/include/Hacl_MAC_Poly1305_Simd128.h ${PROJECT_SOURCE_DIR}/include/Hacl_Chacha20_Vec128.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_MAC_Poly1305_Simd128.h ${PROJECT_SOURCE_DIR}/include/Hacl_AEAD_Chacha20Poly1305_Simd256.h - ${PROJECT_SOURCE_DIR}/include/Hacl_MAC_Poly1305_Simd256.h ${PROJECT_SOURCE_DIR}/include/Hacl_Chacha20_Vec256.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_MAC_Poly1305_Simd256.h ${PROJECT_SOURCE_DIR}/include/Hacl_Curve25519_64.h @@ -307,11 +319,14 @@ set(PUBLIC_INCLUDES ${PROJECT_SOURCE_DIR}/include/Hacl_SHA2_Vec256.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_SHA1.h ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_Hash_MD5.h + ${PROJECT_SOURCE_DIR}/include/internal/../Hacl_HMAC.h ${PROJECT_SOURCE_DIR}/include/Hacl_HKDF.h ${PROJECT_SOURCE_DIR}/include/Hacl_HKDF_Blake2s_128.h ${PROJECT_SOURCE_DIR}/include/Hacl_HMAC_Blake2s_128.h + ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2s_Simd128.h ${PROJECT_SOURCE_DIR}/include/Hacl_HKDF_Blake2b_256.h ${PROJECT_SOURCE_DIR}/include/Hacl_HMAC_Blake2b_256.h + ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_Blake2b_Simd256.h ${PROJECT_SOURCE_DIR}/include/Hacl_RSAPSS.h ${PROJECT_SOURCE_DIR}/include/Hacl_HPKE_Curve64_CP128_SHA256.h ${PROJECT_SOURCE_DIR}/include/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h @@ -348,7 +363,11 @@ set(PUBLIC_INCLUDES ${PROJECT_SOURCE_DIR}/include/EverCrypt_Cipher.h ${PROJECT_SOURCE_DIR}/include/EverCrypt_Chacha20Poly1305.h ${PROJECT_SOURCE_DIR}/include/EverCrypt_Poly1305.h + ${PROJECT_SOURCE_DIR}/include/Hacl_MAC_Poly1305_Simd256.h + ${PROJECT_SOURCE_DIR}/include/Hacl_MAC_Poly1305_Simd128.h ${PROJECT_SOURCE_DIR}/include/EverCrypt_AEAD.h + ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_SHA3_Scalar.h + ${PROJECT_SOURCE_DIR}/include/Hacl_Hash_SHA3_Simd256.h ) set(ALGORITHMS nacl @@ -373,6 +392,7 @@ set(ALGORITHMS rsapss hpke frodo + sha3-mb ) set(INCLUDE_PATHS ${PROJECT_SOURCE_DIR}/include @@ -380,6 +400,7 @@ set(INCLUDE_PATHS ${PROJECT_SOURCE_DIR}/karamel/include ${PROJECT_SOURCE_DIR}/karamel/krmllib/dist/minimal ${PROJECT_SOURCE_DIR}/vale/include + ${PROJECT_SOURCE_DIR}/libcrux/include ) set(TEST_SOURCES ${PROJECT_SOURCE_DIR}/tests/detection.cc @@ -406,6 +427,7 @@ set(TEST_SOURCES ${PROJECT_SOURCE_DIR}/tests/nacl.cc ${PROJECT_SOURCE_DIR}/tests/evercrypt.cc ${PROJECT_SOURCE_DIR}/tests/aead.cc + ${PROJECT_SOURCE_DIR}/tests/kyber.cc ) set(BENCHMARK_SOURCES ${PROJECT_SOURCE_DIR}/benchmarks/blake.cc @@ -423,6 +445,7 @@ set(BENCHMARK_SOURCES ${PROJECT_SOURCE_DIR}/benchmarks/drbg.cc ${PROJECT_SOURCE_DIR}/benchmarks/hmac.cc ${PROJECT_SOURCE_DIR}/benchmarks/rsapss.cc + ${PROJECT_SOURCE_DIR}/benchmarks/kyber.cc ) set(VALE_SOURCES_osx 
${PROJECT_SOURCE_DIR}/vale/src/cpuid-x86_64-darwin.S @@ -452,6 +475,14 @@ set(VALE_SOURCES_msvc ${PROJECT_SOURCE_DIR}/vale/src/curve25519-x86_64-msvc.asm ${PROJECT_SOURCE_DIR}/vale/src/poly1305-x86_64-msvc.asm ) +set(LIBCRUX_SOURCES + ${PROJECT_SOURCE_DIR}/libcrux/src/Libcrux_Kem_Kyber_Kyber768.c + ${PROJECT_SOURCE_DIR}/libcrux/src/libcrux_kyber512.c + ${PROJECT_SOURCE_DIR}/libcrux/src/libcrux_kyber768.c + ${PROJECT_SOURCE_DIR}/libcrux/src/libcrux_kyber1024.c + ${PROJECT_SOURCE_DIR}/libcrux/src/libcrux_hacl_glue.c + ${PROJECT_SOURCE_DIR}/libcrux/src/core.c +) set(ALGORITHM_TEST_FILES TEST_FILES_detection TEST_FILES_bignum @@ -474,6 +505,7 @@ set(ALGORITHM_TEST_FILES TEST_FILES_nacl TEST_FILES_evercrypt TEST_FILES_aead + TEST_FILES_kyber ) set(TEST_FILES_detection detection.cc @@ -541,3 +573,6 @@ set(TEST_FILES_evercrypt set(TEST_FILES_aead aead.cc ) +set(TEST_FILES_kyber + kyber.cc +) diff --git a/config/default_config_msvc.cmake b/config/default_config_msvc.cmake new file mode 100644 index 000000000..5950b0953 --- /dev/null +++ b/config/default_config_msvc.cmake @@ -0,0 +1,578 @@ +set(SOURCES_std + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_NaCl.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Salsa20.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_MAC_Poly1305.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Curve25519_51.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HMAC_DRBG.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HMAC.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Hash_SHA2.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Hash_Blake2s.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Hash_Blake2b.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Ed25519.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_EC_Ed25519.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Hash_Base.c + ${PROJECT_SOURCE_DIR}/src/msvc/Lib_Memzero0.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Bignum256_32.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Bignum.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Bignum256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Bignum32.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Bignum4096_32.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_GenericField32.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_AEAD_Chacha20Poly1305.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Chacha20.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Chacha20_Vec32.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_P256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_K256_ECDSA.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_EC_K256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_FFDHE.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Hash_SHA3.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Hash_SHA1.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Hash_MD5.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HKDF.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_RSAPSS.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_Curve51_CP32_SHA256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_Curve51_CP32_SHA512.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_P256_CP32_SHA256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Frodo1344.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Frodo_KEM.c + ${PROJECT_SOURCE_DIR}/src/msvc/Lib_RandomBuffer_System.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Frodo640.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Frodo976.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Frodo64.c + ${PROJECT_SOURCE_DIR}/src/msvc/EverCrypt_DRBG.c + ${PROJECT_SOURCE_DIR}/src/msvc/Lib_RandomBuffer_System.c + ${PROJECT_SOURCE_DIR}/src/msvc/EverCrypt_HMAC.c + ${PROJECT_SOURCE_DIR}/src/msvc/EverCrypt_Hash.c + ${PROJECT_SOURCE_DIR}/src/msvc/EverCrypt_AutoConfig2.c + ${PROJECT_SOURCE_DIR}/src/msvc/Lib_Memzero0.c + ${PROJECT_SOURCE_DIR}/src/msvc/EverCrypt_Ed25519.c + 
${PROJECT_SOURCE_DIR}/src/msvc/EverCrypt_Curve25519.c + ${PROJECT_SOURCE_DIR}/src/msvc/EverCrypt_HKDF.c + ${PROJECT_SOURCE_DIR}/src/msvc/EverCrypt_Cipher.c + ${PROJECT_SOURCE_DIR}/src/msvc/EverCrypt_Chacha20Poly1305.c + ${PROJECT_SOURCE_DIR}/src/msvc/EverCrypt_Poly1305.c + ${PROJECT_SOURCE_DIR}/src/msvc/EverCrypt_AEAD.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Hash_SHA3_Scalar.c +) +set(SOURCES_vec256 + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Hash_Blake2b_Simd256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_AEAD_Chacha20Poly1305_Simd256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Chacha20_Vec256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_MAC_Poly1305_Simd256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_SHA2_Vec256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HKDF_Blake2b_256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HMAC_Blake2b_256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_Curve51_CP256_SHA256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_Curve51_CP256_SHA512.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_P256_CP256_SHA256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Hash_SHA3_Simd256.c +) +set(SOURCES_vec128 + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Hash_Blake2s_Simd128.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Bignum4096.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Bignum64.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_GenericField64.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_AEAD_Chacha20Poly1305_Simd128.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Chacha20_Vec128.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_MAC_Poly1305_Simd128.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_SHA2_Vec128.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HKDF_Blake2s_128.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HMAC_Blake2s_128.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_Curve51_CP128_SHA256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_Curve51_CP128_SHA512.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_P256_CP128_SHA256.c +) +set(SOURCES_m32 + +) +set(SOURCES_vale + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_Curve25519_64.c +) +set(SOURCES_vec128_vale + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_Curve64_CP128_SHA256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_Curve64_CP128_SHA512.c +) +set(SOURCES_vec256_vale + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_Curve64_CP256_SHA256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_Curve64_CP256_SHA512.c +) +set(SOURCES_std_vale + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_Curve64_CP32_SHA256.c + ${PROJECT_SOURCE_DIR}/src/msvc/Hacl_HPKE_Curve64_CP32_SHA512.c +) +set(INCLUDES + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_NaCl.h + ${PROJECT_SOURCE_DIR}/karamel/include/krml/internal/types.h + ${PROJECT_SOURCE_DIR}/karamel/include/krml/lowstar_endianness.h + ${PROJECT_SOURCE_DIR}/karamel/krmllib/dist/minimal/fstar_uint128_gcc64.h + ${PROJECT_SOURCE_DIR}/karamel/krmllib/dist/minimal/FStar_UInt128.h + ${PROJECT_SOURCE_DIR}/karamel/include/krml/internal/compat.h + ${PROJECT_SOURCE_DIR}/karamel/include/krml/internal/target.h + ${PROJECT_SOURCE_DIR}/karamel/krmllib/dist/minimal/FStar_UInt_8_16_32_64.h + ${PROJECT_SOURCE_DIR}/karamel/krmllib/dist/minimal/LowStar_Endianness.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Salsa20.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_MAC_Poly1305.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Streaming_Types.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Krmllib.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Curve25519_51.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Krmllib.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Krmllib.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HMAC_DRBG.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HMAC.h + 
${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_SHA2.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_Blake2s.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_Blake2b.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Ed25519.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Hash_SHA2.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_SHA2.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Ed25519_PrecompTable.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Curve25519_51.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Bignum25519_51.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Curve25519_51.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Bignum_Base.h + ${PROJECT_SOURCE_DIR}/include/msvc/lib_intrinsics.h + ${PROJECT_SOURCE_DIR}/build/config.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_IntTypes_Intrinsics.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_IntTypes_Intrinsics_128.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Ed25519.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_EC_Ed25519.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_Base.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Hash_Blake2b.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Impl_Blake2_Constants.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_Blake2b.h + ${PROJECT_SOURCE_DIR}/include/msvc/lib_memzero0.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Hash_Blake2s.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_Blake2s.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Hash_Blake2b_Simd256.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_Blake2b_Simd256.h + ${PROJECT_SOURCE_DIR}/include/msvc/libintvector.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Hash_Blake2s_Simd128.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_Blake2s_Simd128.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum256_32.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Bignum.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Bignum.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum32.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum4096_32.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum4096.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum64.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_GenericField32.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_GenericField64.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_AEAD_Chacha20Poly1305.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Chacha20.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_MAC_Poly1305.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_MAC_Poly1305.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Chacha20_Vec32.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Chacha20.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Chacha20.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_AEAD_Chacha20Poly1305_Simd128.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Chacha20_Vec128.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_MAC_Poly1305_Simd128.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_MAC_Poly1305_Simd128.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_AEAD_Chacha20Poly1305_Simd256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Chacha20_Vec256.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_MAC_Poly1305_Simd256.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_MAC_Poly1305_Simd256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Curve25519_64.h + 
${PROJECT_SOURCE_DIR}/include/msvc/internal/Vale.h + ${PROJECT_SOURCE_DIR}/include/msvc/curve25519-inline.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_P256.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_P256_PrecompTable.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_P256.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_K256_ECDSA.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_K256_PrecompTable.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Bignum_K256.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_K256_ECDSA.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_EC_K256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_FFDHE.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Spec.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Impl_FFDHE_Constants.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Hash_SHA3.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_SHA3.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_SHA2_Vec128.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_SHA2_Types.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_SHA2_Vec256.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Hash_SHA1.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_SHA1.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Hash_MD5.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_MD5.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_HMAC.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_HMAC.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HKDF.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HKDF_Blake2s_128.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HMAC_Blake2s_128.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_Blake2s_Simd128.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HKDF_Blake2b_256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HMAC_Blake2b_256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_Blake2b_Simd256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_RSAPSS.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve64_CP128_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve64_CP128_SHA512.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve64_CP256_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve64_CP256_SHA512.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve64_CP32_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve64_CP32_SHA512.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve51_CP256_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve51_CP256_SHA512.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_P256_CP256_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve51_CP128_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve51_CP128_SHA512.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve51_CP32_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve51_CP32_SHA512.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_P256_CP128_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_P256_CP32_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Frodo1344.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_SHA3.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Spec.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Spec.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Frodo_KEM.h + ${PROJECT_SOURCE_DIR}/include/msvc/Lib_RandomBuffer_System.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Frodo640.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Frodo976.h + 
${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Frodo64.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_DRBG.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/EverCrypt_HMAC.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/EverCrypt_Hash.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../EverCrypt_Hash.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_Error.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_AutoConfig2.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../EverCrypt_HMAC.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_Ed25519.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Ed25519.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_Curve25519.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_HKDF.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_Cipher.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_Chacha20Poly1305.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_Poly1305.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_MAC_Poly1305_Simd256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_MAC_Poly1305_Simd128.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_AEAD.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/Hacl_Hash_SHA3_Scalar.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_SHA3_Scalar.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_SHA3_Simd256.h +) +set(PUBLIC_INCLUDES + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_NaCl.h + ${PROJECT_SOURCE_DIR}/karamel/include/krml/lowstar_endianness.h + ${PROJECT_SOURCE_DIR}/karamel/krmllib/dist/minimal/fstar_uint128_gcc64.h + ${PROJECT_SOURCE_DIR}/karamel/krmllib/dist/minimal/FStar_UInt128.h + ${PROJECT_SOURCE_DIR}/karamel/krmllib/dist/minimal/FStar_UInt_8_16_32_64.h + ${PROJECT_SOURCE_DIR}/karamel/krmllib/dist/minimal/LowStar_Endianness.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Salsa20.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_MAC_Poly1305.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Streaming_Types.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Krmllib.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Curve25519_51.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Krmllib.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HMAC_DRBG.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HMAC.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_SHA2.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_Blake2s.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_Blake2b.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_SHA2.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Curve25519_51.h + ${PROJECT_SOURCE_DIR}/include/msvc/lib_intrinsics.h + ${PROJECT_SOURCE_DIR}/build/config.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_IntTypes_Intrinsics.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_IntTypes_Intrinsics_128.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Ed25519.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_EC_Ed25519.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_Base.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_Blake2b.h + ${PROJECT_SOURCE_DIR}/include/msvc/lib_memzero0.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_Blake2s.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_Blake2b_Simd256.h + ${PROJECT_SOURCE_DIR}/include/msvc/libintvector.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_Blake2s_Simd128.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum256_32.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Bignum.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum32.h + 
${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum4096_32.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum4096.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Bignum64.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_GenericField32.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_GenericField64.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_AEAD_Chacha20Poly1305.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Chacha20.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_MAC_Poly1305.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Chacha20_Vec32.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Chacha20.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_AEAD_Chacha20Poly1305_Simd128.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Chacha20_Vec128.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_MAC_Poly1305_Simd128.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_AEAD_Chacha20Poly1305_Simd256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Chacha20_Vec256.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_MAC_Poly1305_Simd256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Curve25519_64.h + ${PROJECT_SOURCE_DIR}/include/msvc/curve25519-inline.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_P256.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_K256_ECDSA.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_EC_K256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_FFDHE.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Spec.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_SHA3.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_SHA2_Vec128.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_SHA2_Vec256.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_SHA1.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Hash_MD5.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_HMAC.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HKDF.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HKDF_Blake2s_128.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HMAC_Blake2s_128.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_Blake2s_Simd128.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HKDF_Blake2b_256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HMAC_Blake2b_256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_Blake2b_Simd256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_RSAPSS.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve64_CP128_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve64_CP128_SHA512.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve64_CP256_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve64_CP256_SHA512.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve64_CP32_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve64_CP32_SHA512.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve51_CP256_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve51_CP256_SHA512.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_P256_CP256_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve51_CP128_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve51_CP128_SHA512.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve51_CP32_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_Curve51_CP32_SHA512.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_P256_CP128_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_HPKE_P256_CP32_SHA256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Frodo1344.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_SHA3.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../Hacl_Spec.h + 
${PROJECT_SOURCE_DIR}/include/msvc/Lib_RandomBuffer_System.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Frodo640.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Frodo976.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Frodo64.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_DRBG.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../EverCrypt_Hash.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_Error.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_AutoConfig2.h + ${PROJECT_SOURCE_DIR}/include/msvc/internal/../EverCrypt_HMAC.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_Ed25519.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Ed25519.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_Curve25519.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_HKDF.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_Cipher.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_Chacha20Poly1305.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_Poly1305.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_MAC_Poly1305_Simd256.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_MAC_Poly1305_Simd128.h + ${PROJECT_SOURCE_DIR}/include/msvc/EverCrypt_AEAD.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_SHA3_Scalar.h + ${PROJECT_SOURCE_DIR}/include/msvc/Hacl_Hash_SHA3_Simd256.h +) +set(ALGORITHMS + nacl + salsa20 + aesgcm + drbg + ed25519 + blake2 + bignum + generic-field + chacha20poly1305 + curve25519 + p256 + k256 + ffdh + sha3 + sha2 + sha1 + md5 + hmac + hkdf + rsapss + hpke + frodo + sha3-mb +) +set(INCLUDE_PATHS + ${PROJECT_SOURCE_DIR}/include/msvc + ${PROJECT_SOURCE_DIR}/build + ${PROJECT_SOURCE_DIR}/karamel/include + ${PROJECT_SOURCE_DIR}/karamel/krmllib/dist/minimal + ${PROJECT_SOURCE_DIR}/vale/include + ${PROJECT_SOURCE_DIR}/libcrux/include +) +set(TEST_SOURCES + ${PROJECT_SOURCE_DIR}/tests/detection.cc + ${PROJECT_SOURCE_DIR}/tests/bignum.cc + ${PROJECT_SOURCE_DIR}/tests/generic_field.cc + ${PROJECT_SOURCE_DIR}/tests/blake2b.cc + ${PROJECT_SOURCE_DIR}/tests/blake2s.cc + ${PROJECT_SOURCE_DIR}/tests/p256_ecdh.cc + ${PROJECT_SOURCE_DIR}/tests/p256_ecdsa.cc + ${PROJECT_SOURCE_DIR}/tests/k256_ecdh.cc + ${PROJECT_SOURCE_DIR}/tests/k256_ecdsa.cc + ${PROJECT_SOURCE_DIR}/tests/chacha20poly1305.cc + ${PROJECT_SOURCE_DIR}/tests/ed25519.cc + ${PROJECT_SOURCE_DIR}/tests/x25519.cc + ${PROJECT_SOURCE_DIR}/tests/rsapss.cc + ${PROJECT_SOURCE_DIR}/tests/hkdf.cc + ${PROJECT_SOURCE_DIR}/tests/poly1305.cc + ${PROJECT_SOURCE_DIR}/tests/hmac.cc + ${PROJECT_SOURCE_DIR}/tests/drbg.cc + ${PROJECT_SOURCE_DIR}/tests/md5.cc + ${PROJECT_SOURCE_DIR}/tests/sha1.cc + ${PROJECT_SOURCE_DIR}/tests/sha2.cc + ${PROJECT_SOURCE_DIR}/tests/sha3.cc + ${PROJECT_SOURCE_DIR}/tests/nacl.cc + ${PROJECT_SOURCE_DIR}/tests/evercrypt.cc + ${PROJECT_SOURCE_DIR}/tests/aead.cc + ${PROJECT_SOURCE_DIR}/tests/kyber.cc +) +set(BENCHMARK_SOURCES + ${PROJECT_SOURCE_DIR}/benchmarks/blake.cc + ${PROJECT_SOURCE_DIR}/benchmarks/chacha20.cc + ${PROJECT_SOURCE_DIR}/benchmarks/chacha20poly1305.cc + ${PROJECT_SOURCE_DIR}/benchmarks/x25519.cc + ${PROJECT_SOURCE_DIR}/benchmarks/ed25519.cc + ${PROJECT_SOURCE_DIR}/benchmarks/nacl.cc + ${PROJECT_SOURCE_DIR}/benchmarks/p256.cc + ${PROJECT_SOURCE_DIR}/benchmarks/sha1.cc + ${PROJECT_SOURCE_DIR}/benchmarks/sha2.cc + ${PROJECT_SOURCE_DIR}/benchmarks/sha3.cc + ${PROJECT_SOURCE_DIR}/benchmarks/k256.cc + ${PROJECT_SOURCE_DIR}/benchmarks/kdf.cc + ${PROJECT_SOURCE_DIR}/benchmarks/drbg.cc + ${PROJECT_SOURCE_DIR}/benchmarks/hmac.cc + ${PROJECT_SOURCE_DIR}/benchmarks/rsapss.cc + ${PROJECT_SOURCE_DIR}/benchmarks/kyber.cc +) +set(VALE_SOURCES_osx + 
${PROJECT_SOURCE_DIR}/vale/src/cpuid-x86_64-darwin.S + ${PROJECT_SOURCE_DIR}/vale/src/sha256-x86_64-darwin.S + ${PROJECT_SOURCE_DIR}/vale/src/aesgcm-x86_64-darwin.S + ${PROJECT_SOURCE_DIR}/vale/src/curve25519-x86_64-darwin.S + ${PROJECT_SOURCE_DIR}/vale/src/poly1305-x86_64-darwin.S +) +set(VALE_SOURCES_linux + ${PROJECT_SOURCE_DIR}/vale/src/cpuid-x86_64-linux.S + ${PROJECT_SOURCE_DIR}/vale/src/sha256-x86_64-linux.S + ${PROJECT_SOURCE_DIR}/vale/src/aesgcm-x86_64-linux.S + ${PROJECT_SOURCE_DIR}/vale/src/curve25519-x86_64-linux.S + ${PROJECT_SOURCE_DIR}/vale/src/poly1305-x86_64-linux.S +) +set(VALE_SOURCES_mingw + ${PROJECT_SOURCE_DIR}/vale/src/cpuid-x86_64-mingw.S + ${PROJECT_SOURCE_DIR}/vale/src/sha256-x86_64-mingw.S + ${PROJECT_SOURCE_DIR}/vale/src/aesgcm-x86_64-mingw.S + ${PROJECT_SOURCE_DIR}/vale/src/curve25519-x86_64-mingw.S + ${PROJECT_SOURCE_DIR}/vale/src/poly1305-x86_64-mingw.S +) +set(VALE_SOURCES_msvc + ${PROJECT_SOURCE_DIR}/vale/src/cpuid-x86_64-msvc.asm + ${PROJECT_SOURCE_DIR}/vale/src/sha256-x86_64-msvc.asm + ${PROJECT_SOURCE_DIR}/vale/src/aesgcm-x86_64-msvc.asm + ${PROJECT_SOURCE_DIR}/vale/src/curve25519-x86_64-msvc.asm + ${PROJECT_SOURCE_DIR}/vale/src/poly1305-x86_64-msvc.asm +) +set(LIBCRUX_SOURCES + ${PROJECT_SOURCE_DIR}/libcrux/src/Libcrux_Kem_Kyber_Kyber768.c + ${PROJECT_SOURCE_DIR}/libcrux/src/libcrux_kyber512.c + ${PROJECT_SOURCE_DIR}/libcrux/src/libcrux_kyber768.c + ${PROJECT_SOURCE_DIR}/libcrux/src/libcrux_kyber1024.c + ${PROJECT_SOURCE_DIR}/libcrux/src/libcrux_hacl_glue.c + ${PROJECT_SOURCE_DIR}/libcrux/src/core.c +) +set(ALGORITHM_TEST_FILES + TEST_FILES_detection + TEST_FILES_bignum + TEST_FILES_generic_field + TEST_FILES_blake2 + TEST_FILES_p256 + TEST_FILES_k256 + TEST_FILES_chacha20poly1305 + TEST_FILES_ed25519 + TEST_FILES_curve25519 + TEST_FILES_rsapss + TEST_FILES_hkdf + TEST_FILES_poly1305 + TEST_FILES_hmac + TEST_FILES_drbg + TEST_FILES_md5 + TEST_FILES_sha1 + TEST_FILES_sha2 + TEST_FILES_sha3 + TEST_FILES_nacl + TEST_FILES_evercrypt + TEST_FILES_aead + TEST_FILES_kyber +) +set(TEST_FILES_detection + detection.cc +) +set(TEST_FILES_bignum + bignum.cc +) +set(TEST_FILES_generic_field + generic_field.cc +) +set(TEST_FILES_blake2 + blake2b.cc + blake2s.cc +) +set(TEST_FILES_p256 + p256_ecdh.cc + p256_ecdsa.cc +) +set(TEST_FILES_k256 + k256_ecdh.cc + k256_ecdsa.cc +) +set(TEST_FILES_chacha20poly1305 + chacha20poly1305.cc +) +set(TEST_FILES_ed25519 + ed25519.cc +) +set(TEST_FILES_curve25519 + x25519.cc +) +set(TEST_FILES_rsapss + rsapss.cc +) +set(TEST_FILES_hkdf + hkdf.cc +) +set(TEST_FILES_poly1305 + poly1305.cc +) +set(TEST_FILES_hmac + hmac.cc +) +set(TEST_FILES_drbg + drbg.cc +) +set(TEST_FILES_md5 + md5.cc +) +set(TEST_FILES_sha1 + sha1.cc +) +set(TEST_FILES_sha2 + sha2.cc +) +set(TEST_FILES_sha3 + sha3.cc +) +set(TEST_FILES_nacl + nacl.cc +) +set(TEST_FILES_evercrypt + evercrypt.cc +) +set(TEST_FILES_aead + aead.cc +) +set(TEST_FILES_kyber + kyber.cc +) diff --git a/include/Hacl_Hash_SHA3_Scalar.h b/include/Hacl_Hash_SHA3_Scalar.h new file mode 100644 index 000000000..a40c2d04d --- /dev/null +++ b/include/Hacl_Hash_SHA3_Scalar.h @@ -0,0 +1,135 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + +#ifndef __Hacl_Hash_SHA3_Scalar_H +#define __Hacl_Hash_SHA3_Scalar_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include <string.h> +#include "krml/internal/types.h" +#include "krml/lowstar_endianness.h" +#include "krml/internal/target.h" + +void +Hacl_Hash_SHA3_Scalar_shake128( + uint8_t *output, + uint32_t outputByteLen, + uint8_t *input, + uint32_t inputByteLen +); + +void +Hacl_Hash_SHA3_Scalar_shake256( + uint8_t *output, + uint32_t outputByteLen, + uint8_t *input, + uint32_t inputByteLen +); + +void Hacl_Hash_SHA3_Scalar_sha3_224(uint8_t *output, uint8_t *input, uint32_t inputByteLen); + +void Hacl_Hash_SHA3_Scalar_sha3_256(uint8_t *output, uint8_t *input, uint32_t inputByteLen); + +void Hacl_Hash_SHA3_Scalar_sha3_384(uint8_t *output, uint8_t *input, uint32_t inputByteLen); + +void Hacl_Hash_SHA3_Scalar_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByteLen); + +/** +Allocate a state buffer of 200 bytes +*/ +uint64_t *Hacl_Hash_SHA3_Scalar_state_malloc(void); + +/** +Free the state buffer +*/ +void Hacl_Hash_SHA3_Scalar_state_free(uint64_t *s); + +/** +Absorb a number of input blocks and write the output state + + This function is intended to receive a hash state and an input buffer. + It processes an input of a multiple of 168 bytes (SHAKE128 block size); + any additional bytes of the final partial block are ignored. + + The argument `state` (IN/OUT) points to the hash state, i.e., uint64_t[25] + The argument `input` (IN) points to `inputByteLen` bytes of valid memory, + i.e., uint8_t[inputByteLen] +*/ +void +Hacl_Hash_SHA3_Scalar_shake128_absorb_nblocks( + uint64_t *state, + uint8_t *input, + uint32_t inputByteLen +); + +/** +Absorb the final partial block of input and write the output state + + This function is intended to receive a hash state and an input buffer. + It processes the sequence of bytes at the end of the input buffer that is + shorter than 168 bytes (SHAKE128 block size); + any bytes of full blocks at the start of the input buffer are ignored. + + The argument `state` (IN/OUT) points to the hash state, i.e., uint64_t[25] + The argument `input` (IN) points to `inputByteLen` bytes of valid memory, + i.e., uint8_t[inputByteLen] + + Note: the full size of the input buffer must be passed in `inputByteLen`, including + the number of full-block bytes at the start of the input buffer that are ignored +*/ +void +Hacl_Hash_SHA3_Scalar_shake128_absorb_final( + uint64_t *state, + uint8_t *input, + uint32_t inputByteLen +); + +/** +Squeeze a hash state to the output buffer + + This function is intended to receive a hash state and an output buffer. + It produces an output of a multiple of 168 bytes (SHAKE128 block size); + any additional bytes of the final partial block are ignored. 
+ + The argument `state` (IN) points to the hash state, i.e., uint64_t[25] + The argument `output` (OUT) points to `outputByteLen` bytes of valid memory, + i.e., uint8_t[outputByteLen] +*/ +void +Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks( + uint64_t *state, + uint8_t *output, + uint32_t outputByteLen +); + +#if defined(__cplusplus) +} +#endif + +#define __Hacl_Hash_SHA3_Scalar_H_DEFINED +#endif
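Taken together, the streaming calls above compose into a malloc/absorb/squeeze/free sequence. A minimal sketch of the intended call order (illustrative only; `data`, `len`, `out`, and `out_len` are caller-supplied placeholders, and `out_len` must be a multiple of 168):

#include "Hacl_Hash_SHA3_Scalar.h"

void shake128_stream_example(uint8_t *data, uint32_t len, uint8_t *out, uint32_t out_len)
{
  uint64_t *state = Hacl_Hash_SHA3_Scalar_state_malloc();
  /* Absorbs all full 168-byte blocks of `data`; the partial tail is ignored here. */
  Hacl_Hash_SHA3_Scalar_shake128_absorb_nblocks(state, data, len);
  /* Absorbs the trailing partial block; per the docs, `len` is again the full buffer size. */
  Hacl_Hash_SHA3_Scalar_shake128_absorb_final(state, data, len);
  /* Squeezes `out_len` bytes, a multiple of the 168-byte block size. */
  Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(state, out, out_len);
  Hacl_Hash_SHA3_Scalar_state_free(state);
}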
diff --git a/include/Hacl_Hash_SHA3_Simd256.h b/include/Hacl_Hash_SHA3_Simd256.h new file mode 100644 index 000000000..f38bf7cbb --- /dev/null +++ b/include/Hacl_Hash_SHA3_Simd256.h @@ -0,0 +1,226 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + +#ifndef __Hacl_Hash_SHA3_Simd256_H +#define __Hacl_Hash_SHA3_Simd256_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include <string.h> +#include "krml/internal/types.h" +#include "krml/lowstar_endianness.h" +#include "krml/internal/target.h" + +#include "libintvector.h" + +typedef struct K____uint8_t___uint8_t__s +{ + uint8_t *fst; + uint8_t *snd; +} +K____uint8_t___uint8_t_; + +typedef struct K____uint8_t__K____uint8_t___uint8_t__s +{ + uint8_t *fst; + K____uint8_t___uint8_t_ snd; +} +K____uint8_t__K____uint8_t___uint8_t_; + +typedef struct K____uint8_t___uint8_t____K____uint8_t___uint8_t__s +{ + uint8_t *fst; + K____uint8_t__K____uint8_t___uint8_t_ snd; +} +K____uint8_t___uint8_t____K____uint8_t___uint8_t_; + +void +Hacl_Hash_SHA3_Simd256_shake128( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +void +Hacl_Hash_SHA3_Simd256_shake256( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +void +Hacl_Hash_SHA3_Simd256_sha3_224( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +void +Hacl_Hash_SHA3_Simd256_sha3_256( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +void +Hacl_Hash_SHA3_Simd256_sha3_384( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +void +Hacl_Hash_SHA3_Simd256_sha3_512( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +/** +Allocate a quadruple state buffer (200 bytes each) +*/ +Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_SHA3_Simd256_state_malloc(void); + +/** +Free the quadruple state buffer +*/ +void Hacl_Hash_SHA3_Simd256_state_free(Lib_IntVector_Intrinsics_vec256 *s); + +/** +Absorb a number of blocks of 4 input buffers and write the output states + + This function is intended to receive a quadruple hash state and 4 input buffers. + It processes inputs of a multiple of 168 bytes (SHAKE128 block size); + any additional bytes of the final partial block for each buffer are ignored. + + The argument `state` (IN/OUT) points to the quadruple hash state, + i.e., Lib_IntVector_Intrinsics_vec256[25] + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + of valid memory for each buffer, i.e., uint8_t[inputByteLen] +*/ +void +Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +/** +Absorb the final partial blocks of 4 input buffers and write the output states + + This function is intended to receive a quadruple hash state and 4 input buffers. + It processes the sequence of bytes at the end of each input buffer that is + shorter than 168 bytes (SHAKE128 block size); + any bytes of full blocks at the start of the input buffers are ignored. 
+ + The argument `state` (IN/OUT) points to the quadruple hash state, + i.e., Lib_IntVector_Intrinsics_vec256[25] + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + of valid memory for each buffer, i.e., uint8_t[inputByteLen] + + Note: the full size of the input buffers must be passed in `inputByteLen`, including + the number of full-block bytes at the start of each input buffer that are ignored +*/ +void +Hacl_Hash_SHA3_Simd256_shake128_absorb_final( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +/** +Squeeze a quadruple hash state to 4 output buffers + + This function is intended to receive a quadruple hash state and 4 output buffers. + It produces 4 outputs, each a multiple of 168 bytes (SHAKE128 block size); + any additional bytes of the final partial block for each buffer are ignored. + + The argument `state` (IN) points to the quadruple hash state, + i.e., Lib_IntVector_Intrinsics_vec256[25] + The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes + of valid memory for each buffer, i.e., uint8_t[outputByteLen] +*/ +void +Hacl_Hash_SHA3_Simd256_shake128_squeeze_nblocks( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen +); + +#if defined(__cplusplus) +} +#endif + +#define __Hacl_Hash_SHA3_Simd256_H_DEFINED +#endif
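The quadruple API hashes four independent, equal-length inputs in one call. A minimal sketch of the one-shot 4-way SHAKE128 (illustrative only; the buffer names are placeholders, all four inputs must share `in_len`, and all four outputs share `out_len`):

#include "Hacl_Hash_SHA3_Simd256.h"

void shake128_x4_example(uint8_t *in0, uint8_t *in1, uint8_t *in2, uint8_t *in3,
                         uint32_t in_len,
                         uint8_t *out0, uint8_t *out1, uint8_t *out2, uint8_t *out3,
                         uint32_t out_len)
{
  /* One SHAKE128 computation per lane; the four lanes are independent. */
  Hacl_Hash_SHA3_Simd256_shake128(out0, out1, out2, out3, out_len,
                                  in0, in1, in2, in3, in_len);
}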
diff --git a/include/internal/Hacl_Hash_SHA3_Scalar.h b/include/internal/Hacl_Hash_SHA3_Scalar.h new file mode 100644 index 000000000..88c93d178 --- /dev/null +++ b/include/internal/Hacl_Hash_SHA3_Scalar.h @@ -0,0 +1,51 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + +#ifndef __internal_Hacl_Hash_SHA3_Scalar_H +#define __internal_Hacl_Hash_SHA3_Scalar_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include <string.h> +#include "krml/internal/types.h" +#include "krml/lowstar_endianness.h" +#include "krml/internal/target.h" + +#include "../Hacl_Hash_SHA3_Scalar.h" + +extern const uint32_t Hacl_Impl_SHA3_Vec_keccak_rotc[24U]; + +extern const uint32_t Hacl_Impl_SHA3_Vec_keccak_piln[24U]; + +extern const uint64_t Hacl_Impl_SHA3_Vec_keccak_rndc[24U]; + +#if defined(__cplusplus) +} +#endif + +#define __internal_Hacl_Hash_SHA3_Scalar_H_DEFINED +#endif diff --git a/include/msvc/Hacl_Hash_SHA3_Scalar.h b/include/msvc/Hacl_Hash_SHA3_Scalar.h new file mode 100644 index 000000000..a40c2d04d --- /dev/null +++ b/include/msvc/Hacl_Hash_SHA3_Scalar.h @@ -0,0 +1,135 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + +#ifndef __Hacl_Hash_SHA3_Scalar_H +#define __Hacl_Hash_SHA3_Scalar_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include <string.h> +#include "krml/internal/types.h" +#include "krml/lowstar_endianness.h" +#include "krml/internal/target.h" + +void +Hacl_Hash_SHA3_Scalar_shake128( + uint8_t *output, + uint32_t outputByteLen, + uint8_t *input, + uint32_t inputByteLen +); + +void +Hacl_Hash_SHA3_Scalar_shake256( + uint8_t *output, + uint32_t outputByteLen, + uint8_t *input, + uint32_t inputByteLen +); + +void Hacl_Hash_SHA3_Scalar_sha3_224(uint8_t *output, uint8_t *input, uint32_t inputByteLen); + +void Hacl_Hash_SHA3_Scalar_sha3_256(uint8_t *output, uint8_t *input, uint32_t inputByteLen); + +void Hacl_Hash_SHA3_Scalar_sha3_384(uint8_t *output, uint8_t *input, uint32_t inputByteLen); + +void Hacl_Hash_SHA3_Scalar_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByteLen); + +/** +Allocate a state buffer of 200 bytes +*/ +uint64_t *Hacl_Hash_SHA3_Scalar_state_malloc(void); + +/** +Free the state buffer +*/ +void Hacl_Hash_SHA3_Scalar_state_free(uint64_t *s); + +/** +Absorb a number of input blocks and write the output state + + This function is intended to receive a hash state and an input buffer. + It processes an input of a multiple of 168 bytes (SHAKE128 block size); + any additional bytes of the final partial block are ignored. 
+ + The argument `state` (IN/OUT) points to the hash state, i.e., uint64_t[25] + The argument `input` (IN) points to `inputByteLen` bytes of valid memory, + i.e., uint8_t[inputByteLen] +*/ +void +Hacl_Hash_SHA3_Scalar_shake128_absorb_nblocks( + uint64_t *state, + uint8_t *input, + uint32_t inputByteLen +); + +/** +Absorb the final partial block of input and write the output state + + This function is intended to receive a hash state and an input buffer. + It processes the sequence of bytes at the end of the input buffer that is + shorter than 168 bytes (SHAKE128 block size); + any bytes of full blocks at the start of the input buffer are ignored. + + The argument `state` (IN/OUT) points to the hash state, i.e., uint64_t[25] + The argument `input` (IN) points to `inputByteLen` bytes of valid memory, + i.e., uint8_t[inputByteLen] + + Note: the full size of the input buffer must be passed in `inputByteLen`, including + the number of full-block bytes at the start of the input buffer that are ignored +*/ +void +Hacl_Hash_SHA3_Scalar_shake128_absorb_final( + uint64_t *state, + uint8_t *input, + uint32_t inputByteLen +); + +/** +Squeeze a hash state to the output buffer + + This function is intended to receive a hash state and an output buffer. + It produces an output of a multiple of 168 bytes (SHAKE128 block size); + any additional bytes of the final partial block are ignored. + + The argument `state` (IN) points to the hash state, i.e., uint64_t[25] + The argument `output` (OUT) points to `outputByteLen` bytes of valid memory, + i.e., uint8_t[outputByteLen] +*/ +void +Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks( + uint64_t *state, + uint8_t *output, + uint32_t outputByteLen +); + +#if defined(__cplusplus) +} +#endif + +#define __Hacl_Hash_SHA3_Scalar_H_DEFINED +#endif diff --git a/include/msvc/Hacl_Hash_SHA3_Simd256.h b/include/msvc/Hacl_Hash_SHA3_Simd256.h new file mode 100644 index 000000000..302094a43 --- /dev/null +++ b/include/msvc/Hacl_Hash_SHA3_Simd256.h @@ -0,0 +1,226 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + +#ifndef __Hacl_Hash_SHA3_Simd256_H +#define __Hacl_Hash_SHA3_Simd256_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include <string.h> +#include "krml/internal/types.h" +#include "krml/lowstar_endianness.h" +#include "krml/internal/target.h" + +#include "libintvector.h" + +typedef struct K____uint8_t___uint8_t__s +{ + uint8_t *fst; + uint8_t *snd; +} +K____uint8_t___uint8_t_; + +typedef struct K____uint8_t__K____uint8_t___uint8_t__s +{ + uint8_t *fst; + K____uint8_t___uint8_t_ snd; +} +K____uint8_t__K____uint8_t___uint8_t_; + +typedef struct K____uint8_t___uint8_t____K____uint8_t___uint8_t__s +{ + uint8_t *fst; + K____uint8_t__K____uint8_t___uint8_t_ snd; +} +K____uint8_t___uint8_t____K____uint8_t___uint8_t_; + +void +Hacl_Hash_SHA3_Simd256_shake128( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +void +Hacl_Hash_SHA3_Simd256_shake256( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +void +Hacl_Hash_SHA3_Simd256_sha3_224( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +void +Hacl_Hash_SHA3_Simd256_sha3_256( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +void +Hacl_Hash_SHA3_Simd256_sha3_384( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +void +Hacl_Hash_SHA3_Simd256_sha3_512( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +/** +Allocate a quadruple state buffer (200 bytes each) +*/ +uint64_t *Hacl_Hash_SHA3_Simd256_state_malloc(void); + +/** +Free the quadruple state buffer +*/ +void Hacl_Hash_SHA3_Simd256_state_free(uint64_t *s); + +/** +Absorb a number of blocks of 4 input buffers and write the output states + + This function is intended to receive a quadruple hash state and 4 input buffers. + It processes inputs of a multiple of 168 bytes (SHAKE128 block size); + any additional bytes of the final partial block for each buffer are ignored. + + The argument `state` (IN/OUT) points to the quadruple hash state, + i.e., Lib_IntVector_Intrinsics_vec256[25] + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + of valid memory for each buffer, i.e., uint8_t[inputByteLen] +*/ +void +Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +/** +Absorb the final partial blocks of 4 input buffers and write the output states + + This function is intended to receive a quadruple hash state and 4 input buffers. + It processes the sequence of bytes at the end of each input buffer that is + shorter than 168 bytes (SHAKE128 block size); + any bytes of full blocks at the start of the input buffers are ignored. 
+ + The argument `state` (IN/OUT) points to the quadruple hash state, + i.e., Lib_IntVector_Intrinsics_vec256[25] + The arguments `input0/input1/input2/input3` (IN) point to `inputByteLen` bytes + of valid memory for each buffer, i.e., uint8_t[inputByteLen] + + Note: the full size of the input buffers must be passed in `inputByteLen`, including + the number of full-block bytes at the start of each input buffer that are ignored +*/ +void +Hacl_Hash_SHA3_Simd256_shake128_absorb_final( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +); + +/** +Squeeze a quadruple hash state to 4 output buffers + + This function is intended to receive a quadruple hash state and 4 output buffers. + It produces 4 outputs, each a multiple of 168 bytes (SHAKE128 block size); + any additional bytes of the final partial block for each buffer are ignored. + + The argument `state` (IN) points to the quadruple hash state, + i.e., Lib_IntVector_Intrinsics_vec256[25] + The arguments `output0/output1/output2/output3` (OUT) point to `outputByteLen` bytes + of valid memory for each buffer, i.e., uint8_t[outputByteLen] +*/ +void +Hacl_Hash_SHA3_Simd256_shake128_squeeze_nblocks( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen +); + +#if defined(__cplusplus) +} +#endif + +#define __Hacl_Hash_SHA3_Simd256_H_DEFINED +#endif diff --git a/include/msvc/internal/Hacl_Hash_SHA3_Scalar.h b/include/msvc/internal/Hacl_Hash_SHA3_Scalar.h new file mode 100644 index 000000000..88c93d178 --- /dev/null +++ b/include/msvc/internal/Hacl_Hash_SHA3_Scalar.h @@ -0,0 +1,51 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + +#ifndef __internal_Hacl_Hash_SHA3_Scalar_H +#define __internal_Hacl_Hash_SHA3_Scalar_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include <string.h> +#include "krml/internal/types.h" +#include "krml/lowstar_endianness.h" +#include "krml/internal/target.h" + +#include "../Hacl_Hash_SHA3_Scalar.h" + +extern const uint32_t Hacl_Impl_SHA3_Vec_keccak_rotc[24U]; + +extern const uint32_t Hacl_Impl_SHA3_Vec_keccak_piln[24U]; + +extern const uint64_t Hacl_Impl_SHA3_Vec_keccak_rndc[24U]; + +#if defined(__cplusplus) +} +#endif + +#define __internal_Hacl_Hash_SHA3_Scalar_H_DEFINED +#endif diff --git a/karamel/krmllib/dist/minimal/fstar_uint128_msvc.h b/karamel/krmllib/dist/minimal/fstar_uint128_msvc.h index cd1448ddb..6ff658f54 100644 --- a/karamel/krmllib/dist/minimal/fstar_uint128_msvc.h +++ b/karamel/krmllib/dist/minimal/fstar_uint128_msvc.h @@ -217,7 +217,7 @@ static const uint32_t FStar_UInt128_u32_64 = (uint32_t)64U; inline static uint64_t FStar_UInt128_add_u64_shift_left(uint64_t hi, uint64_t lo, uint32_t s) { - return (hi << s) + (lo >> FStar_UInt128_u32_64 - s); + return (hi << s) + (lo >> (FStar_UInt128_u32_64 - s)); } inline static uint64_t @@ -241,7 +241,7 @@ inline static FStar_UInt128_uint128 FStar_UInt128_shift_left_large(FStar_UInt128_uint128 a, uint32_t s) { FStar_UInt128_uint128 lit; lit.low = (uint64_t)0U; - lit.high = a.low << s - FStar_UInt128_u32_64; + lit.high = a.low << (s - FStar_UInt128_u32_64); return lit; } @@ -267,7 +267,7 @@ FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s) { inline static uint64_t FStar_UInt128_add_u64_shift_right(uint64_t hi, uint64_t lo, uint32_t s) { - return (lo >> s) + (hi << FStar_UInt128_u32_64 - s); + return (lo >> s) + (hi << (FStar_UInt128_u32_64 - s)); } inline static uint64_t @@ -290,7 +290,7 @@ FStar_UInt128_shift_right_small(FStar_UInt128_uint128 a, uint32_t s) { inline static FStar_UInt128_uint128 FStar_UInt128_shift_right_large(FStar_UInt128_uint128 a, uint32_t s) { FStar_UInt128_uint128 lit; - lit.low = a.high >> s - FStar_UInt128_u32_64; + lit.low = a.high >> (s - FStar_UInt128_u32_64); lit.high = (uint64_t)0U; return lit; } @@ -488,7 +488,7 @@ FStar_UInt128_mul_wide_impl(uint64_t x, uint64_t y) { u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_), w3); lit.high = x_ * (y >> FStar_UInt128_u32_32) + (t_ >> FStar_UInt128_u32_32) + - (u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_) >> + ((u1 * (y >> FStar_UInt128_u32_32) + FStar_UInt128_u64_mod_32(t_)) >> FStar_UInt128_u32_32); return lit; } diff --git a/libcrux/include/Eurydice.h b/libcrux/include/Eurydice.h new file mode 100644 index 000000000..fdfac1909 --- /dev/null +++ b/libcrux/include/Eurydice.h @@ -0,0 +1,20 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: /Users/franziskus/repos/eurydice//eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc + F* version: a32b316e + KaRaMeL version: abb38e1d + */ + +#ifndef __Eurydice_H +#define __Eurydice_H + +#include "core.h" +#include "eurydice_glue.h" + +extern uint8_t Eurydice_bitand_pv_u8(uint8_t *x, uint8_t y); + +extern uint8_t Eurydice_shr_pv_u8(uint8_t *x, int32_t y); + + +#define __Eurydice_H_DEFINED +#endif diff --git a/libcrux/include/Libcrux_Kem_Kyber_Kyber768.h b/libcrux/include/Libcrux_Kem_Kyber_Kyber768.h new file mode 100644 index 000000000..f82fce7c7 --- /dev/null +++ b/libcrux/include/Libcrux_Kem_Kyber_Kyber768.h @@ -0,0 +1,33 @@ +#ifndef __Libcrux_Kem_Kyber_Kyber768_H +#define __Libcrux_Kem_Kyber_Kyber768_H + +#if defined(__cplusplus) +extern "C" +{ 
+#endif + +#include <stdint.h> + +#define KYBER768_SECRETKEYBYTES 2400 +#define KYBER768_PUBLICKEYBYTES 1184 +#define KYBER768_CIPHERTEXTBYTES 1088 +#define KYBER768_SHAREDSECRETBYTES 32 + void Libcrux_Kyber768_GenerateKeyPair(uint8_t* pk, + uint8_t* sk, + uint8_t randomness[64]); + + void Libcrux_Kyber768_Encapsulate(uint8_t* ct, + uint8_t* ss, + uint8_t (*pk)[1184], + uint8_t randomness[32]); + + void Libcrux_Kyber768_Decapsulate(uint8_t ss[32U], + uint8_t (*ct)[1088U], + uint8_t (*sk)[2400U]); + +#if defined(__cplusplus) +} +#endif + +#define __Libcrux_Kem_Kyber_Kyber768_H_DEFINED +#endif
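A minimal round-trip sketch for this KEM API (illustrative only; both randomness buffers are assumed to be filled by a caller-provided RNG, and error handling is omitted):

#include "Libcrux_Kem_Kyber_Kyber768.h"

void kyber768_roundtrip_example(uint8_t keygen_rand[64], uint8_t encaps_rand[32])
{
  uint8_t pk[KYBER768_PUBLICKEYBYTES];
  uint8_t sk[KYBER768_SECRETKEYBYTES];
  uint8_t ct[KYBER768_CIPHERTEXTBYTES];
  uint8_t ss_sender[KYBER768_SHAREDSECRETBYTES];
  uint8_t ss_receiver[KYBER768_SHAREDSECRETBYTES];

  Libcrux_Kyber768_GenerateKeyPair(pk, sk, keygen_rand);
  /* Sender: derive a shared secret and a ciphertext from the public key. */
  Libcrux_Kyber768_Encapsulate(ct, ss_sender, &pk, encaps_rand);
  /* Receiver: recover the same 32-byte shared secret from the ciphertext. */
  Libcrux_Kyber768_Decapsulate(ss_receiver, &ct, &sk);
}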
diff --git a/libcrux/include/core.h b/libcrux/include/core.h new file mode 100644 index 000000000..462ce5d78 --- /dev/null +++ b/libcrux/include/core.h @@ -0,0 +1,38 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: ../../../eurydice/eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc + F* version: b5cb71b8 + KaRaMeL version: 1282f04f + */ + +#ifndef __core_H +#define __core_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "eurydice_glue.h" + +typedef struct core_ops_range_Range__size_t_s +{ + size_t start; + size_t end; +} +core_ops_range_Range__size_t; + +extern uint8_t Eurydice_bitand_pv_u8(uint8_t *x, uint8_t y); + +extern uint8_t Eurydice_shr_pv_u8(uint8_t *x, int32_t y); + +#define core_option_None 0 +#define core_option_Some 1 + +typedef uint8_t core_option_Option__size_t_tags; + +#if defined(__cplusplus) +} +#endif + +#define __core_H_DEFINED +#endif diff --git a/libcrux/include/eurydice_glue.h b/libcrux/include/eurydice_glue.h new file mode 100644 index 000000000..a0f8eff89 --- /dev/null +++ b/libcrux/include/eurydice_glue.h @@ -0,0 +1,220 @@ +#pragma once + +#if defined(__cplusplus) +extern "C" { +#endif + +#include <inttypes.h> +#include <stdbool.h> +#include <stddef.h> +#include <stdlib.h> +#include <string.h> + +#include "krml/lowstar_endianness.h" +#include "krml/internal/target.h" + +#define LowStar_Ignore_ignore(e, t, _ret_t) ((void)e) + +// SLICES, ARRAYS, ETC. + +// We represent a slice as a pair of an (untyped) pointer, along with the length of the slice, i.e. +// the number of elements in the slice (this is NOT the number of bytes). This design choice has two +// important consequences. +// - if you need to use `ptr`, you MUST cast it to a proper type *before* performing pointer +// arithmetic on it (remember that C desugars pointer arithmetic based on the type of the address) +// - if you need to use `len` for a C style function (e.g. memcpy, memcmp), you need to multiply it +// by sizeof t, where t is the type of the elements. +typedef struct { + void *ptr; + size_t len; +} Eurydice_slice; + +// Helper macro to create a slice out of a pointer x, a start index in x (included), and an end +// index in x (excluded). The argument x must be suitably cast to something that can decay (see +// remark above about how pointer arithmetic works in C), meaning either pointer or array type. +#define EURYDICE_SLICE(x, start, end) ((Eurydice_slice){ .ptr = (void*)(x + start), .len = end - start }) +#define EURYDICE_SLICE_LEN(s, _) s.len +#define Eurydice_slice_index(s, i, t, _ret_t) (((t*) s.ptr)[i]) +#define Eurydice_slice_subslice(s, r, t, _, _ret_t) EURYDICE_SLICE((t*)s.ptr, r.start, r.end) +#define Eurydice_slice_subslice_to(s, subslice_end_pos, t, _, _ret_t) EURYDICE_SLICE((t*)s.ptr, 0, subslice_end_pos) +#define Eurydice_slice_subslice_from(s, subslice_start_pos, t, _, _ret_t) EURYDICE_SLICE((t*)s.ptr, subslice_start_pos, s.len) +#define Eurydice_array_to_slice(end, x, t, _ret_t) EURYDICE_SLICE(x, 0, end) /* x is already at an array type, no need for cast */ +#define Eurydice_array_to_subslice(_arraylen, x, r, t, _, _ret_t) EURYDICE_SLICE((t*)x, r.start, r.end) +#define Eurydice_array_to_subslice_to(_size, x, r, t, _range_t, _ret_t) EURYDICE_SLICE((t*)x, 0, r) +#define Eurydice_array_to_subslice_from(size, x, r, t, _range_t, _ret_t) EURYDICE_SLICE((t*)x, r, size) +#define Eurydice_array_repeat(dst, len, init, t, _ret_t) ERROR "should've been desugared" +#define core_slice___Slice_T___len(s, t, _ret_t) EURYDICE_SLICE_LEN(s, t) +#define core_slice___Slice_T___copy_from_slice(dst, src, t, _ret_t) memcpy(dst.ptr, src.ptr, dst.len * sizeof(t)) +#define core_array___Array_T__N__23__as_slice(len_, ptr_, t, _ret_t) ((Eurydice_slice){ .ptr = ptr_, .len = len_ }) + +#define core_array_TryFromSliceError uint8_t + +#define Eurydice_array_eq(sz, a1, a2, t, _, _ret_t) (memcmp(a1, a2, sz * sizeof(t)) == 0) +#define core_array_equality___core__cmp__PartialEq__Array_B__N___for__Array_A__N____eq Eurydice_array_eq + +#define core_slice___Slice_T___split_at(slice, mid, element_type, ret_t) \ + ((ret_t){ \ + .fst = EURYDICE_SLICE((element_type*)slice.ptr, 0, mid), \ + .snd = EURYDICE_SLICE((element_type*)slice.ptr, mid, slice.len)}) + +// Can't have a flexible array as a member of a union -- this violates strict aliasing rules. +typedef struct +{ + uint8_t tag; + uint8_t case_Ok[]; +} +result_tryfromslice_flexible; + +// See note in karamel/lib/Inlining.ml if you change this +#define Eurydice_slice_to_array2(dst, src, _, t_arr, _ret_t) Eurydice_slice_to_array3((result_tryfromslice_flexible *)dst, src, sizeof(t_arr)) + +static inline void Eurydice_slice_to_array3(result_tryfromslice_flexible *dst, Eurydice_slice src, size_t sz) { + dst->tag = 0; + memcpy(dst->case_Ok, src.ptr, sz); +}
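/* Illustrative sketch (not part of the generated glue): building a slice over
 * a local array with EURYDICE_SLICE and reading an element back. `buf` is a
 * hypothetical caller buffer used only for this example. */
static inline uint8_t eurydice_slice_example(void) {
  uint8_t buf[4] = { 1, 2, 3, 4 };
  Eurydice_slice s = EURYDICE_SLICE(buf, 1, 3);          /* elements 1..2, so s.len == 2 */
  return Eurydice_slice_index(s, 0, uint8_t, uint8_t);   /* reads buf[1], i.e., 2 */
}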
+ +// CORE STUFF (conversions, endianness, ...) + +static inline void core_num__u32_8__to_be_bytes(uint32_t src, uint8_t dst[4]) { + uint32_t x = htobe32(src); + memcpy(dst, &x, 4); +} + +static inline int64_t +core_convert_num___core__convert__From_i32__for_i64__59__from(int32_t x) +{ + return x; +} + +// unsigned overflow wraparound semantics in C +static inline uint16_t core_num__u16_7__wrapping_add(uint16_t x, uint16_t y) { return x + y; } +static inline uint8_t core_num__u8_6__wrapping_sub(uint8_t x, uint8_t y) { return x - y; } + +static inline void core_ops_arith__i32_319__add_assign(int32_t *x0, int32_t *x1) { + *x0 = *x0 + *x1; +} + +static inline uint8_t Eurydice_bitand_pv_u8(uint8_t *p, uint8_t v) { return (*p) & v; } +static inline uint8_t Eurydice_shr_pv_u8(uint8_t *p, int32_t v) { return (*p) >> v; } + +// ITERATORS + +#define core_num_nonzero_NonZeroUsize size_t +#define Eurydice_range_iter_next(iter_ptr, t, ret_t) ( \ + ((iter_ptr)->start == (iter_ptr)->end) ? \ + ((ret_t) { .tag = core_option_None }) : \ + ((ret_t) { .tag = core_option_Some, .f0 = (iter_ptr)->start++ }) \ + ) + +#define core_iter_range___core__iter__traits__iterator__Iterator_for_core__ops__range__Range_A___3__next Eurydice_range_iter_next + +// See note in karamel/lib/Inlining.ml if you change this +#define Eurydice_into_iter(x, t, _ret_t) (x) +#define core_iter_traits_collect___core__iter__traits__collect__IntoIterator_for_I___into_iter Eurydice_into_iter + +typedef struct { + Eurydice_slice slice; + size_t chunk_size; +} Eurydice_chunks; + + +// Can't use macros Eurydice_slice_subslice_{to,from} because they require a type, and this static +// inline function cannot receive a type as an argument. Instead, we receive the element size and +// use it to perform manual offset computations rather than going through the macros. +static inline Eurydice_slice chunk_next(Eurydice_chunks *chunks, size_t element_size) { + size_t chunk_size = chunks->slice.len >= chunks->chunk_size ? chunks->chunk_size : chunks->slice.len; + Eurydice_slice curr_chunk; + curr_chunk.ptr = chunks->slice.ptr; + curr_chunk.len = chunk_size; + chunks->slice.ptr = (char *)(chunks->slice.ptr) + chunk_size * element_size; + chunks->slice.len = chunks->slice.len - chunk_size; + return curr_chunk; +} + +#define core_slice___Slice_T___chunks(slice_, sz_, t, _ret_t) ((Eurydice_chunks){ .slice = slice_, .chunk_size = sz_ }) +#define core_slice___Slice_T___chunks_exact(slice_, sz_, t, _ret_t) ((Eurydice_chunks){ \ + .slice = { .ptr = slice_.ptr, .len = slice_.len - (slice_.len % sz_) }, \ + .chunk_size = sz_ }) +#define core_slice_iter_Chunks Eurydice_chunks +#define core_slice_iter_ChunksExact Eurydice_chunks +#define core_slice_iter___core__iter__traits__iterator__Iterator_for_core__slice__iter__Chunks__a__T___70__next(iter, t, ret_t) \ + (((iter)->slice.len == 0) ? \ + ((ret_t) { .tag = core_option_None }) : \ + ((ret_t){ \ + .tag = core_option_Some, \ + .f0 = chunk_next(iter, sizeof(t)) })) +#define core_slice_iter__core__slice__iter__ChunksExact__a__T__89__next(iter, t, _ret_t) \ + core_slice_iter__core__slice__iter__Chunks__a__T__70__next(iter, t) + +typedef struct { + Eurydice_slice s; + size_t index; +} Eurydice_slice_iterator; + +#define core_slice___Slice_T___iter(x, t, _ret_t) ((Eurydice_slice_iterator){ .s = x, .index = 0 }) +#define core_slice_iter_Iter Eurydice_slice_iterator +#define core_slice_iter__core__slice__iter__Iter__a__T__181__next(iter, t, ret_t) \ + (((iter)->index == (iter)->s.len) ? \ + ((ret_t) { .tag = core_option_None }) : \ + ((ret_t){ \ + .tag = core_option_Some, \ + .f0 = ((iter)->index++, &((t*)((iter)->s.ptr))[(iter)->index-1]) })) + +// MISC + +#define core_fmt_Formatter void + + +// VECTORS + +/* For now these are passed by value -- three words. We could conceivably change + * the representation to heap-allocate this struct and only pass around the + * pointer (one word). */ +typedef struct { + void *ptr; + size_t len; /* the number of elements */ + size_t alloc_size; /* the size of the allocation, in number of BYTES */ +} Eurydice_vec_s, *Eurydice_vec; + +/* Here, we set everything to zero rather than use a non-standard GCC + * statement-expression -- this suitably initializes ptr to NULL and len and + * size to 0. */ +#define EURYDICE_VEC_NEW(_) calloc(1, sizeof(Eurydice_vec_s)) +#define EURYDICE_VEC_PUSH(v, x, t) \ + do { \ + /* Grow the vector if capacity has been reached. 
*/ \ + if (v->len == v->alloc_size/sizeof(t)) { \ + /* Assuming that this does not exceed SIZE_MAX, because code proven \ + * correct by Aeneas. Would this even happen in practice? */ \ + size_t new_size; \ + if (v->alloc_size == 0) \ + new_size = 8 * sizeof(t); \ + else if (v->alloc_size <= SIZE_MAX/2) \ + /* TODO: discuss growth policy */ \ + new_size = 2 * v->alloc_size; \ + else \ + new_size = (SIZE_MAX/sizeof(t))*sizeof(t); \ + v->ptr = realloc(v->ptr, new_size); \ + v->alloc_size = new_size; \ + } \ + ((t*)v->ptr)[v->len] = x; \ + v->len++; \ + } while (0) + +#define EURYDICE_VEC_DROP(v, t) \ + do { \ + free(v->ptr); \ + free(v); \ + } while (0) + +#define EURYDICE_VEC_INDEX(v, i, t) &((t*) v->ptr)[i] +#define EURYDICE_VEC_LEN(v, t) (v)->len + +/* TODO: remove GCC-isms */ +#define EURYDICE_BOX_NEW(x, t) ({ t *p = malloc(sizeof(t)); *p = x; p; }) + +#define EURYDICE_REPLACE(ptr, new_v, t) ({ t old_v = *ptr; *ptr = new_v; old_v; }) + +#if defined(__cplusplus) +} +#endif diff --git a/libcrux/include/internal/core.h b/libcrux/include/internal/core.h new file mode 100644 index 000000000..1e1c0fc19 --- /dev/null +++ b/libcrux/include/internal/core.h @@ -0,0 +1,32 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: ../../../eurydice/eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc + F* version: b5cb71b8 + KaRaMeL version: 1282f04f + */ + +#ifndef __internal_core_H +#define __internal_core_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "../core.h" +#include "eurydice_glue.h" + +static inline int64_t +core_convert_num___core__convert__From_i32__for_i64__59__from(int32_t x0); + +static inline uint16_t core_num__u16_7__wrapping_add(uint16_t x0, uint16_t x1); + +static inline uint8_t core_num__u8_6__wrapping_sub(uint8_t x0, uint8_t x1); + +#define CORE_NUM__U32_8__BITS (32U) + +#if defined(__cplusplus) +} +#endif + +#define __internal_core_H_DEFINED +#endif diff --git a/libcrux/include/internal/libcrux_kyber.h b/libcrux/include/internal/libcrux_kyber.h new file mode 100644 index 000000000..1c7c5ad87 --- /dev/null +++ b/libcrux/include/internal/libcrux_kyber.h @@ -0,0 +1,45 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: /Users/franziskus/repos/eurydice//eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc + F* version: a32b316e + KaRaMeL version: abb38e1d + */ + +#ifndef __internal_libcrux_kyber_H +#define __internal_libcrux_kyber_H + +#include "internal/core.h" +#include "../libcrux_kyber.h" +#include "eurydice_glue.h" + +typedef struct core_ops_range_Range__uint32_t_s +{ + uint32_t start; + uint32_t end; +} +core_ops_range_Range__uint32_t; + +typedef struct core_ops_range_Range__int32_t_s +{ + int32_t start; + int32_t end; +} +core_ops_range_Range__int32_t; + +typedef struct core_option_Option__Eurydice_slice_uint8_t_s +{ + core_option_Option__size_t_tags tag; + Eurydice_slice f0; +} +core_option_Option__Eurydice_slice_uint8_t; + +typedef struct K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t_s +{ + Eurydice_slice fst; + Eurydice_slice snd; +} +K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t; + + +#define __internal_libcrux_kyber_H_DEFINED +#endif diff --git a/libcrux/include/internal/libcrux_kyber768.h b/libcrux/include/internal/libcrux_kyber768.h new file mode 100644 index 000000000..724803510 --- /dev/null +++ b/libcrux/include/internal/libcrux_kyber768.h @@ -0,0 +1,105 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: ../../../eurydice/eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc + F* 
version: b5cb71b8 + KaRaMeL version: 1282f04f + */ + +#ifndef __internal_libcrux_kyber768_H +#define __internal_libcrux_kyber768_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "internal/libcrux_kyber_common.h" +#include "../libcrux_kyber768.h" +#include "eurydice_glue.h" + +typedef struct core_option_Option__Eurydice_slice_uint8_t_s +{ + core_option_Option__size_t_tags tag; + Eurydice_slice f0; +} +core_option_Option__Eurydice_slice_uint8_t; + +void +libcrux_kyber_ind_cpa_into_padded_array___34size_t(Eurydice_slice slice, uint8_t ret[34U]); + +void +libcrux_kyber_ind_cpa_into_padded_array___33size_t(Eurydice_slice slice, uint8_t ret[33U]); + +void libcrux_kyber_hash_functions_PRF___128size_t(Eurydice_slice input, uint8_t ret[128U]); + +void +libcrux_kyber_sampling_sample_from_binomial_distribution___2size_t( + Eurydice_slice randomness, + int32_t ret[256U] +); + +typedef struct K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t_s +{ + Eurydice_slice fst; + Eurydice_slice snd; +} +K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t; + +#define core_result_Ok 0 +#define core_result_Err 1 + +typedef uint8_t core_result_Result__uint8_t_32size_t__core_array_TryFromSliceError_tags; + +typedef struct core_result_Result__uint8_t_32size_t__core_array_TryFromSliceError_s +{ + core_result_Result__uint8_t_32size_t__core_array_TryFromSliceError_tags tag; + union { + uint8_t case_Ok[32U]; + core_array_TryFromSliceError case_Err; + } + val; +} +core_result_Result__uint8_t_32size_t__core_array_TryFromSliceError; + +void +core_result__core__result__Result_T__E___unwrap__uint8_t_32size_t__core_array_TryFromSliceError( + core_result_Result__uint8_t_32size_t__core_array_TryFromSliceError self, + uint8_t ret[32U] +); + +void +libcrux_kyber_ind_cpa_into_padded_array___64size_t(Eurydice_slice slice, uint8_t ret[64U]); + +void +libcrux_kyber_serialize_compress_then_serialize_ring_element_u___10size_t_320size_t( + int32_t re[256U], + uint8_t ret[320U] +); + +void +libcrux_kyber_serialize_compress_then_serialize_ring_element_v___4size_t_128size_t( + int32_t re[256U], + uint8_t ret[128U] +); + +void +libcrux_kyber_serialize_deserialize_then_decompress_ring_element_u___10size_t( + Eurydice_slice serialized, + int32_t ret[256U] +); + +void libcrux_kyber_ntt_ntt_vector_u___10size_t(int32_t re[256U], int32_t ret[256U]); + +void +libcrux_kyber_serialize_deserialize_then_decompress_ring_element_v___4size_t( + Eurydice_slice serialized, + int32_t ret[256U] +); + +void libcrux_kyber_hash_functions_PRF___32size_t(Eurydice_slice input, uint8_t ret[32U]); + +#if defined(__cplusplus) +} +#endif + +#define __internal_libcrux_kyber768_H_DEFINED +#endif diff --git a/libcrux/include/internal/libcrux_kyber_common.h b/libcrux/include/internal/libcrux_kyber_common.h new file mode 100644 index 000000000..b69cd5fbf --- /dev/null +++ b/libcrux/include/internal/libcrux_kyber_common.h @@ -0,0 +1,1285 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: ../../../eurydice/eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc + F* version: b5cb71b8 + KaRaMeL version: 1282f04f + */ + +#ifndef __internal_libcrux_kyber_common_H +#define __internal_libcrux_kyber_common_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "internal/core.h" +#include "libcrux_digest.h" +#include "core.h" +#include "eurydice_glue.h" +#include "libcrux_hacl_glue.h" + +#define LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS ((int32_t)3329) + +#define LIBCRUX_KYBER_CONSTANTS_BITS_PER_COEFFICIENT ((size_t)12U) + +#define 
LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT ((size_t)256U) + +#define LIBCRUX_KYBER_CONSTANTS_BITS_PER_RING_ELEMENT (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * (size_t)12U) + +#define LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT (LIBCRUX_KYBER_CONSTANTS_BITS_PER_RING_ELEMENT / (size_t)8U) + +#define LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE ((size_t)32U) + +#define LIBCRUX_KYBER_CONSTANTS_CPA_PKE_KEY_GENERATION_SEED_SIZE ((size_t)32U) + +#define LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE ((size_t)32U) + +#define LIBCRUX_KYBER_ARITHMETIC_MONTGOMERY_SHIFT (16U) + +static inline uint32_t +libcrux_kyber_arithmetic_get_n_least_significant_bits(uint8_t n, uint32_t value) +{ + return value & ((1U << (uint32_t)n) - 1U); +} + +#define LIBCRUX_KYBER_ARITHMETIC_BARRETT_SHIFT ((int64_t)26) + +#define LIBCRUX_KYBER_ARITHMETIC_BARRETT_R ((int64_t)1 << (uint32_t)LIBCRUX_KYBER_ARITHMETIC_BARRETT_SHIFT) + +#define LIBCRUX_KYBER_ARITHMETIC_BARRETT_MULTIPLIER ((int64_t)20159) + +static inline int32_t libcrux_kyber_arithmetic_barrett_reduce(int32_t value) +{ + int64_t + t = + core_convert_num___core__convert__From_i32__for_i64__59__from(value) + * LIBCRUX_KYBER_ARITHMETIC_BARRETT_MULTIPLIER + + (LIBCRUX_KYBER_ARITHMETIC_BARRETT_R >> 1U); + int32_t quotient = (int32_t)(t >> (uint32_t)LIBCRUX_KYBER_ARITHMETIC_BARRETT_SHIFT); + return value - quotient * LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS; +} + +#define LIBCRUX_KYBER_ARITHMETIC_INVERSE_OF_MODULUS_MOD_MONTGOMERY_R (62209U) + +static inline int32_t libcrux_kyber_arithmetic_montgomery_reduce(int32_t value) +{ + uint32_t + t = + libcrux_kyber_arithmetic_get_n_least_significant_bits(LIBCRUX_KYBER_ARITHMETIC_MONTGOMERY_SHIFT, + (uint32_t)value) + * LIBCRUX_KYBER_ARITHMETIC_INVERSE_OF_MODULUS_MOD_MONTGOMERY_R; + int16_t + k = + (int16_t)libcrux_kyber_arithmetic_get_n_least_significant_bits(LIBCRUX_KYBER_ARITHMETIC_MONTGOMERY_SHIFT, + t); + int32_t k_times_modulus = (int32_t)k * LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS; + int32_t c = k_times_modulus >> (uint32_t)LIBCRUX_KYBER_ARITHMETIC_MONTGOMERY_SHIFT; + int32_t value_high = value >> (uint32_t)LIBCRUX_KYBER_ARITHMETIC_MONTGOMERY_SHIFT; + return value_high - c; +} + +static inline int32_t +libcrux_kyber_arithmetic_montgomery_multiply_fe_by_fer(int32_t fe, int32_t fer) +{ + return libcrux_kyber_arithmetic_montgomery_reduce(fe * fer); +} + +#define LIBCRUX_KYBER_ARITHMETIC_MONTGOMERY_R_SQUARED_MOD_FIELD_MODULUS ((int32_t)1353) + +static inline int32_t libcrux_kyber_arithmetic_to_standard_domain(int32_t mfe) +{ + return + libcrux_kyber_arithmetic_montgomery_reduce(mfe + * LIBCRUX_KYBER_ARITHMETIC_MONTGOMERY_R_SQUARED_MOD_FIELD_MODULUS); +} + +static inline uint16_t libcrux_kyber_arithmetic_to_unsigned_representative(int32_t fe) +{ + return (uint16_t)(fe + (LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS & fe >> 31U)); +} + +static const +int32_t +libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO[256U] = { 0U }; + +static inline uint8_t libcrux_kyber_compress_compress_message_coefficient(uint16_t fe) +{ + int16_t shifted = (int16_t)1664 - (int16_t)fe; + int16_t mask = shifted >> 15U; + int16_t shifted_to_positive = mask ^ shifted; + int16_t shifted_positive_in_range = shifted_to_positive - (int16_t)832; + return (uint8_t)(shifted_positive_in_range >> 15U & (int16_t)1); +} + +static inline int32_t +libcrux_kyber_compress_compress_ciphertext_coefficient(uint8_t coefficient_bits, uint16_t fe) +{ + uint64_t compressed = (uint64_t)fe << (uint32_t)coefficient_bits; + compressed = compressed 
+static inline int32_t
+libcrux_kyber_compress_compress_ciphertext_coefficient(uint8_t coefficient_bits, uint16_t fe)
+{
+  uint64_t compressed = (uint64_t)fe << (uint32_t)coefficient_bits;
+  compressed = compressed + 1664ULL;
+  compressed = compressed * 10321340ULL;
+  compressed = compressed >> 35U;
+  return
+    (int32_t)libcrux_kyber_arithmetic_get_n_least_significant_bits(coefficient_bits,
+      (uint32_t)compressed);
+}
+
+/* Decompress a one-bit coefficient: 0 maps to 0 and 1 maps to 1665 = (q + 1) / 2. */
+static inline int32_t libcrux_kyber_compress_decompress_message_coefficient(int32_t fe)
+{
+  return -fe & (LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS + (int32_t)1) / (int32_t)2;
+}
+
+/* Computes round(fe * q / 2^coefficient_bits): the doubling and the added
+   2^coefficient_bits implement rounding to nearest before the final shift. */
+static inline int32_t
+libcrux_kyber_compress_decompress_ciphertext_coefficient(uint8_t coefficient_bits, int32_t fe)
+{
+  uint32_t decompressed = (uint32_t)fe * (uint32_t)LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS;
+  decompressed = (decompressed << 1U) + (1U << (uint32_t)coefficient_bits);
+  decompressed = decompressed >> (uint32_t)((uint32_t)coefficient_bits + 1U);
+  return (int32_t)decompressed;
+}
+
+/* Constant-time: returns 1 if value != 0 and 0 otherwise, without branching
+   on the (secret) input. */
+static inline uint8_t libcrux_kyber_constant_time_ops_is_non_zero(uint8_t value)
+{
+  uint16_t value0 = (uint16_t)value;
+  uint16_t uu____0 = value0;
+  uint16_t
+  result =
+    (((uint32_t)uu____0 | (uint32_t)core_num__u16_7__wrapping_add(~value0, 1U)) & 0xFFFFU)
+    >> 8U
+    & 1U;
+  return (uint8_t)result;
+}
+
+/* Constant-time select: returns lhs when selector == 0 and rhs otherwise,
+   reading both inputs regardless of the selector. */
+static inline void
+libcrux_kyber_constant_time_ops_select_shared_secret_in_constant_time(
+  Eurydice_slice lhs,
+  Eurydice_slice rhs,
+  uint8_t selector,
+  uint8_t ret[32U]
+)
+{
+  uint8_t
+  mask = core_num__u8_6__wrapping_sub(libcrux_kyber_constant_time_ops_is_non_zero(selector), 1U);
+  uint8_t out[32U] = { 0U };
+  for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE; i++)
+  {
+    size_t i0 = i;
+    uint8_t uu____0 = (uint32_t)Eurydice_slice_index(lhs, i0, uint8_t, uint8_t) & (uint32_t)mask;
+    uint8_t *uu____1 = &Eurydice_slice_index(rhs, i0, uint8_t, uint8_t);
+    out[i0] = (uint32_t)uu____0 | ((uint32_t)uu____1[0U] & (uint32_t)~mask);
+  }
+  memcpy(ret, out, (size_t)32U * sizeof (uint8_t));
+}
+
+/* G = SHA3-512 and H = SHA3-256, as in the ML-KEM specification. */
+static inline void libcrux_kyber_hash_functions_G(Eurydice_slice input, uint8_t ret[64U])
+{
+  uint8_t ret0[64U];
+  libcrux_digest_sha3_512(input, ret0);
+  memcpy(ret, ret0, (size_t)64U * sizeof (uint8_t));
+}
+
+static inline void libcrux_kyber_hash_functions_H(Eurydice_slice input, uint8_t ret[32U])
+{
+  uint8_t ret0[32U];
+  libcrux_digest_sha3_256(input, ret0);
+  memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t));
+}
+
+static inline void
+libcrux_kyber_hash_functions_free_state(
+  libcrux_digest_incremental_x4_Shake128StateX4 xof_state
+)
+{
+  libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__free_memory(xof_state);
+}
+
+typedef struct K___uint8_t_uint8_t_uint8_t_s
+{
+  uint8_t fst;
+  uint8_t snd;
+  uint8_t thd;
+}
+K___uint8_t_uint8_t_uint8_t;
+
+/* Packs two 12-bit coefficients into three bytes, low bits first. */
+static inline K___uint8_t_uint8_t_uint8_t
+libcrux_kyber_serialize_compress_coefficients_3(uint16_t coefficient1, uint16_t coefficient2)
+{
+  uint8_t coef1 = (uint8_t)((uint32_t)coefficient1 & 255U);
+  uint8_t coef2 = (uint8_t)((uint32_t)coefficient1 >> 8U | ((uint32_t)coefficient2 & 15U) << 4U);
+  uint8_t coef3 = (uint8_t)((uint32_t)coefficient2 >> 4U & 255U);
+  return ((K___uint8_t_uint8_t_uint8_t){ .fst = coef1, .snd = coef2, .thd = coef3 });
+}
+
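+/* Serialize a ring element without compression: each pair of coefficients
+   is mapped to its canonical representative in [0, q) and packed as
+   2 * 12 bits = 3 bytes, for 384 bytes in total. */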
+static inline void
+libcrux_kyber_serialize_serialize_uncompressed_ring_element(
+  int32_t re[256U],
+  uint8_t ret[384U]
+)
+{
+  uint8_t serialized[384U] = { 0U };
+  for
+  (size_t
+    i = (size_t)0U;
+    i
+    <
+      core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)256U, re, int32_t, Eurydice_slice),
+        int32_t,
+        size_t)
+      / (size_t)2U;
+    i++)
+  {
+    size_t i0 = i;
+    Eurydice_slice
+    coefficients =
+      Eurydice_array_to_subslice((size_t)256U,
+        re,
+        (
+          (core_ops_range_Range__size_t){
+            .start = i0 * (size_t)2U,
+            .end = i0 * (size_t)2U + (size_t)2U
+          }
+        ),
+        int32_t,
+        core_ops_range_Range__size_t,
+        Eurydice_slice);
+    uint16_t
+    coefficient1 =
+      libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients,
+        (size_t)0U,
+        int32_t,
+        int32_t));
+    uint16_t
+    coefficient2 =
+      libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients,
+        (size_t)1U,
+        int32_t,
+        int32_t));
+    K___uint8_t_uint8_t_uint8_t
+    uu____0 = libcrux_kyber_serialize_compress_coefficients_3(coefficient1, coefficient2);
+    uint8_t coef1 = uu____0.fst;
+    uint8_t coef2 = uu____0.snd;
+    uint8_t coef3 = uu____0.thd;
+    serialized[(size_t)3U * i0] = coef1;
+    serialized[(size_t)3U * i0 + (size_t)1U] = coef2;
+    serialized[(size_t)3U * i0 + (size_t)2U] = coef3;
+  }
+  memcpy(ret, serialized, (size_t)384U * sizeof (uint8_t));
+}
+
+/* Centered binomial sampling with eta = 2: every 4 bytes of randomness give
+   8 coefficients. 0x55555555 = 1431655765 masks the even bits, so
+   even_bits + odd_bits adds adjacent bit pairs, and each coefficient is the
+   difference of two 2-bit sums, in [-2, 2]. */
+static inline void
+libcrux_kyber_sampling_sample_from_binomial_distribution_2(
+  Eurydice_slice randomness,
+  int32_t ret[256U]
+)
+{
+  int32_t sampled[256U];
+  memcpy(sampled,
+    libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO,
+    (size_t)256U * sizeof (int32_t));
+  for
+  (size_t
+    i0 = (size_t)0U;
+    i0
+    < core_slice___Slice_T___len(randomness, uint8_t, size_t) / (size_t)4U;
+    i0++)
+  {
+    size_t chunk_number = i0;
+    Eurydice_slice
+    byte_chunk =
+      Eurydice_slice_subslice(randomness,
+        (
+          (core_ops_range_Range__size_t){
+            .start = chunk_number * (size_t)4U,
+            .end = chunk_number * (size_t)4U + (size_t)4U
+          }
+        ),
+        uint8_t,
+        core_ops_range_Range__size_t,
+        Eurydice_slice);
+    uint32_t uu____0 = (uint32_t)Eurydice_slice_index(byte_chunk, (size_t)0U, uint8_t, uint8_t);
+    uint32_t
+    uu____1 =
+      uu____0
+      | (uint32_t)Eurydice_slice_index(byte_chunk, (size_t)1U, uint8_t, uint8_t) << 8U;
+    uint32_t
+    uu____2 =
+      uu____1
+      | (uint32_t)Eurydice_slice_index(byte_chunk, (size_t)2U, uint8_t, uint8_t) << 16U;
+    uint32_t
+    random_bits_as_u32 =
+      uu____2
+      | (uint32_t)Eurydice_slice_index(byte_chunk, (size_t)3U, uint8_t, uint8_t) << 24U;
+    uint32_t even_bits = random_bits_as_u32 & 1431655765U;
+    uint32_t odd_bits = random_bits_as_u32 >> 1U & 1431655765U;
+    uint32_t coin_toss_outcomes = even_bits + odd_bits;
+    for (uint32_t i = 0U; i < CORE_NUM__U32_8__BITS / 4U; i++)
+    {
+      uint32_t outcome_set = i;
+      uint32_t outcome_set0 = outcome_set * 4U;
+      int32_t outcome_1 = (int32_t)(coin_toss_outcomes >> (uint32_t)outcome_set0 & 3U);
+      int32_t outcome_2 = (int32_t)(coin_toss_outcomes >> (uint32_t)(outcome_set0 + 2U) & 3U);
+      size_t offset = (size_t)(outcome_set0 >> 2U);
+      sampled[(size_t)8U * chunk_number + offset] = outcome_1 - outcome_2;
+    }
+  }
+  memcpy(ret, sampled, (size_t)256U * sizeof (int32_t));
+}
+
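+/* Centered binomial sampling with eta = 3: every 3 bytes of randomness give
+   4 coefficients. 0x249249 = 2396745 masks every third bit of the 24-bit
+   word, so first + second + third adds bit triples, and each coefficient is
+   the difference of two 3-bit sums, in [-3, 3]. */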
+static inline void
+libcrux_kyber_sampling_sample_from_binomial_distribution_3(
+  Eurydice_slice randomness,
+  int32_t ret[256U]
+)
+{
+  int32_t sampled[256U];
+  memcpy(sampled,
+    libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO,
+    (size_t)256U * sizeof (int32_t));
+  for
+  (size_t
+    i0 = (size_t)0U;
+    i0
+    < core_slice___Slice_T___len(randomness, uint8_t, size_t) / (size_t)3U;
+    i0++)
+  {
+    size_t chunk_number = i0;
+    Eurydice_slice
+    byte_chunk =
+      Eurydice_slice_subslice(randomness,
+        (
+          (core_ops_range_Range__size_t){
+            .start = chunk_number * (size_t)3U,
+            .end = chunk_number * (size_t)3U + (size_t)3U
+          }
+        ),
+        uint8_t,
+        core_ops_range_Range__size_t,
+        Eurydice_slice);
+    uint32_t uu____0 = (uint32_t)Eurydice_slice_index(byte_chunk, (size_t)0U, uint8_t, uint8_t);
+    uint32_t
+    uu____1 =
+      uu____0
+      | (uint32_t)Eurydice_slice_index(byte_chunk, (size_t)1U, uint8_t, uint8_t) << 8U;
+    uint32_t
+    random_bits_as_u24 =
+      uu____1
+      | (uint32_t)Eurydice_slice_index(byte_chunk, (size_t)2U, uint8_t, uint8_t) << 16U;
+    uint32_t first_bits = random_bits_as_u24 & 2396745U;
+    uint32_t second_bits = random_bits_as_u24 >> 1U & 2396745U;
+    uint32_t third_bits = random_bits_as_u24 >> 2U & 2396745U;
+    uint32_t coin_toss_outcomes = first_bits + second_bits + third_bits;
+    for (int32_t i = (int32_t)0; i < (int32_t)24 / (int32_t)6; i++)
+    {
+      int32_t outcome_set = i;
+      int32_t outcome_set0 = outcome_set * (int32_t)6;
+      int32_t outcome_1 = (int32_t)(coin_toss_outcomes >> (uint32_t)outcome_set0 & 7U);
+      int32_t
+      outcome_2 = (int32_t)(coin_toss_outcomes >> (uint32_t)(outcome_set0 + (int32_t)3) & 7U);
+      size_t offset = (size_t)(outcome_set0 / (int32_t)6);
+      sampled[(size_t)4U * chunk_number + offset] = outcome_1 - outcome_2;
+    }
+  }
+  memcpy(ret, sampled, (size_t)256U * sizeof (int32_t));
+}
+
+/* Powers of the primitive 256-th root of unity zeta = 17 mod q, in
+   bit-reversed order, multiplied by R = 2^16 and stored as centered
+   representatives mod q. */
+static const
+int32_t
+libcrux_kyber_ntt_ZETAS_TIMES_MONTGOMERY_R[128U] =
+  {
+    (int32_t)-1044, (int32_t)-758, (int32_t)-359, (int32_t)-1517, (int32_t)1493, (int32_t)1422,
+    (int32_t)287, (int32_t)202, (int32_t)-171, (int32_t)622, (int32_t)1577, (int32_t)182,
+    (int32_t)962, (int32_t)-1202, (int32_t)-1474, (int32_t)1468, (int32_t)573, (int32_t)-1325,
+    (int32_t)264, (int32_t)383, (int32_t)-829, (int32_t)1458, (int32_t)-1602, (int32_t)-130,
+    (int32_t)-681, (int32_t)1017, (int32_t)732, (int32_t)608, (int32_t)-1542, (int32_t)411,
+    (int32_t)-205, (int32_t)-1571, (int32_t)1223, (int32_t)652, (int32_t)-552, (int32_t)1015,
+    (int32_t)-1293, (int32_t)1491, (int32_t)-282, (int32_t)-1544, (int32_t)516, (int32_t)-8,
+    (int32_t)-320, (int32_t)-666, (int32_t)-1618, (int32_t)-1162, (int32_t)126, (int32_t)1469,
+    (int32_t)-853, (int32_t)-90, (int32_t)-271, (int32_t)830, (int32_t)107, (int32_t)-1421,
+    (int32_t)-247, (int32_t)-951, (int32_t)-398, (int32_t)961, (int32_t)-1508, (int32_t)-725,
+    (int32_t)448, (int32_t)-1065, (int32_t)677, (int32_t)-1275, (int32_t)-1103, (int32_t)430,
+    (int32_t)555, (int32_t)843, (int32_t)-1251, (int32_t)871, (int32_t)1550, (int32_t)105,
+    (int32_t)422, (int32_t)587, (int32_t)177, (int32_t)-235, (int32_t)-291, (int32_t)-460,
+    (int32_t)1574, (int32_t)1653, (int32_t)-246, (int32_t)778, (int32_t)1159, (int32_t)-147,
+    (int32_t)-777, (int32_t)1483, (int32_t)-602, (int32_t)1119, (int32_t)-1590, (int32_t)644,
+    (int32_t)-872, (int32_t)349, (int32_t)418, (int32_t)329, (int32_t)-156, (int32_t)-75,
+    (int32_t)817, (int32_t)1097, (int32_t)603, (int32_t)610, (int32_t)1322, (int32_t)-1285,
+    (int32_t)-1465, (int32_t)384, (int32_t)-1215, (int32_t)-136, (int32_t)1218, (int32_t)-1335,
+    (int32_t)-874, (int32_t)220, (int32_t)-1187, (int32_t)-1659, (int32_t)-1185, (int32_t)-1530,
+    (int32_t)-1278, (int32_t)794, (int32_t)-1510, (int32_t)-854, (int32_t)-870, (int32_t)478,
+    (int32_t)-108, (int32_t)-308, (int32_t)996, (int32_t)991, (int32_t)958, (int32_t)-1460,
+    (int32_t)1522, (int32_t)1628
+  };
+
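+/* One layer of the forward NTT (Cooley-Tukey butterflies): `step` is the
+   distance between butterfly partners, and each pair (re[j], re[j + step])
+   becomes (re[j] + zeta * re[j + step], re[j] - zeta * re[j + step]), with
+   the zeta multiplication done through Montgomery reduction. */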
+static inline void
+libcrux_kyber_ntt_ntt_at_layer(
+  size_t *zeta_i,
+  int32_t re[256U],
+  size_t layer,
+  size_t _initial_coefficient_bound,
+  int32_t ret[256U]
+)
+{
+  size_t step = (size_t)1U << (uint32_t)layer;
+  for (size_t i0 = (size_t)0U; i0 < (size_t)128U >> (uint32_t)layer; i0++)
+  {
+    size_t round = i0;
+    zeta_i[0U] = zeta_i[0U] + (size_t)1U;
+    size_t offset = round * step * (size_t)2U;
+    for (size_t i = offset; i < offset + step; i++)
+    {
+      size_t j = i;
+      int32_t
+      t =
+        libcrux_kyber_arithmetic_montgomery_multiply_fe_by_fer(re[j + step],
+          libcrux_kyber_ntt_ZETAS_TIMES_MONTGOMERY_R[zeta_i[0U]]);
+      re[j + step] = re[j] - t;
+      re[j] = re[j] + t;
+    }
+  }
+  memcpy(ret, re, (size_t)256U * sizeof (int32_t));
+}
+
+static inline void
+libcrux_kyber_ntt_ntt_at_layer_3(
+  size_t *zeta_i,
+  int32_t re[256U],
+  size_t layer,
+  int32_t ret[256U]
+)
+{
+  int32_t ret0[256U];
+  libcrux_kyber_ntt_ntt_at_layer(zeta_i, re, layer, (size_t)3U, ret0);
+  memcpy(ret, ret0, (size_t)256U * sizeof (int32_t));
+}
+
+/* Forward NTT for a freshly sampled ring element. Since the input
+   coefficients are at most 3 in absolute value, the first layer multiplies
+   by zeta^64 = -1600 mod q directly, with no Montgomery reduction; the
+   remaining six layers use the generic butterfly, and a final Barrett
+   reduction brings every coefficient back to a small canonical range. */
+static inline void
+libcrux_kyber_ntt_ntt_binomially_sampled_ring_element(int32_t re[256U], int32_t ret[256U])
+{
+  size_t zeta_i = (size_t)1U;
+  for (size_t i = (size_t)0U; i < (size_t)128U; i++)
+  {
+    size_t j = i;
+    int32_t t = re[j + (size_t)128U] * (int32_t)-1600;
+    re[j + (size_t)128U] = re[j] - t;
+    re[j] = re[j] + t;
+  }
+  libcrux_kyber_ntt_ntt_at_layer_3(&zeta_i, re, (size_t)6U, re);
+  libcrux_kyber_ntt_ntt_at_layer_3(&zeta_i, re, (size_t)5U, re);
+  libcrux_kyber_ntt_ntt_at_layer_3(&zeta_i, re, (size_t)4U, re);
+  libcrux_kyber_ntt_ntt_at_layer_3(&zeta_i, re, (size_t)3U, re);
+  libcrux_kyber_ntt_ntt_at_layer_3(&zeta_i, re, (size_t)2U, re);
+  libcrux_kyber_ntt_ntt_at_layer_3(&zeta_i, re, (size_t)1U, re);
+  for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++)
+  {
+    size_t i0 = i;
+    int32_t uu____0 = libcrux_kyber_arithmetic_barrett_reduce(re[i0]);
+    re[i0] = uu____0;
+  }
+  memcpy(ret, re, (size_t)256U * sizeof (int32_t));
+}
+
+typedef struct K___int32_t_int32_t_s
+{
+  int32_t fst;
+  int32_t snd;
+}
+K___int32_t_int32_t;
+
+/* Multiplies the degree-one polynomials a0 + a1 X and b0 + b1 X modulo
+   X^2 - zeta: the result is (a0 b0 + a1 b1 zeta) + (a0 b1 + a1 b0) X, with
+   the products kept reduced via Montgomery reduction. */
+static inline K___int32_t_int32_t
+libcrux_kyber_ntt_ntt_multiply_binomials(
+  K___int32_t_int32_t _,
+  K___int32_t_int32_t _0,
+  int32_t zeta
+)
+{
+  int32_t a0 = _.fst;
+  int32_t a1 = _.snd;
+  int32_t b0 = _0.fst;
+  int32_t b1 = _0.snd;
+  int32_t uu____0 = a0 * b0;
+  int32_t
+  uu____1 =
+    libcrux_kyber_arithmetic_montgomery_reduce(uu____0
+      + libcrux_kyber_arithmetic_montgomery_reduce(a1 * b1) * zeta);
+  return
+    (
+      (K___int32_t_int32_t){
+        .fst = uu____1,
+        .snd = libcrux_kyber_arithmetic_montgomery_reduce(a0 * b1 + a1 * b0)
+      }
+    );
+}
+
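+/* Pointwise multiplication in the NTT domain: the NTT of a ring element
+   consists of 128 degree-one polynomials, and each block of four
+   coefficients holds two of them, multiplied modulo X^2 - zeta' and
+   X^2 + zeta' respectively, where zeta' is ZETAS_TIMES_MONTGOMERY_R[64 + i]. */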
+static inline void
+libcrux_kyber_ntt_ntt_multiply(int32_t (*lhs)[256U], int32_t (*rhs)[256U], int32_t ret[256U])
+{
+  int32_t out[256U];
+  memcpy(out,
+    libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO,
+    (size_t)256U * sizeof (int32_t));
+  for
+  (size_t
+    i = (size_t)0U;
+    i
+    < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT / (size_t)4U;
+    i++)
+  {
+    size_t i0 = i;
+    K___int32_t_int32_t lit0;
+    lit0.fst = lhs[0U][(size_t)4U * i0];
+    lit0.snd = lhs[0U][(size_t)4U * i0 + (size_t)1U];
+    K___int32_t_int32_t lit1;
+    lit1.fst = rhs[0U][(size_t)4U * i0];
+    lit1.snd = rhs[0U][(size_t)4U * i0 + (size_t)1U];
+    K___int32_t_int32_t
+    product =
+      libcrux_kyber_ntt_ntt_multiply_binomials(lit0,
+        lit1,
+        libcrux_kyber_ntt_ZETAS_TIMES_MONTGOMERY_R[(size_t)64U + i0]);
+    out[(size_t)4U * i0] = product.fst;
+    out[(size_t)4U * i0 + (size_t)1U] = product.snd;
+    K___int32_t_int32_t lit2;
+    lit2.fst = lhs[0U][(size_t)4U * i0 + (size_t)2U];
+    lit2.snd = lhs[0U][(size_t)4U * i0 + (size_t)3U];
+    K___int32_t_int32_t lit;
+    lit.fst = rhs[0U][(size_t)4U * i0 + (size_t)2U];
+    lit.snd = rhs[0U][(size_t)4U * i0 + (size_t)3U];
+    K___int32_t_int32_t
+    product0 =
+      libcrux_kyber_ntt_ntt_multiply_binomials(lit2,
+        lit,
+        -libcrux_kyber_ntt_ZETAS_TIMES_MONTGOMERY_R[(size_t)64U + i0]);
+    out[(size_t)4U * i0 + (size_t)2U] = product0.fst;
+    out[(size_t)4U * i0 + (size_t)3U] = product0.snd;
+  }
+  memcpy(ret, out, (size_t)256U * sizeof (int32_t));
+}
+
+typedef struct K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_s
+{
+  uint8_t fst;
+  uint8_t snd;
+  uint8_t thd;
+  uint8_t f3;
+  uint8_t f4;
+}
+K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t;
+
+/* Packs four 10-bit coefficients into five bytes, low bits first. */
+static inline K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t
+libcrux_kyber_serialize_compress_coefficients_10(
+  int32_t coefficient1,
+  int32_t coefficient2,
+  int32_t coefficient3,
+  int32_t coefficient4
+)
+{
+  uint8_t coef1 = (uint8_t)(coefficient1 & (int32_t)255);
+  uint8_t
+  coef2 =
+    (uint32_t)(uint8_t)(coefficient2 & (int32_t)63)
+    << 2U
+    | (uint32_t)(uint8_t)(coefficient1 >> 8U & (int32_t)3);
+  uint8_t
+  coef3 =
+    (uint32_t)(uint8_t)(coefficient3 & (int32_t)15)
+    << 4U
+    | (uint32_t)(uint8_t)(coefficient2 >> 6U & (int32_t)15);
+  uint8_t
+  coef4 =
+    (uint32_t)(uint8_t)(coefficient4 & (int32_t)3)
+    << 6U
+    | (uint32_t)(uint8_t)(coefficient3 >> 4U & (int32_t)63);
+  uint8_t coef5 = (uint8_t)(coefficient4 >> 2U & (int32_t)255);
+  return
+    (
+      (K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t){
+        .fst = coef1,
+        .snd = coef2,
+        .thd = coef3,
+        .f3 = coef4,
+        .f4 = coef5
+      }
+    );
+}
+
+typedef struct
+K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_s
+{
+  uint8_t fst;
+  uint8_t snd;
+  uint8_t thd;
+  uint8_t f3;
+  uint8_t f4;
+  uint8_t f5;
+  uint8_t f6;
+  uint8_t f7;
+  uint8_t f8;
+  uint8_t f9;
+  uint8_t f10;
+}
+K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t;
+
+/* Packs eight 11-bit coefficients into eleven bytes, low bits first. */
+static inline K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t
+libcrux_kyber_serialize_compress_coefficients_11(
+  int32_t coefficient1,
+  int32_t coefficient2,
+  int32_t coefficient3,
+  int32_t coefficient4,
+  int32_t coefficient5,
+  int32_t coefficient6,
+  int32_t coefficient7,
+  int32_t coefficient8
+)
+{
+  uint8_t coef1 = (uint8_t)coefficient1;
+  uint8_t
+  coef2 =
+    (uint32_t)(uint8_t)(coefficient2 & (int32_t)31)
+    << 3U
+    | (uint32_t)(uint8_t)(coefficient1 >> 8U);
+  uint8_t
+  coef3 =
+    (uint32_t)(uint8_t)(coefficient3 & (int32_t)3)
+    << 6U
+    | (uint32_t)(uint8_t)(coefficient2 >> 5U);
+  uint8_t coef4 = (uint8_t)(coefficient3 >> 2U & (int32_t)255);
+  uint8_t
+  coef5 =
+    (uint32_t)(uint8_t)(coefficient4 & (int32_t)127)
+    << 1U
+    | (uint32_t)(uint8_t)(coefficient3 >> 10U);
+  uint8_t
+  coef6 =
+    (uint32_t)(uint8_t)(coefficient5 & (int32_t)15)
+    << 4U
+    | (uint32_t)(uint8_t)(coefficient4 >> 7U);
+  uint8_t
+  coef7 =
+    (uint32_t)(uint8_t)(coefficient6 & (int32_t)1)
+    << 7U
+    | (uint32_t)(uint8_t)(coefficient5 >> 4U);
+  uint8_t coef8 = (uint8_t)(coefficient6 >> 1U & (int32_t)255);
+  uint8_t
+  coef9 =
+    (uint32_t)(uint8_t)(coefficient7 & (int32_t)63)
+    << 2U
+    | (uint32_t)(uint8_t)(coefficient6 >> 9U);
+  uint8_t
+  coef10 =
+    (uint32_t)(uint8_t)(coefficient8 & (int32_t)7)
+    << 5U
+    | (uint32_t)(uint8_t)(coefficient7 >> 6U);
+  uint8_t coef11 = (uint8_t)(coefficient8 >> 3U);
+  return
+    (
+      (K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t){
+        .fst = coef1,
+        .snd = coef2,
+        .thd = coef3,
+        .f3 = coef4,
+        .f4 = coef5,
+        .f5 = coef6,
+        .f6 = coef7,
+        .f7 = coef8,
+        .f8 = coef9,
+        .f9 = coef10,
+        .f10 = coef11
+      }
+    );
+}
+
+/* One layer of the inverse NTT (Gentleman-Sande butterflies): zeta_i now
+   counts down, and the zeta multiplication is applied to the difference
+   re[j + step] - re[j]. */
+static inline void
+libcrux_kyber_ntt_invert_ntt_at_layer(
+  size_t *zeta_i,
+  int32_t re[256U],
+  size_t layer,
+  int32_t ret[256U]
+)
+{
+  size_t step = (size_t)1U << (uint32_t)layer;
+  for (size_t i0 = (size_t)0U; i0 < (size_t)128U >> (uint32_t)layer;
i0++) + { + size_t round = i0; + zeta_i[0U] = zeta_i[0U] - (size_t)1U; + size_t offset = round * step * (size_t)2U; + for (size_t i = offset; i < offset + step; i++) + { + size_t j = i; + int32_t a_minus_b = re[j + step] - re[j]; + re[j] = re[j] + re[j + step]; + int32_t + uu____0 = + libcrux_kyber_arithmetic_montgomery_reduce(a_minus_b + * libcrux_kyber_ntt_ZETAS_TIMES_MONTGOMERY_R[zeta_i[0U]]); + re[j + step] = uu____0; + } + } + memcpy(ret, re, (size_t)256U * sizeof (int32_t)); +} + +static inline void +libcrux_kyber_serialize_deserialize_then_decompress_message( + uint8_t serialized[32U], + int32_t ret[256U] +) +{ + int32_t re[256U]; + memcpy(re, + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + for + (size_t + i0 = (size_t)0U; + i0 + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)32U, + serialized, + uint8_t, + Eurydice_slice), + uint8_t, + size_t); + i0++) + { + size_t i1 = i0; + uint8_t byte = serialized[i1]; + for (size_t i = (size_t)0U; i < (size_t)8U; i++) + { + size_t j = i; + int32_t coefficient_compressed = (int32_t)((uint32_t)byte >> (uint32_t)j & 1U); + int32_t + uu____0 = libcrux_kyber_compress_decompress_message_coefficient(coefficient_compressed); + re[(size_t)8U * i1 + j] = uu____0; + } + } + memcpy(ret, re, (size_t)256U * sizeof (int32_t)); +} + +static inline K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t +libcrux_kyber_serialize_compress_coefficients_5( + uint8_t coefficient2, + uint8_t coefficient1, + uint8_t coefficient4, + uint8_t coefficient3, + uint8_t coefficient5, + uint8_t coefficient7, + uint8_t coefficient6, + uint8_t coefficient8 +) +{ + uint8_t coef1 = ((uint32_t)coefficient2 & 7U) << 5U | (uint32_t)coefficient1; + uint8_t + coef2 = + (((uint32_t)coefficient4 & 1U) << 7U | (uint32_t)coefficient3 << 2U) + | (uint32_t)coefficient2 >> 3U; + uint8_t coef3 = ((uint32_t)coefficient5 & 15U) << 4U | (uint32_t)coefficient4 >> 1U; + uint8_t + coef4 = + (((uint32_t)coefficient7 & 3U) << 6U | (uint32_t)coefficient6 << 1U) + | (uint32_t)coefficient5 >> 4U; + uint8_t coef5 = (uint32_t)coefficient8 << 3U | (uint32_t)coefficient7 >> 2U; + return + ( + (K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t){ + .fst = coef1, + .snd = coef2, + .thd = coef3, + .f3 = coef4, + .f4 = coef5 + } + ); +} + +static inline void +libcrux_kyber_serialize_deserialize_to_reduced_ring_element( + Eurydice_slice ring_element, + int32_t ret[256U] +) +{ + int32_t re[256U]; + memcpy(re, + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + for + (size_t + i = (size_t)0U; + i + < core_slice___Slice_T___len(ring_element, uint8_t, size_t) / (size_t)3U; + i++) + { + size_t i0 = i; + Eurydice_slice + bytes = + Eurydice_slice_subslice(ring_element, + ( + (core_ops_range_Range__size_t){ + .start = i0 * (size_t)3U, + .end = i0 * (size_t)3U + (size_t)3U + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t byte1 = (int32_t)Eurydice_slice_index(bytes, (size_t)0U, uint8_t, uint8_t); + int32_t byte2 = (int32_t)Eurydice_slice_index(bytes, (size_t)1U, uint8_t, uint8_t); + int32_t byte3 = (int32_t)Eurydice_slice_index(bytes, (size_t)2U, uint8_t, uint8_t); + re[(size_t)2U * i0] = (byte2 & (int32_t)15) << 8U | (byte1 & (int32_t)255); + int32_t tmp = re[(size_t)2U * i0] % (int32_t)3329; + re[(size_t)2U * i0] = tmp; + re[(size_t)2U * i0 + (size_t)1U] = byte3 << 4U | (byte2 >> 4U & (int32_t)15); + int32_t tmp0 = re[(size_t)2U * i0 + (size_t)1U] % 
(int32_t)3329; + re[(size_t)2U * i0 + (size_t)1U] = tmp0; + } + memcpy(ret, re, (size_t)256U * sizeof (int32_t)); +} + +typedef struct K___int32_t_int32_t_int32_t_int32_t_s +{ + int32_t fst; + int32_t snd; + int32_t thd; + int32_t f3; +} +K___int32_t_int32_t_int32_t_int32_t; + +static inline K___int32_t_int32_t_int32_t_int32_t +libcrux_kyber_serialize_decompress_coefficients_10( + int32_t byte2, + int32_t byte1, + int32_t byte3, + int32_t byte4, + int32_t byte5 +) +{ + int32_t coefficient1 = (byte2 & (int32_t)3) << 8U | (byte1 & (int32_t)255); + int32_t coefficient2 = (byte3 & (int32_t)15) << 6U | byte2 >> 2U; + int32_t coefficient3 = (byte4 & (int32_t)63) << 4U | byte3 >> 4U; + int32_t coefficient4 = byte5 << 2U | byte4 >> 6U; + return + ( + (K___int32_t_int32_t_int32_t_int32_t){ + .fst = coefficient1, + .snd = coefficient2, + .thd = coefficient3, + .f3 = coefficient4 + } + ); +} + +static inline void +libcrux_kyber_serialize_deserialize_then_decompress_10( + Eurydice_slice serialized, + int32_t ret[256U] +) +{ + int32_t re[256U]; + memcpy(re, + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + for + (size_t + i = (size_t)0U; + i + < core_slice___Slice_T___len(serialized, uint8_t, size_t) / (size_t)5U; + i++) + { + size_t i0 = i; + Eurydice_slice + bytes = + Eurydice_slice_subslice(serialized, + ( + (core_ops_range_Range__size_t){ + .start = i0 * (size_t)5U, + .end = i0 * (size_t)5U + (size_t)5U + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t byte1 = (int32_t)Eurydice_slice_index(bytes, (size_t)0U, uint8_t, uint8_t); + int32_t byte2 = (int32_t)Eurydice_slice_index(bytes, (size_t)1U, uint8_t, uint8_t); + int32_t byte3 = (int32_t)Eurydice_slice_index(bytes, (size_t)2U, uint8_t, uint8_t); + int32_t byte4 = (int32_t)Eurydice_slice_index(bytes, (size_t)3U, uint8_t, uint8_t); + int32_t byte5 = (int32_t)Eurydice_slice_index(bytes, (size_t)4U, uint8_t, uint8_t); + K___int32_t_int32_t_int32_t_int32_t + uu____0 = libcrux_kyber_serialize_decompress_coefficients_10(byte2, byte1, byte3, byte4, byte5); + int32_t coefficient1 = uu____0.fst; + int32_t coefficient2 = uu____0.snd; + int32_t coefficient3 = uu____0.thd; + int32_t coefficient4 = uu____0.f3; + int32_t uu____1 = libcrux_kyber_compress_decompress_ciphertext_coefficient(10U, coefficient1); + re[(size_t)4U * i0] = uu____1; + int32_t uu____2 = libcrux_kyber_compress_decompress_ciphertext_coefficient(10U, coefficient2); + re[(size_t)4U * i0 + (size_t)1U] = uu____2; + int32_t uu____3 = libcrux_kyber_compress_decompress_ciphertext_coefficient(10U, coefficient3); + re[(size_t)4U * i0 + (size_t)2U] = uu____3; + int32_t uu____4 = libcrux_kyber_compress_decompress_ciphertext_coefficient(10U, coefficient4); + re[(size_t)4U * i0 + (size_t)3U] = uu____4; + } + memcpy(ret, re, (size_t)256U * sizeof (int32_t)); +} + +typedef struct K___int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_s +{ + int32_t fst; + int32_t snd; + int32_t thd; + int32_t f3; + int32_t f4; + int32_t f5; + int32_t f6; + int32_t f7; +} +K___int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t; + +static inline K___int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t +libcrux_kyber_serialize_decompress_coefficients_11( + int32_t byte2, + int32_t byte1, + int32_t byte3, + int32_t byte5, + int32_t byte4, + int32_t byte6, + int32_t byte7, + int32_t byte9, + int32_t byte8, + int32_t byte10, + int32_t byte11 +) +{ + int32_t coefficient1 = (byte2 & (int32_t)7) 
<< 8U | byte1; + int32_t coefficient2 = (byte3 & (int32_t)63) << 5U | byte2 >> 3U; + int32_t coefficient3 = ((byte5 & (int32_t)1) << 10U | byte4 << 2U) | byte3 >> 6U; + int32_t coefficient4 = (byte6 & (int32_t)15) << 7U | byte5 >> 1U; + int32_t coefficient5 = (byte7 & (int32_t)127) << 4U | byte6 >> 4U; + int32_t coefficient6 = ((byte9 & (int32_t)3) << 9U | byte8 << 1U) | byte7 >> 7U; + int32_t coefficient7 = (byte10 & (int32_t)31) << 6U | byte9 >> 2U; + int32_t coefficient8 = byte11 << 3U | byte10 >> 5U; + return + ( + (K___int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t){ + .fst = coefficient1, + .snd = coefficient2, + .thd = coefficient3, + .f3 = coefficient4, + .f4 = coefficient5, + .f5 = coefficient6, + .f6 = coefficient7, + .f7 = coefficient8 + } + ); +} + +static inline void +libcrux_kyber_serialize_deserialize_then_decompress_11( + Eurydice_slice serialized, + int32_t ret[256U] +) +{ + int32_t re[256U]; + memcpy(re, + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + for + (size_t + i = (size_t)0U; + i + < core_slice___Slice_T___len(serialized, uint8_t, size_t) / (size_t)11U; + i++) + { + size_t i0 = i; + Eurydice_slice + bytes = + Eurydice_slice_subslice(serialized, + ( + (core_ops_range_Range__size_t){ + .start = i0 * (size_t)11U, + .end = i0 * (size_t)11U + (size_t)11U + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t byte1 = (int32_t)Eurydice_slice_index(bytes, (size_t)0U, uint8_t, uint8_t); + int32_t byte2 = (int32_t)Eurydice_slice_index(bytes, (size_t)1U, uint8_t, uint8_t); + int32_t byte3 = (int32_t)Eurydice_slice_index(bytes, (size_t)2U, uint8_t, uint8_t); + int32_t byte4 = (int32_t)Eurydice_slice_index(bytes, (size_t)3U, uint8_t, uint8_t); + int32_t byte5 = (int32_t)Eurydice_slice_index(bytes, (size_t)4U, uint8_t, uint8_t); + int32_t byte6 = (int32_t)Eurydice_slice_index(bytes, (size_t)5U, uint8_t, uint8_t); + int32_t byte7 = (int32_t)Eurydice_slice_index(bytes, (size_t)6U, uint8_t, uint8_t); + int32_t byte8 = (int32_t)Eurydice_slice_index(bytes, (size_t)7U, uint8_t, uint8_t); + int32_t byte9 = (int32_t)Eurydice_slice_index(bytes, (size_t)8U, uint8_t, uint8_t); + int32_t byte10 = (int32_t)Eurydice_slice_index(bytes, (size_t)9U, uint8_t, uint8_t); + int32_t byte11 = (int32_t)Eurydice_slice_index(bytes, (size_t)10U, uint8_t, uint8_t); + K___int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t + uu____0 = + libcrux_kyber_serialize_decompress_coefficients_11(byte2, + byte1, + byte3, + byte5, + byte4, + byte6, + byte7, + byte9, + byte8, + byte10, + byte11); + int32_t coefficient1 = uu____0.fst; + int32_t coefficient2 = uu____0.snd; + int32_t coefficient3 = uu____0.thd; + int32_t coefficient4 = uu____0.f3; + int32_t coefficient5 = uu____0.f4; + int32_t coefficient6 = uu____0.f5; + int32_t coefficient7 = uu____0.f6; + int32_t coefficient8 = uu____0.f7; + int32_t uu____1 = libcrux_kyber_compress_decompress_ciphertext_coefficient(11U, coefficient1); + re[(size_t)8U * i0] = uu____1; + int32_t uu____2 = libcrux_kyber_compress_decompress_ciphertext_coefficient(11U, coefficient2); + re[(size_t)8U * i0 + (size_t)1U] = uu____2; + int32_t uu____3 = libcrux_kyber_compress_decompress_ciphertext_coefficient(11U, coefficient3); + re[(size_t)8U * i0 + (size_t)2U] = uu____3; + int32_t uu____4 = libcrux_kyber_compress_decompress_ciphertext_coefficient(11U, coefficient4); + re[(size_t)8U * i0 + (size_t)3U] = uu____4; + int32_t uu____5 = 
libcrux_kyber_compress_decompress_ciphertext_coefficient(11U, coefficient5); + re[(size_t)8U * i0 + (size_t)4U] = uu____5; + int32_t uu____6 = libcrux_kyber_compress_decompress_ciphertext_coefficient(11U, coefficient6); + re[(size_t)8U * i0 + (size_t)5U] = uu____6; + int32_t uu____7 = libcrux_kyber_compress_decompress_ciphertext_coefficient(11U, coefficient7); + re[(size_t)8U * i0 + (size_t)6U] = uu____7; + int32_t uu____8 = libcrux_kyber_compress_decompress_ciphertext_coefficient(11U, coefficient8); + re[(size_t)8U * i0 + (size_t)7U] = uu____8; + } + memcpy(ret, re, (size_t)256U * sizeof (int32_t)); +} + +static inline void +libcrux_kyber_ntt_ntt_at_layer_3328( + size_t *zeta_i, + int32_t re[256U], + size_t layer, + int32_t ret[256U] +) +{ + int32_t ret0[256U]; + libcrux_kyber_ntt_ntt_at_layer(zeta_i, re, layer, (size_t)3328U, ret0); + memcpy(ret, ret0, (size_t)256U * sizeof (int32_t)); +} + +static inline void +libcrux_kyber_serialize_deserialize_to_uncompressed_ring_element( + Eurydice_slice serialized, + int32_t ret[256U] +) +{ + int32_t re[256U]; + memcpy(re, + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + for + (size_t + i = (size_t)0U; + i + < core_slice___Slice_T___len(serialized, uint8_t, size_t) / (size_t)3U; + i++) + { + size_t i0 = i; + Eurydice_slice + bytes = + Eurydice_slice_subslice(serialized, + ( + (core_ops_range_Range__size_t){ + .start = i0 * (size_t)3U, + .end = i0 * (size_t)3U + (size_t)3U + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t byte1 = (int32_t)Eurydice_slice_index(bytes, (size_t)0U, uint8_t, uint8_t); + int32_t byte2 = (int32_t)Eurydice_slice_index(bytes, (size_t)1U, uint8_t, uint8_t); + int32_t byte3 = (int32_t)Eurydice_slice_index(bytes, (size_t)2U, uint8_t, uint8_t); + re[(size_t)2U * i0] = (byte2 & (int32_t)15) << 8U | (byte1 & (int32_t)255); + re[(size_t)2U * i0 + (size_t)1U] = byte3 << 4U | (byte2 >> 4U & (int32_t)15); + } + memcpy(ret, re, (size_t)256U * sizeof (int32_t)); +} + +static inline K___int32_t_int32_t +libcrux_kyber_serialize_decompress_coefficients_4(uint8_t *byte) +{ + int32_t coefficient1 = (int32_t)Eurydice_bitand_pv_u8(byte, 15U); + int32_t coefficient2 = (int32_t)((uint32_t)Eurydice_shr_pv_u8(byte, (int32_t)4) & 15U); + return ((K___int32_t_int32_t){ .fst = coefficient1, .snd = coefficient2 }); +} + +static inline void +libcrux_kyber_serialize_deserialize_then_decompress_4( + Eurydice_slice serialized, + int32_t ret[256U] +) +{ + int32_t re[256U]; + memcpy(re, + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + for (size_t i = (size_t)0U; i < core_slice___Slice_T___len(serialized, uint8_t, size_t); i++) + { + size_t i0 = i; + uint8_t *byte = &Eurydice_slice_index(serialized, i0, uint8_t, uint8_t); + K___int32_t_int32_t uu____0 = libcrux_kyber_serialize_decompress_coefficients_4(byte); + int32_t coefficient1 = uu____0.fst; + int32_t coefficient2 = uu____0.snd; + int32_t uu____1 = libcrux_kyber_compress_decompress_ciphertext_coefficient(4U, coefficient1); + re[(size_t)2U * i0] = uu____1; + int32_t uu____2 = libcrux_kyber_compress_decompress_ciphertext_coefficient(4U, coefficient2); + re[(size_t)2U * i0 + (size_t)1U] = uu____2; + } + memcpy(ret, re, (size_t)256U * sizeof (int32_t)); +} + +static inline K___int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t +libcrux_kyber_serialize_decompress_coefficients_5( + int32_t byte1, + int32_t byte2, + int32_t 
byte3, + int32_t byte4, + int32_t byte5 +) +{ + int32_t coefficient1 = byte1 & (int32_t)31; + int32_t coefficient2 = (byte2 & (int32_t)3) << 3U | byte1 >> 5U; + int32_t coefficient3 = byte2 >> 2U & (int32_t)31; + int32_t coefficient4 = (byte3 & (int32_t)15) << 1U | byte2 >> 7U; + int32_t coefficient5 = (byte4 & (int32_t)1) << 4U | byte3 >> 4U; + int32_t coefficient6 = byte4 >> 1U & (int32_t)31; + int32_t coefficient7 = (byte5 & (int32_t)7) << 2U | byte4 >> 6U; + int32_t coefficient8 = byte5 >> 3U; + return + ( + (K___int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t){ + .fst = coefficient1, + .snd = coefficient2, + .thd = coefficient3, + .f3 = coefficient4, + .f4 = coefficient5, + .f5 = coefficient6, + .f6 = coefficient7, + .f7 = coefficient8 + } + ); +} + +static inline void +libcrux_kyber_serialize_deserialize_then_decompress_5( + Eurydice_slice serialized, + int32_t ret[256U] +) +{ + int32_t re[256U]; + memcpy(re, + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + for + (size_t + i = (size_t)0U; + i + < core_slice___Slice_T___len(serialized, uint8_t, size_t) / (size_t)5U; + i++) + { + size_t i0 = i; + Eurydice_slice + bytes = + Eurydice_slice_subslice(serialized, + ( + (core_ops_range_Range__size_t){ + .start = i0 * (size_t)5U, + .end = i0 * (size_t)5U + (size_t)5U + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t byte1 = (int32_t)Eurydice_slice_index(bytes, (size_t)0U, uint8_t, uint8_t); + int32_t byte2 = (int32_t)Eurydice_slice_index(bytes, (size_t)1U, uint8_t, uint8_t); + int32_t byte3 = (int32_t)Eurydice_slice_index(bytes, (size_t)2U, uint8_t, uint8_t); + int32_t byte4 = (int32_t)Eurydice_slice_index(bytes, (size_t)3U, uint8_t, uint8_t); + int32_t byte5 = (int32_t)Eurydice_slice_index(bytes, (size_t)4U, uint8_t, uint8_t); + K___int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t_int32_t + uu____0 = libcrux_kyber_serialize_decompress_coefficients_5(byte1, byte2, byte3, byte4, byte5); + int32_t coefficient1 = uu____0.fst; + int32_t coefficient2 = uu____0.snd; + int32_t coefficient3 = uu____0.thd; + int32_t coefficient4 = uu____0.f3; + int32_t coefficient5 = uu____0.f4; + int32_t coefficient6 = uu____0.f5; + int32_t coefficient7 = uu____0.f6; + int32_t coefficient8 = uu____0.f7; + int32_t uu____1 = libcrux_kyber_compress_decompress_ciphertext_coefficient(5U, coefficient1); + re[(size_t)8U * i0] = uu____1; + int32_t uu____2 = libcrux_kyber_compress_decompress_ciphertext_coefficient(5U, coefficient2); + re[(size_t)8U * i0 + (size_t)1U] = uu____2; + int32_t uu____3 = libcrux_kyber_compress_decompress_ciphertext_coefficient(5U, coefficient3); + re[(size_t)8U * i0 + (size_t)2U] = uu____3; + int32_t uu____4 = libcrux_kyber_compress_decompress_ciphertext_coefficient(5U, coefficient4); + re[(size_t)8U * i0 + (size_t)3U] = uu____4; + int32_t uu____5 = libcrux_kyber_compress_decompress_ciphertext_coefficient(5U, coefficient5); + re[(size_t)8U * i0 + (size_t)4U] = uu____5; + int32_t uu____6 = libcrux_kyber_compress_decompress_ciphertext_coefficient(5U, coefficient6); + re[(size_t)8U * i0 + (size_t)5U] = uu____6; + int32_t uu____7 = libcrux_kyber_compress_decompress_ciphertext_coefficient(5U, coefficient7); + re[(size_t)8U * i0 + (size_t)6U] = uu____7; + int32_t uu____8 = libcrux_kyber_compress_decompress_ciphertext_coefficient(5U, coefficient8); + re[(size_t)8U * i0 + (size_t)7U] = uu____8; + } + memcpy(ret, re, (size_t)256U * sizeof (int32_t)); +} + +static inline void 
+libcrux_kyber_serialize_compress_then_serialize_message(int32_t re[256U], uint8_t ret[32U])
+{
+  uint8_t serialized[32U] = { 0U };
+  for
+  (size_t
+    i0 = (size_t)0U;
+    i0
+    <
+      core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)256U, re, int32_t, Eurydice_slice),
+        int32_t,
+        size_t)
+      / (size_t)8U;
+    i0++)
+  {
+    size_t i1 = i0;
+    Eurydice_slice
+    coefficients =
+      Eurydice_array_to_subslice((size_t)256U,
+        re,
+        (
+          (core_ops_range_Range__size_t){
+            .start = i1 * (size_t)8U,
+            .end = i1 * (size_t)8U + (size_t)8U
+          }
+        ),
+        int32_t,
+        core_ops_range_Range__size_t,
+        Eurydice_slice);
+    for (size_t i = (size_t)0U; i < core_slice___Slice_T___len(coefficients, int32_t, size_t); i++)
+    {
+      size_t j = i;
+      int32_t *coefficient = &Eurydice_slice_index(coefficients, j, int32_t, int32_t);
+      uint16_t coefficient0 = libcrux_kyber_arithmetic_to_unsigned_representative(coefficient[0U]);
+      uint8_t
+      coefficient_compressed = libcrux_kyber_compress_compress_message_coefficient(coefficient0);
+      size_t uu____0 = i1;
+      serialized[uu____0] =
+        (uint32_t)serialized[uu____0]
+        | (uint32_t)coefficient_compressed << (uint32_t)j;
+    }
+  }
+  memcpy(ret, serialized, (size_t)32U * sizeof (uint8_t));
+}
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __internal_libcrux_kyber_common_H_DEFINED
+#endif
diff --git a/libcrux/include/libcrux_digest.h b/libcrux/include/libcrux_digest.h
new file mode 100644
index 000000000..3d10d12e5
--- /dev/null
+++ b/libcrux/include/libcrux_digest.h
@@ -0,0 +1,48 @@
+/*
+  This file was generated by KaRaMeL
+  KaRaMeL invocation: ../../../eurydice/eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc
+  F* version: b5cb71b8
+  KaRaMeL version: 1282f04f
+ */
+
+#ifndef __libcrux_digest_H
+#define __libcrux_digest_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "eurydice_glue.h"
+#include "libcrux_hacl_glue.h"
+
+extern void libcrux_digest_sha3_512(Eurydice_slice x0, uint8_t x1[64U]);
+
+extern void libcrux_digest_sha3_256(Eurydice_slice x0, uint8_t x1[32U]);
+
+#define libcrux_digest_shake256(x_0, x_1, x_2, _ret_t) libcrux_digest_shake256_(x_0, x_1, x_2)
+
+extern void libcrux_digest_shake256_(size_t x0, Eurydice_slice x1, uint8_t *x2);
+
+extern libcrux_digest_incremental_x4_Shake128StateX4
+libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__new(void);
+
+#define libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__absorb_final(x_0, x_1, x_2, _ret_t) libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__absorb_final_(x_0, x_1, x_2)
+
+extern void
+libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__absorb_final_(
+  size_t x0,
+  libcrux_digest_incremental_x4_Shake128StateX4 *x1,
+  Eurydice_slice *x2
+);
+
+extern void
+libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__free_memory(
+  libcrux_digest_incremental_x4_Shake128StateX4 x0
+);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __libcrux_digest_H_DEFINED
+#endif
diff --git a/libcrux/include/libcrux_hacl_glue.h b/libcrux/include/libcrux_hacl_glue.h
new file mode 100644
index 000000000..26e904786
--- /dev/null
+++ b/libcrux/include/libcrux_hacl_glue.h
@@ -0,0 +1,56 @@
+/* Hand-written file */
+
+#pragma once
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include "Eurydice.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef HACL_CAN_COMPILE_VEC256
+#include "libintvector.h"
+typedef struct libcrux_digest_incremental_x4_Shake128StateX4
+{
+  Lib_IntVector_Intrinsics_vec256* x4;
+
uint64_t* st0; + uint64_t* st1; + uint64_t* st2; + uint64_t* st3; +} libcrux_digest_incremental_x4_Shake128StateX4; +#else +typedef struct libcrux_digest_incremental_x4_Shake128StateX4 +{ + uint64_t* st0; + uint64_t* st1; + uint64_t* st2; + uint64_t* st3; +} libcrux_digest_incremental_x4_Shake128StateX4; +#endif + +extern void +libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__squeeze_blocks_f( + libcrux_digest_incremental_x4_Shake128StateX4* xof_state, + size_t block_len, + size_t num, + uint8_t *output); + +#define libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__squeeze_blocks( \ + block_len, num, xof_state, output, c) \ + libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__squeeze_blocks_f( \ + xof_state, block_len, num, (uint8_t *) output) + +// The last parameter should be x1[k] but Eurydice issues a prototype that has lost the length information. +void +libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__absorb_final_( + size_t k, + libcrux_digest_incremental_x4_Shake128StateX4* x0, + Eurydice_slice *x1); + +#if defined(__cplusplus) +} +#endif diff --git a/libcrux/include/libcrux_kyber1024.h b/libcrux/include/libcrux_kyber1024.h new file mode 100644 index 000000000..bc66dbd99 --- /dev/null +++ b/libcrux/include/libcrux_kyber1024.h @@ -0,0 +1,125 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: ../../../eurydice/eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc + F* version: b5cb71b8 + KaRaMeL version: 1282f04f + */ + +#ifndef __libcrux_kyber1024_H +#define __libcrux_kyber1024_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "libcrux_digest.h" +#include "core.h" +#include "eurydice_glue.h" + +#define LIBCRUX_KYBER_KYBER1024_RANK_1024 ((size_t)4U) + +#define LIBCRUX_KYBER_KYBER1024_RANKED_BYTES_PER_RING_ELEMENT_1024 (LIBCRUX_KYBER_KYBER1024_RANK_1024 * LIBCRUX_KYBER_CONSTANTS_BITS_PER_RING_ELEMENT / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER1024_T_AS_NTT_ENCODED_SIZE_1024 (LIBCRUX_KYBER_KYBER1024_RANK_1024 * LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * LIBCRUX_KYBER_CONSTANTS_BITS_PER_COEFFICIENT / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER1024_VECTOR_U_COMPRESSION_FACTOR_1024 ((size_t)11U) + +#define LIBCRUX_KYBER_KYBER1024_C1_BLOCK_SIZE_1024 (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * LIBCRUX_KYBER_KYBER1024_VECTOR_U_COMPRESSION_FACTOR_1024 / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER1024_C1_SIZE_1024 (LIBCRUX_KYBER_KYBER1024_C1_BLOCK_SIZE_1024 * LIBCRUX_KYBER_KYBER1024_RANK_1024) + +#define LIBCRUX_KYBER_KYBER1024_VECTOR_V_COMPRESSION_FACTOR_1024 ((size_t)5U) + +#define LIBCRUX_KYBER_KYBER1024_C2_SIZE_1024 (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * LIBCRUX_KYBER_KYBER1024_VECTOR_V_COMPRESSION_FACTOR_1024 / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER1024_CPA_PKE_SECRET_KEY_SIZE_1024 (LIBCRUX_KYBER_KYBER1024_RANK_1024 * LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * LIBCRUX_KYBER_CONSTANTS_BITS_PER_COEFFICIENT / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER1024_CPA_PKE_PUBLIC_KEY_SIZE_1024 (LIBCRUX_KYBER_KYBER1024_T_AS_NTT_ENCODED_SIZE_1024 + (size_t)32U) + +#define LIBCRUX_KYBER_KYBER1024_CPA_PKE_CIPHERTEXT_SIZE_1024 (LIBCRUX_KYBER_KYBER1024_C1_SIZE_1024 + LIBCRUX_KYBER_KYBER1024_C2_SIZE_1024) + +#define LIBCRUX_KYBER_KYBER1024_SECRET_KEY_SIZE_1024 (LIBCRUX_KYBER_KYBER1024_CPA_PKE_SECRET_KEY_SIZE_1024 + LIBCRUX_KYBER_KYBER1024_CPA_PKE_PUBLIC_KEY_SIZE_1024 + 
LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE) + +#define LIBCRUX_KYBER_KYBER1024_ETA1 ((size_t)2U) + +#define LIBCRUX_KYBER_KYBER1024_ETA1_RANDOMNESS_SIZE (LIBCRUX_KYBER_KYBER1024_ETA1 * (size_t)64U) + +#define LIBCRUX_KYBER_KYBER1024_ETA2 ((size_t)2U) + +#define LIBCRUX_KYBER_KYBER1024_ETA2_RANDOMNESS_SIZE (LIBCRUX_KYBER_KYBER1024_ETA2 * (size_t)64U) + +#define LIBCRUX_KYBER_KYBER1024_IMPLICIT_REJECTION_HASH_INPUT_SIZE (LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE + LIBCRUX_KYBER_KYBER1024_CPA_PKE_CIPHERTEXT_SIZE_1024) + +typedef uint8_t libcrux_kyber_types_MlKemPublicKey___1568size_t[1568U]; + +typedef struct core_option_Option__libcrux_kyber_types_MlKemPublicKey__1568size_t___s +{ + core_option_Option__size_t_tags tag; + libcrux_kyber_types_MlKemPublicKey___1568size_t f0; +} +core_option_Option__libcrux_kyber_types_MlKemPublicKey__1568size_t__; + +core_option_Option__libcrux_kyber_types_MlKemPublicKey__1568size_t__ +libcrux_kyber_kyber1024_validate_public_key(uint8_t public_key[1568U]); + +typedef struct libcrux_kyber_types_MlKemKeyPair___3168size_t_1568size_t_s +{ + uint8_t sk[3168U]; + uint8_t pk[1568U]; +} +libcrux_kyber_types_MlKemKeyPair___3168size_t_1568size_t; + +libcrux_kyber_types_MlKemKeyPair___3168size_t_1568size_t +libcrux_kyber_kyber1024_generate_key_pair(uint8_t randomness[64U]); + +typedef struct libcrux_kyber_MlKemState___4size_t_s +{ + int32_t secret_as_ntt[4U][256U]; + int32_t t_as_ntt[4U][256U]; + int32_t a_transpose[4U][4U][256U]; + uint8_t rej[32U]; + uint8_t ind_cpa_public_key_hash[32U]; +} +libcrux_kyber_MlKemState___4size_t; + +typedef struct +K___libcrux_kyber_MlKemState__4size_t___libcrux_kyber_types_MlKemPublicKey__1568size_t___s +{ + libcrux_kyber_MlKemState___4size_t fst; + uint8_t snd[1568U]; +} +K___libcrux_kyber_MlKemState__4size_t___libcrux_kyber_types_MlKemPublicKey__1568size_t__; + +K___libcrux_kyber_MlKemState__4size_t___libcrux_kyber_types_MlKemPublicKey__1568size_t__ +libcrux_kyber_kyber1024_generate_key_pair_unpacked(uint8_t randomness[64U]); + +typedef struct K___libcrux_kyber_types_MlKemCiphertext__1568size_t___uint8_t_32size_t__s +{ + uint8_t fst[1568U]; + uint8_t snd[32U]; +} +K___libcrux_kyber_types_MlKemCiphertext__1568size_t___uint8_t_32size_t_; + +K___libcrux_kyber_types_MlKemCiphertext__1568size_t___uint8_t_32size_t_ +libcrux_kyber_kyber1024_encapsulate(uint8_t (*public_key)[1568U], uint8_t randomness[32U]); + +void +libcrux_kyber_kyber1024_decapsulate( + uint8_t (*secret_key)[3168U], + uint8_t (*ciphertext)[1568U], + uint8_t ret[32U] +); + +void +libcrux_kyber_kyber1024_decapsulate_unpacked( + libcrux_kyber_MlKemState___4size_t *state, + uint8_t (*ciphertext)[1568U], + uint8_t ret[32U] +); + +#if defined(__cplusplus) +} +#endif + +#define __libcrux_kyber1024_H_DEFINED +#endif diff --git a/libcrux/include/libcrux_kyber512.h b/libcrux/include/libcrux_kyber512.h new file mode 100644 index 000000000..0f1542419 --- /dev/null +++ b/libcrux/include/libcrux_kyber512.h @@ -0,0 +1,125 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: ../../../eurydice/eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc + F* version: b5cb71b8 + KaRaMeL version: 1282f04f + */ + +#ifndef __libcrux_kyber512_H +#define __libcrux_kyber512_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "libcrux_digest.h" +#include "core.h" +#include "eurydice_glue.h" + +#define LIBCRUX_KYBER_KYBER512_RANK_512 ((size_t)2U) + +#define LIBCRUX_KYBER_KYBER512_RANKED_BYTES_PER_RING_ELEMENT_512 
(LIBCRUX_KYBER_KYBER512_RANK_512 * LIBCRUX_KYBER_CONSTANTS_BITS_PER_RING_ELEMENT / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER512_T_AS_NTT_ENCODED_SIZE_512 (LIBCRUX_KYBER_KYBER512_RANK_512 * LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * LIBCRUX_KYBER_CONSTANTS_BITS_PER_COEFFICIENT / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER512_VECTOR_U_COMPRESSION_FACTOR_512 ((size_t)10U) + +#define LIBCRUX_KYBER_KYBER512_C1_BLOCK_SIZE_512 (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * LIBCRUX_KYBER_KYBER512_VECTOR_U_COMPRESSION_FACTOR_512 / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER512_C1_SIZE_512 (LIBCRUX_KYBER_KYBER512_C1_BLOCK_SIZE_512 * LIBCRUX_KYBER_KYBER512_RANK_512) + +#define LIBCRUX_KYBER_KYBER512_VECTOR_V_COMPRESSION_FACTOR_512 ((size_t)4U) + +#define LIBCRUX_KYBER_KYBER512_C2_SIZE_512 (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * LIBCRUX_KYBER_KYBER512_VECTOR_V_COMPRESSION_FACTOR_512 / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER512_CPA_PKE_SECRET_KEY_SIZE_512 (LIBCRUX_KYBER_KYBER512_RANK_512 * LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * LIBCRUX_KYBER_CONSTANTS_BITS_PER_COEFFICIENT / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER512_CPA_PKE_PUBLIC_KEY_SIZE_512 (LIBCRUX_KYBER_KYBER512_T_AS_NTT_ENCODED_SIZE_512 + (size_t)32U) + +#define LIBCRUX_KYBER_KYBER512_CPA_PKE_CIPHERTEXT_SIZE_512 (LIBCRUX_KYBER_KYBER512_C1_SIZE_512 + LIBCRUX_KYBER_KYBER512_C2_SIZE_512) + +#define LIBCRUX_KYBER_KYBER512_SECRET_KEY_SIZE_512 (LIBCRUX_KYBER_KYBER512_CPA_PKE_SECRET_KEY_SIZE_512 + LIBCRUX_KYBER_KYBER512_CPA_PKE_PUBLIC_KEY_SIZE_512 + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE) + +#define LIBCRUX_KYBER_KYBER512_ETA1 ((size_t)3U) + +#define LIBCRUX_KYBER_KYBER512_ETA1_RANDOMNESS_SIZE (LIBCRUX_KYBER_KYBER512_ETA1 * (size_t)64U) + +#define LIBCRUX_KYBER_KYBER512_ETA2 ((size_t)2U) + +#define LIBCRUX_KYBER_KYBER512_ETA2_RANDOMNESS_SIZE (LIBCRUX_KYBER_KYBER512_ETA2 * (size_t)64U) + +#define LIBCRUX_KYBER_KYBER512_IMPLICIT_REJECTION_HASH_INPUT_SIZE (LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE + LIBCRUX_KYBER_KYBER512_CPA_PKE_CIPHERTEXT_SIZE_512) + +typedef uint8_t libcrux_kyber_types_MlKemPublicKey___800size_t[800U]; + +typedef struct core_option_Option__libcrux_kyber_types_MlKemPublicKey__800size_t___s +{ + core_option_Option__size_t_tags tag; + libcrux_kyber_types_MlKemPublicKey___800size_t f0; +} +core_option_Option__libcrux_kyber_types_MlKemPublicKey__800size_t__; + +core_option_Option__libcrux_kyber_types_MlKemPublicKey__800size_t__ +libcrux_kyber_kyber512_validate_public_key(uint8_t public_key[800U]); + +typedef struct libcrux_kyber_types_MlKemKeyPair___1632size_t_800size_t_s +{ + uint8_t sk[1632U]; + uint8_t pk[800U]; +} +libcrux_kyber_types_MlKemKeyPair___1632size_t_800size_t; + +libcrux_kyber_types_MlKemKeyPair___1632size_t_800size_t +libcrux_kyber_kyber512_generate_key_pair(uint8_t randomness[64U]); + +typedef struct libcrux_kyber_MlKemState___2size_t_s +{ + int32_t secret_as_ntt[2U][256U]; + int32_t t_as_ntt[2U][256U]; + int32_t a_transpose[2U][2U][256U]; + uint8_t rej[32U]; + uint8_t ind_cpa_public_key_hash[32U]; +} +libcrux_kyber_MlKemState___2size_t; + +typedef struct +K___libcrux_kyber_MlKemState__2size_t___libcrux_kyber_types_MlKemPublicKey__800size_t___s +{ + libcrux_kyber_MlKemState___2size_t fst; + uint8_t snd[800U]; +} +K___libcrux_kyber_MlKemState__2size_t___libcrux_kyber_types_MlKemPublicKey__800size_t__; + +K___libcrux_kyber_MlKemState__2size_t___libcrux_kyber_types_MlKemPublicKey__800size_t__ 
+libcrux_kyber_kyber512_generate_key_pair_unpacked(uint8_t randomness[64U]); + +typedef struct K___libcrux_kyber_types_MlKemCiphertext__768size_t___uint8_t_32size_t__s +{ + uint8_t fst[768U]; + uint8_t snd[32U]; +} +K___libcrux_kyber_types_MlKemCiphertext__768size_t___uint8_t_32size_t_; + +K___libcrux_kyber_types_MlKemCiphertext__768size_t___uint8_t_32size_t_ +libcrux_kyber_kyber512_encapsulate(uint8_t (*public_key)[800U], uint8_t randomness[32U]); + +void +libcrux_kyber_kyber512_decapsulate( + uint8_t (*secret_key)[1632U], + uint8_t (*ciphertext)[768U], + uint8_t ret[32U] +); + +void +libcrux_kyber_kyber512_decapsulate_unpacked( + libcrux_kyber_MlKemState___2size_t *state, + uint8_t (*ciphertext)[768U], + uint8_t ret[32U] +); + +#if defined(__cplusplus) +} +#endif + +#define __libcrux_kyber512_H_DEFINED +#endif diff --git a/libcrux/include/libcrux_kyber768.h b/libcrux/include/libcrux_kyber768.h new file mode 100644 index 000000000..42a9afcca --- /dev/null +++ b/libcrux/include/libcrux_kyber768.h @@ -0,0 +1,125 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: ../../../eurydice/eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc + F* version: b5cb71b8 + KaRaMeL version: 1282f04f + */ + +#ifndef __libcrux_kyber768_H +#define __libcrux_kyber768_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "libcrux_digest.h" +#include "core.h" +#include "eurydice_glue.h" + +#define LIBCRUX_KYBER_KYBER768_RANK_768 ((size_t)3U) + +#define LIBCRUX_KYBER_KYBER768_RANKED_BYTES_PER_RING_ELEMENT_768 (LIBCRUX_KYBER_KYBER768_RANK_768 * LIBCRUX_KYBER_CONSTANTS_BITS_PER_RING_ELEMENT / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER768_T_AS_NTT_ENCODED_SIZE_768 (LIBCRUX_KYBER_KYBER768_RANK_768 * LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * LIBCRUX_KYBER_CONSTANTS_BITS_PER_COEFFICIENT / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER768_VECTOR_U_COMPRESSION_FACTOR_768 ((size_t)10U) + +#define LIBCRUX_KYBER_KYBER768_C1_BLOCK_SIZE_768 (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * LIBCRUX_KYBER_KYBER768_VECTOR_U_COMPRESSION_FACTOR_768 / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER768_C1_SIZE_768 (LIBCRUX_KYBER_KYBER768_C1_BLOCK_SIZE_768 * LIBCRUX_KYBER_KYBER768_RANK_768) + +#define LIBCRUX_KYBER_KYBER768_VECTOR_V_COMPRESSION_FACTOR_768 ((size_t)4U) + +#define LIBCRUX_KYBER_KYBER768_C2_SIZE_768 (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * LIBCRUX_KYBER_KYBER768_VECTOR_V_COMPRESSION_FACTOR_768 / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER768_CPA_PKE_SECRET_KEY_SIZE_768 (LIBCRUX_KYBER_KYBER768_RANK_768 * LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * LIBCRUX_KYBER_CONSTANTS_BITS_PER_COEFFICIENT / (size_t)8U) + +#define LIBCRUX_KYBER_KYBER768_CPA_PKE_PUBLIC_KEY_SIZE_768 (LIBCRUX_KYBER_KYBER768_T_AS_NTT_ENCODED_SIZE_768 + (size_t)32U) + +#define LIBCRUX_KYBER_KYBER768_CPA_PKE_CIPHERTEXT_SIZE_768 (LIBCRUX_KYBER_KYBER768_C1_SIZE_768 + LIBCRUX_KYBER_KYBER768_C2_SIZE_768) + +#define LIBCRUX_KYBER_KYBER768_SECRET_KEY_SIZE_768 (LIBCRUX_KYBER_KYBER768_CPA_PKE_SECRET_KEY_SIZE_768 + LIBCRUX_KYBER_KYBER768_CPA_PKE_PUBLIC_KEY_SIZE_768 + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE) + +#define LIBCRUX_KYBER_KYBER768_ETA1 ((size_t)2U) + +#define LIBCRUX_KYBER_KYBER768_ETA1_RANDOMNESS_SIZE (LIBCRUX_KYBER_KYBER768_ETA1 * (size_t)64U) + +#define LIBCRUX_KYBER_KYBER768_ETA2 ((size_t)2U) + +#define LIBCRUX_KYBER_KYBER768_ETA2_RANDOMNESS_SIZE (LIBCRUX_KYBER_KYBER768_ETA2 * (size_t)64U) + +#define 
LIBCRUX_KYBER_KYBER768_IMPLICIT_REJECTION_HASH_INPUT_SIZE (LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE + LIBCRUX_KYBER_KYBER768_CPA_PKE_CIPHERTEXT_SIZE_768) + +typedef uint8_t libcrux_kyber_types_MlKemPublicKey___1184size_t[1184U]; + +typedef struct core_option_Option__libcrux_kyber_types_MlKemPublicKey__1184size_t___s +{ + core_option_Option__size_t_tags tag; + libcrux_kyber_types_MlKemPublicKey___1184size_t f0; +} +core_option_Option__libcrux_kyber_types_MlKemPublicKey__1184size_t__; + +core_option_Option__libcrux_kyber_types_MlKemPublicKey__1184size_t__ +libcrux_kyber_kyber768_validate_public_key(uint8_t public_key[1184U]); + +typedef struct libcrux_kyber_types_MlKemKeyPair___2400size_t_1184size_t_s +{ + uint8_t sk[2400U]; + uint8_t pk[1184U]; +} +libcrux_kyber_types_MlKemKeyPair___2400size_t_1184size_t; + +libcrux_kyber_types_MlKemKeyPair___2400size_t_1184size_t +libcrux_kyber_kyber768_generate_key_pair(uint8_t randomness[64U]); + +typedef struct libcrux_kyber_MlKemState___3size_t_s +{ + int32_t secret_as_ntt[3U][256U]; + int32_t t_as_ntt[3U][256U]; + int32_t a_transpose[3U][3U][256U]; + uint8_t rej[32U]; + uint8_t ind_cpa_public_key_hash[32U]; +} +libcrux_kyber_MlKemState___3size_t; + +typedef struct +K___libcrux_kyber_MlKemState__3size_t___libcrux_kyber_types_MlKemPublicKey__1184size_t___s +{ + libcrux_kyber_MlKemState___3size_t fst; + uint8_t snd[1184U]; +} +K___libcrux_kyber_MlKemState__3size_t___libcrux_kyber_types_MlKemPublicKey__1184size_t__; + +K___libcrux_kyber_MlKemState__3size_t___libcrux_kyber_types_MlKemPublicKey__1184size_t__ +libcrux_kyber_kyber768_generate_key_pair_unpacked(uint8_t randomness[64U]); + +typedef struct K___libcrux_kyber_types_MlKemCiphertext__1088size_t___uint8_t_32size_t__s +{ + uint8_t fst[1088U]; + uint8_t snd[32U]; +} +K___libcrux_kyber_types_MlKemCiphertext__1088size_t___uint8_t_32size_t_; + +K___libcrux_kyber_types_MlKemCiphertext__1088size_t___uint8_t_32size_t_ +libcrux_kyber_kyber768_encapsulate(uint8_t (*public_key)[1184U], uint8_t randomness[32U]); + +void +libcrux_kyber_kyber768_decapsulate( + uint8_t (*secret_key)[2400U], + uint8_t (*ciphertext)[1088U], + uint8_t ret[32U] +); + +void +libcrux_kyber_kyber768_decapsulate_unpacked( + libcrux_kyber_MlKemState___3size_t *state, + uint8_t (*ciphertext)[1088U], + uint8_t ret[32U] +); + +#if defined(__cplusplus) +} +#endif + +#define __libcrux_kyber768_H_DEFINED +#endif diff --git a/libcrux/include/libcrux_platform.h b/libcrux/include/libcrux_platform.h new file mode 100644 index 000000000..f338a078e --- /dev/null +++ b/libcrux/include/libcrux_platform.h @@ -0,0 +1,17 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: /Users/bhargava/Desktop/repositories/eurydice/eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc + F* version: a32b316e + KaRaMeL version: 08bfa78a + */ + +#ifndef __libcrux_platform_H +#define __libcrux_platform_H + +#include "eurydice_glue.h" + +extern bool libcrux_platform_simd256_support(void); + + +#define __libcrux_platform_H_DEFINED +#endif diff --git a/libcrux/src/Eurydice.c b/libcrux/src/Eurydice.c new file mode 100644 index 000000000..e1a2df799 --- /dev/null +++ b/libcrux/src/Eurydice.c @@ -0,0 +1,13 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: /Users/franziskus/repos/eurydice//eurydice ../libcrux_kyber.llbc + F* version: a32b316e + KaRaMeL version: f1b60dfc + */ + +#include "Eurydice.h" + +typedef size_t RangeTo__size_t; + +typedef size_t RangeFrom__size_t; + diff --git a/libcrux/src/Libcrux_Kem_Kyber_Kyber768.c 
b/libcrux/src/Libcrux_Kem_Kyber_Kyber768.c
new file mode 100644
index 000000000..e5094c1d8
--- /dev/null
+++ b/libcrux/src/Libcrux_Kem_Kyber_Kyber768.c
@@ -0,0 +1,36 @@
+#include <string.h>
+
+#include "Libcrux_Kem_Kyber_Kyber768.h"
+#include "libcrux_kyber768.h"
+
+void
+Libcrux_Kyber768_GenerateKeyPair(uint8_t* pk,
+                                 uint8_t* sk,
+                                 uint8_t randomness[64])
+{
+  libcrux_kyber_types_MlKemKeyPair___2400size_t_1184size_t result =
+    libcrux_kyber_kyber768_generate_key_pair(randomness);
+
+  memcpy(pk, result.pk, KYBER768_PUBLICKEYBYTES);
+  memcpy(sk, result.sk, KYBER768_SECRETKEYBYTES);
+}
+
+void
+Libcrux_Kyber768_Encapsulate(uint8_t* ct,
+                             uint8_t* ss,
+                             uint8_t (*pk)[1184],
+                             uint8_t randomness[32])
+{
+  K___libcrux_kyber_types_MlKemCiphertext__1088size_t___uint8_t_32size_t_
+    result = libcrux_kyber_kyber768_encapsulate(pk, randomness);
+  memcpy(ct, result.fst, KYBER768_CIPHERTEXTBYTES);
+  memcpy(ss, result.snd, KYBER768_SHAREDSECRETBYTES);
+}
+
+void
+Libcrux_Kyber768_Decapsulate(uint8_t ss[32U],
+                             uint8_t (*ct)[1088U],
+                             uint8_t (*sk)[2400U])
+{
+  libcrux_kyber_kyber768_decapsulate(sk, ct, ss);
+}
diff --git a/libcrux/src/LowStar_Ignore.c b/libcrux/src/LowStar_Ignore.c
new file mode 100644
index 000000000..5048b1134
--- /dev/null
+++ b/libcrux/src/LowStar_Ignore.c
@@ -0,0 +1,11 @@
+/*
+  This file was generated by KaRaMeL
+  KaRaMeL invocation: /Users/franziskus/repos/eurydice//eurydice ../libcrux_kyber.llbc
+  F* version: d0aa54cf
+  KaRaMeL version: 1f52609c
+ */
+
+#include "LowStar_Ignore.h"
+
+
+
diff --git a/libcrux/src/core.c b/libcrux/src/core.c
new file mode 100644
index 000000000..97af1c05c
--- /dev/null
+++ b/libcrux/src/core.c
@@ -0,0 +1,34 @@
+/*
+  This file was generated by KaRaMeL
+  KaRaMeL invocation: ../../../eurydice/eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc
+  F* version: b5cb71b8
+  KaRaMeL version: 1282f04f
+ */
+
+#include "internal/core.h"
+
+typedef size_t RangeTo__size_t;
+
+typedef size_t RangeFrom__size_t;
+
+typedef struct Option__size_t_s
+{
+  core_option_Option__size_t_tags tag;
+  size_t f0;
+}
+Option__size_t;
+
+typedef struct Option__uint32_t_s
+{
+  core_option_Option__size_t_tags tag;
+  uint32_t f0;
+}
+Option__uint32_t;
+
+typedef struct Option__int32_t_s
+{
+  core_option_Option__size_t_tags tag;
+  int32_t f0;
+}
+Option__int32_t;
+
diff --git a/libcrux/src/libcrux_hacl_glue.c b/libcrux/src/libcrux_hacl_glue.c
new file mode 100644
index 000000000..42f53b4a8
--- /dev/null
+++ b/libcrux/src/libcrux_hacl_glue.c
@@ -0,0 +1,174 @@
+#include "libcrux_hacl_glue.h"
+#include "Hacl_Hash_SHA3.h"
+#include "libcrux_digest.h"
+#include "libcrux_kyber768.h"
+#include "libcrux_platform.h"
+
+#ifdef HACL_CAN_COMPILE_VEC256
+#include "EverCrypt_AutoConfig2.h"
+#include "Hacl_Hash_SHA3_Simd256.h"
+#endif
+#include "Hacl_Hash_SHA3_Scalar.h"
+
+static int evercrypt_initialized = false;
+
+bool
+libcrux_platform_simd256_support(void)
+{
+#ifdef HACL_CAN_COMPILE_VEC256
+  // TODO: call runtime CPU detection to detect whether the target machine does have AVX2
+  if (!evercrypt_initialized) {
+    EverCrypt_AutoConfig2_init();
+    evercrypt_initialized = true;
+  }
+  return EverCrypt_AutoConfig2_has_avx2();
+#endif
+  return false;
+}
+
+inline void
+libcrux_digest_shake256_(size_t len, Eurydice_slice input, uint8_t* out)
+{
+  Hacl_Hash_SHA3_shake256_hacl(input.len, input.ptr, (uint32_t)len, out);
+}
+
+inline void
+libcrux_digest_shake128_(size_t len, Eurydice_slice input, uint8_t* out)
+{
+  Hacl_Hash_SHA3_shake128_hacl(input.len, input.ptr, (uint32_t)len, out);
+}
+
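+/* One-shot SHA3-512 / SHA3-256 wrappers mapping the Eurydice slice API onto
+   the corresponding HACL* hash functions. */
+inline void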
+libcrux_digest_sha3_512(Eurydice_slice x0, uint8_t x1[64U]) +{ + Hacl_Hash_SHA3_sha3_512(x1, x0.ptr, (uint32_t)x0.len); +} + +inline void +libcrux_digest_sha3_256(Eurydice_slice x0, uint8_t x1[32U]) +{ + Hacl_Hash_SHA3_sha3_256(x1, x0.ptr, (uint32_t)x0.len); +} + +inline libcrux_digest_incremental_x4_Shake128StateX4 +libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__new( + void) +{ +#ifdef HACL_CAN_COMPILE_VEC256 + if (libcrux_platform_simd256_support()) { + return (libcrux_digest_incremental_x4_Shake128StateX4){ + .x4 = Hacl_Hash_SHA3_Simd256_state_malloc(), + .st0 = NULL, + .st1 = NULL, + .st2 = NULL, + .st3 = NULL, + }; + } else { + uint64_t* st0 = Hacl_Hash_SHA3_Scalar_state_malloc(); + uint64_t* st1 = Hacl_Hash_SHA3_Scalar_state_malloc(); + uint64_t* st2 = Hacl_Hash_SHA3_Scalar_state_malloc(); + uint64_t* st3 = Hacl_Hash_SHA3_Scalar_state_malloc(); + return (libcrux_digest_incremental_x4_Shake128StateX4){ + .x4 = NULL, .st0 = st0, .st1 = st1, .st2 = st2, .st3 = st3 + }; + } +#else + uint64_t* st0 = Hacl_Hash_SHA3_Scalar_state_malloc(); + uint64_t* st1 = Hacl_Hash_SHA3_Scalar_state_malloc(); + uint64_t* st2 = Hacl_Hash_SHA3_Scalar_state_malloc(); + uint64_t* st3 = Hacl_Hash_SHA3_Scalar_state_malloc(); + return (libcrux_digest_incremental_x4_Shake128StateX4){ + .st0 = st0, .st1 = st1, .st2 = st2, .st3 = st3 + }; +#endif +} + +inline void +libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__absorb_final_( + size_t k, + libcrux_digest_incremental_x4_Shake128StateX4* state, + //Eurydice_slice x1[k]) + Eurydice_slice *x1) +{ +#ifdef HACL_CAN_COMPILE_VEC256 + if (libcrux_platform_simd256_support()) { + Hacl_Hash_SHA3_Simd256_shake128_absorb_final( + state->x4, x1[0].ptr, x1[1].ptr, x1[2 % k].ptr, x1[3 % k].ptr, x1[0].len); + } else { + // This function requires that the data be no longer than a partial block, + // meaning we can safely downcast into a uint32_t. + Hacl_Hash_SHA3_Scalar_shake128_absorb_final(state->st0, x1[0].ptr, (uint32_t) x1[0].len); + Hacl_Hash_SHA3_Scalar_shake128_absorb_final(state->st1, x1[1].ptr, (uint32_t) x1[1].len); + if (k >= 3) + Hacl_Hash_SHA3_Scalar_shake128_absorb_final(state->st2, x1[2].ptr, (uint32_t) x1[2].len); + if (k >= 4) + Hacl_Hash_SHA3_Scalar_shake128_absorb_final(state->st3, x1[3].ptr, (uint32_t) x1[3].len); + } +#else + Hacl_Hash_SHA3_Scalar_shake128_absorb_final(state->st0, x1[0].ptr, (uint32_t) x1[0].len); + Hacl_Hash_SHA3_Scalar_shake128_absorb_final(state->st1, x1[1].ptr, (uint32_t) x1[1].len); + if (k >= 3) + Hacl_Hash_SHA3_Scalar_shake128_absorb_final(state->st2, x1[2].ptr, (uint32_t) x1[2].len); + if (k >= 4) + Hacl_Hash_SHA3_Scalar_shake128_absorb_final(state->st3, x1[3].ptr, (uint32_t) x1[3].len); +#endif +} + +inline void +libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__squeeze_blocks_f( + libcrux_digest_incremental_x4_Shake128StateX4* x1, + size_t block_len, + size_t num, + uint8_t *output) +{ +#ifdef HACL_CAN_COMPILE_VEC256 + if (libcrux_platform_simd256_support()) { + // FIXME: the API does not allow aliased inputs -- discuss with Mamone + uint8_t* tmp1 = KRML_HOST_MALLOC(block_len); + uint8_t* tmp2 = KRML_HOST_MALLOC(block_len); + Hacl_Hash_SHA3_Simd256_shake128_squeeze_nblocks(x1->x4, + output + 0 * block_len, + output + 1 * block_len, + num >= 3 ? output + 2 * block_len : tmp1, + num >= 4 ? 
output + 3 * block_len : tmp2, + block_len); + free(tmp1); + free(tmp2); + } else { + Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(x1->st0, output + 0 * block_len, block_len); + Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(x1->st1, output + 1 * block_len, block_len); + if (num >= 3) + Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(x1->st2, output + 2 * block_len, block_len); + if (num >= 4) + Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(x1->st3, output + 3 * block_len, block_len); + } +#else + Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(x1->st0, output + 0 * block_len, block_len); + Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(x1->st1, output + 1 * block_len, block_len); + if (num >= 3) + Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(x1->st2, output + 2 * block_len, block_len); + if (num >= 4) + Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(x1->st3, output + 3 * block_len, block_len); +#endif +} + +inline void +libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__free_memory( + libcrux_digest_incremental_x4_Shake128StateX4 x0) +{ +#ifdef HACL_CAN_COMPILE_VEC256 + if (libcrux_platform_simd256_support()) { + Hacl_Hash_SHA3_Simd256_state_free(x0.x4); + } else { + Hacl_Hash_SHA3_Scalar_state_free(x0.st0); + Hacl_Hash_SHA3_Scalar_state_free(x0.st1); + Hacl_Hash_SHA3_Scalar_state_free(x0.st2); + Hacl_Hash_SHA3_Scalar_state_free(x0.st3); + } +#else + Hacl_Hash_SHA3_Scalar_state_free(x0.st0); + Hacl_Hash_SHA3_Scalar_state_free(x0.st1); + Hacl_Hash_SHA3_Scalar_state_free(x0.st2); + Hacl_Hash_SHA3_Scalar_state_free(x0.st3); +#endif +} diff --git a/libcrux/src/libcrux_kyber1024.c b/libcrux/src/libcrux_kyber1024.c new file mode 100644 index 000000000..851c9256e --- /dev/null +++ b/libcrux/src/libcrux_kyber1024.c @@ -0,0 +1,2396 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: ../../../eurydice/eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc + F* version: b5cb71b8 + KaRaMeL version: 1282f04f + */ + +#include "libcrux_kyber1024.h" + +#include "internal/libcrux_kyber_common.h" +#include "internal/libcrux_kyber768.h" +#include "libcrux_hacl_glue.h" + +static inline void +deserialize_ring_elements_reduced___1568size_t_4size_t( + Eurydice_slice public_key, + int32_t ret[4U][256U] +) +{ + int32_t deserialized_pk[4U][256U]; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + memcpy(deserialized_pk[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(public_key, + uint8_t, + size_t) + / LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT; + i++) + { + size_t i0 = i; + Eurydice_slice + ring_element = + Eurydice_slice_subslice(public_key, + ( + (core_ops_range_Range__size_t){ + .start = i0 * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT, + .end = i0 + * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + + LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t uu____0[256U]; + libcrux_kyber_serialize_deserialize_to_reduced_ring_element(ring_element, uu____0); + memcpy(deserialized_pk[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, deserialized_pk, (size_t)4U * sizeof (int32_t [256U])); +} + +static inline void +serialize_secret_key___4size_t_1536size_t(int32_t key[4U][256U], uint8_t ret[1536U]) +{ + uint8_t out[1536U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + 
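Taken together, the Shake128StateX4 glue above gives a four-lane incremental SHAKE-128: a single AVX2 state when HACL_CAN_COMPILE_VEC256 is set and the CPU supports it, otherwise four scalar Keccak states (in the SIMD path, lanes beyond k alias lane 0 via the 2 % k / 3 % k indexing). A hypothetical three-lane caller is sketched below; it assumes Eurydice_slice is the { ptr, len } view provided by eurydice_glue.h.

#include <stddef.h>
#include <stdint.h>
#include "libcrux_hacl_glue.h"

/* Absorb three 34-byte inputs, then squeeze one 168-byte SHAKE-128
   rate block per lane; lane outputs are laid out back to back. */
static void
shake128_x4_three_lanes(uint8_t seeds[3][34], uint8_t out[3 * 168])
{
  libcrux_digest_incremental_x4_Shake128StateX4 st =
    libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__new();
  Eurydice_slice in[3];
  for (size_t i = 0; i < 3; i++) {
    in[i].ptr = seeds[i];
    in[i].len = 34;
  }
  libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__absorb_final_(
    3, &st, in);
  libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__squeeze_blocks_f(
    &st, 168, 3, out);
  libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__free_memory(st);
}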
core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)4U, + key, + int32_t [256U], + Eurydice_slice), + int32_t [256U], + size_t); + i++) + { + size_t i0 = i; + int32_t re[256U]; + memcpy(re, key[i0], (size_t)256U * sizeof (int32_t)); + Eurydice_slice + uu____0 = + Eurydice_array_to_subslice((size_t)1536U, + out, + ( + (core_ops_range_Range__size_t){ + .start = i0 * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT, + .end = (i0 + (size_t)1U) * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + uint8_t ret0[384U]; + libcrux_kyber_serialize_serialize_uncompressed_ring_element(re, ret0); + core_slice___Slice_T___copy_from_slice(uu____0, + Eurydice_array_to_slice((size_t)384U, ret0, uint8_t, Eurydice_slice), + uint8_t, + void *); + } + memcpy(ret, out, (size_t)1536U * sizeof (uint8_t)); +} + +static inline void +serialize_public_key___4size_t_1536size_t_1568size_t( + int32_t t_as_ntt[4U][256U], + Eurydice_slice seed_for_a, + uint8_t ret[1568U] +) +{ + uint8_t public_key_serialized[1568U] = { 0U }; + Eurydice_slice + uu____0 = + Eurydice_array_to_subslice((size_t)1568U, + public_key_serialized, + ((core_ops_range_Range__size_t){ .start = (size_t)0U, .end = (size_t)1536U }), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t uu____1[4U][256U]; + memcpy(uu____1, t_as_ntt, (size_t)4U * sizeof (int32_t [256U])); + uint8_t ret0[1536U]; + serialize_secret_key___4size_t_1536size_t(uu____1, ret0); + core_slice___Slice_T___copy_from_slice(uu____0, + Eurydice_array_to_slice((size_t)1536U, ret0, uint8_t, Eurydice_slice), + uint8_t, + void *); + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice_from((size_t)1568U, + public_key_serialized, + (size_t)1536U, + uint8_t, + size_t, + Eurydice_slice), + seed_for_a, + uint8_t, + void *); + memcpy(ret, public_key_serialized, (size_t)1568U * sizeof (uint8_t)); +} + +static bool validate_public_key___4size_t_1536size_t_1568size_t(uint8_t *public_key) +{ + int32_t deserialized_pk[4U][256U]; + deserialize_ring_elements_reduced___1568size_t_4size_t(Eurydice_array_to_subslice_to((size_t)1568U, + public_key, + (size_t)1536U, + uint8_t, + size_t, + Eurydice_slice), + deserialized_pk); + int32_t uu____0[4U][256U]; + memcpy(uu____0, deserialized_pk, (size_t)4U * sizeof (int32_t [256U])); + uint8_t public_key_serialized[1568U]; + serialize_public_key___4size_t_1536size_t_1568size_t(uu____0, + Eurydice_array_to_subslice_from((size_t)1568U, + public_key, + (size_t)1536U, + uint8_t, + size_t, + Eurydice_slice), + public_key_serialized); + return + core_array_equality___core__cmp__PartialEq__Array_B__N___for__Array_A__N____eq((size_t)1568U, + public_key, + public_key_serialized, + uint8_t, + uint8_t, + bool); +} + +core_option_Option__libcrux_kyber_types_MlKemPublicKey__1568size_t__ +libcrux_kyber_kyber1024_validate_public_key(uint8_t public_key[1568U]) +{ + core_option_Option__libcrux_kyber_types_MlKemPublicKey__1568size_t__ uu____0; + if (validate_public_key___4size_t_1536size_t_1568size_t(public_key)) + { + core_option_Option__libcrux_kyber_types_MlKemPublicKey__1568size_t__ lit; + lit.tag = core_option_Some; + memcpy(lit.f0, public_key, (size_t)1568U * sizeof (uint8_t)); + uu____0 = lit; + } + else + { + uu____0 = + ( + (core_option_Option__libcrux_kyber_types_MlKemPublicKey__1568size_t__){ + .tag = core_option_None + } + ); + } + return uu____0; +} + +static inline libcrux_digest_incremental_x4_Shake128StateX4 +absorb___4size_t(uint8_t input[4U][34U]) +{ + 
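libcrux_kyber_kyber1024_validate_public_key above performs the ML-KEM encapsulation-key check by round-tripping: deserialize the packed 12-bit coefficients (which reduces them mod q), re-serialize, and compare with the input, so any non-canonical coefficient fails the comparison. A small caller sketch, assuming these declarations live in libcrux_kyber1024.h:

#include <stdbool.h>
#include <stdint.h>
#include "libcrux_kyber1024.h"

/* True when the 1568-byte encapsulation key survives the
   deserialize/re-serialize round trip performed above. */
static bool
pk_is_canonical(uint8_t pk[1568])
{
  core_option_Option__libcrux_kyber_types_MlKemPublicKey__1568size_t__ opt =
    libcrux_kyber_kyber1024_validate_public_key(pk);
  return opt.tag == core_option_Some;
}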
libcrux_digest_incremental_x4_Shake128StateX4 + state = libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__new(); + Eurydice_slice data[4U]; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + uint8_t buf[1U] = { 0U }; + data[i] = Eurydice_array_to_slice((size_t)1U, buf, uint8_t, Eurydice_slice); + } + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + size_t i0 = i; + Eurydice_slice + uu____0 = Eurydice_array_to_slice((size_t)34U, input[i0], uint8_t, Eurydice_slice); + data[i0] = uu____0; + } + libcrux_digest_incremental_x4_Shake128StateX4 *uu____1 = &state; + Eurydice_slice uu____2[4U]; + memcpy(uu____2, data, (size_t)4U * sizeof (Eurydice_slice)); + libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__absorb_final((size_t)4U, + uu____1, + uu____2, + void *); + return state; +} + +static inline void +squeeze_three_blocks___4size_t( + libcrux_digest_incremental_x4_Shake128StateX4 *xof_state, + uint8_t ret[4U][504U] +) +{ + uint8_t output[4U][504U]; + libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__squeeze_blocks((size_t)504U, + (size_t)4U, + xof_state, + output, + void *); + uint8_t out[4U][504U] = { { 0U } }; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + size_t i0 = i; + uint8_t uu____0[504U]; + memcpy(uu____0, output[i0], (size_t)504U * sizeof (uint8_t)); + memcpy(out[i0], uu____0, (size_t)504U * sizeof (uint8_t)); + } + memcpy(ret, out, (size_t)4U * sizeof (uint8_t [504U])); +} + +static bool +sample_from_uniform_distribution_next___4size_t_504size_t( + uint8_t randomness[4U][504U], + size_t *sampled_coefficients, + int32_t (*out)[256U] +) +{ + bool done = true; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + size_t i0 = i; + core_slice_iter_Chunks + iter = + core_iter_traits_collect___core__iter__traits__collect__IntoIterator_for_I___into_iter(core_slice___Slice_T___chunks(Eurydice_array_to_slice((size_t)504U, + randomness[i0], + uint8_t, + Eurydice_slice), + (size_t)3U, + uint8_t, + core_slice_iter_Chunks), + core_slice_iter_Chunks, + core_slice_iter_Chunks); + while (true) + { + core_option_Option__Eurydice_slice_uint8_t + uu____0 = + core_slice_iter___core__iter__traits__iterator__Iterator_for_core__slice__iter__Chunks__a__T___70__next(&iter, + uint8_t, + core_option_Option__Eurydice_slice_uint8_t); + if (uu____0.tag == core_option_None) + { + break; + } + else + { + Eurydice_slice bytes = uu____0.f0; + int32_t b1 = (int32_t)Eurydice_slice_index(bytes, (size_t)0U, uint8_t, uint8_t); + int32_t b2 = (int32_t)Eurydice_slice_index(bytes, (size_t)1U, uint8_t, uint8_t); + int32_t b3 = (int32_t)Eurydice_slice_index(bytes, (size_t)2U, uint8_t, uint8_t); + int32_t d1 = (b2 & (int32_t)15) << 8U | b1; + int32_t d2 = b3 << 4U | b2 >> 4U; + bool uu____1; + if (d1 < LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS) + { + uu____1 = sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; + } + else + { + uu____1 = false; + } + if (uu____1) + { + out[i0][sampled_coefficients[i0]] = d1; + size_t uu____2 = i0; + sampled_coefficients[uu____2] = sampled_coefficients[uu____2] + (size_t)1U; + } + bool uu____3; + if (d2 < LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS) + { + uu____3 = sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; + } + else + { + uu____3 = false; + } + if (uu____3) + { + out[i0][sampled_coefficients[i0]] = d2; + size_t uu____4 = i0; + sampled_coefficients[uu____4] = sampled_coefficients[uu____4] + (size_t)1U; + } + } + } + if 
(sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT) + { + done = false; + } + } + return done; +} + +static inline void +squeeze_block___4size_t( + libcrux_digest_incremental_x4_Shake128StateX4 *xof_state, + uint8_t ret[4U][168U] +) +{ + uint8_t output[4U][168U]; + libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__squeeze_blocks((size_t)168U, + (size_t)4U, + xof_state, + output, + void *); + uint8_t out[4U][168U] = { { 0U } }; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + size_t i0 = i; + uint8_t uu____0[168U]; + memcpy(uu____0, output[i0], (size_t)168U * sizeof (uint8_t)); + memcpy(out[i0], uu____0, (size_t)168U * sizeof (uint8_t)); + } + memcpy(ret, out, (size_t)4U * sizeof (uint8_t [168U])); +} + +static bool +sample_from_uniform_distribution_next___4size_t_168size_t( + uint8_t randomness[4U][168U], + size_t *sampled_coefficients, + int32_t (*out)[256U] +) +{ + bool done = true; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + size_t i0 = i; + core_slice_iter_Chunks + iter = + core_iter_traits_collect___core__iter__traits__collect__IntoIterator_for_I___into_iter(core_slice___Slice_T___chunks(Eurydice_array_to_slice((size_t)168U, + randomness[i0], + uint8_t, + Eurydice_slice), + (size_t)3U, + uint8_t, + core_slice_iter_Chunks), + core_slice_iter_Chunks, + core_slice_iter_Chunks); + while (true) + { + core_option_Option__Eurydice_slice_uint8_t + uu____0 = + core_slice_iter___core__iter__traits__iterator__Iterator_for_core__slice__iter__Chunks__a__T___70__next(&iter, + uint8_t, + core_option_Option__Eurydice_slice_uint8_t); + if (uu____0.tag == core_option_None) + { + break; + } + else + { + Eurydice_slice bytes = uu____0.f0; + int32_t b1 = (int32_t)Eurydice_slice_index(bytes, (size_t)0U, uint8_t, uint8_t); + int32_t b2 = (int32_t)Eurydice_slice_index(bytes, (size_t)1U, uint8_t, uint8_t); + int32_t b3 = (int32_t)Eurydice_slice_index(bytes, (size_t)2U, uint8_t, uint8_t); + int32_t d1 = (b2 & (int32_t)15) << 8U | b1; + int32_t d2 = b3 << 4U | b2 >> 4U; + bool uu____1; + if (d1 < LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS) + { + uu____1 = sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; + } + else + { + uu____1 = false; + } + if (uu____1) + { + out[i0][sampled_coefficients[i0]] = d1; + size_t uu____2 = i0; + sampled_coefficients[uu____2] = sampled_coefficients[uu____2] + (size_t)1U; + } + bool uu____3; + if (d2 < LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS) + { + uu____3 = sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; + } + else + { + uu____3 = false; + } + if (uu____3) + { + out[i0][sampled_coefficients[i0]] = d2; + size_t uu____4 = i0; + sampled_coefficients[uu____4] = sampled_coefficients[uu____4] + (size_t)1U; + } + } + } + if (sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT) + { + done = false; + } + } + return done; +} + +static void sample_from_xof___4size_t(uint8_t seeds[4U][34U], int32_t ret[4U][256U]) +{ + size_t sampled_coefficients[4U] = { 0U }; + int32_t out[4U][256U]; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + memcpy(out[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + uint8_t uu____0[4U][34U]; + memcpy(uu____0, seeds, (size_t)4U * sizeof (uint8_t [34U])); + libcrux_digest_incremental_x4_Shake128StateX4 xof_state = absorb___4size_t(uu____0); + uint8_t randomness0[4U][504U]; + squeeze_three_blocks___4size_t(&xof_state, 
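Both monomorphized samplers above decode each 3-byte chunk of XOF output into two 12-bit candidates and keep only values below q = 3329, so a uniform candidate is accepted with probability 3329/4096, roughly 0.81. A worked decode matching the d1/d2 arithmetic above (byte values chosen arbitrarily):

#include <stdint.h>

/* Decode one chunk (b1, b2, b3) = (0xAB, 0xCD, 0xEF) the way the
   samplers above do:
     d1 = ((b2 & 15) << 8) | b1  = 0xDAB = 3499 -> rejected (>= 3329)
     d2 = (b3 << 4) | (b2 >> 4)  = 0xEFC = 3836 -> rejected */
static void
decode_chunk(const uint8_t bytes[3], int32_t *d1, int32_t *d2)
{
  int32_t b1 = (int32_t)bytes[0];
  int32_t b2 = (int32_t)bytes[1];
  int32_t b3 = (int32_t)bytes[2];
  *d1 = ((b2 & 15) << 8) | b1;
  *d2 = (b3 << 4) | (b2 >> 4);
}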
randomness0); + uint8_t uu____1[4U][504U]; + memcpy(uu____1, randomness0, (size_t)4U * sizeof (uint8_t [504U])); + bool + done = + sample_from_uniform_distribution_next___4size_t_504size_t(uu____1, + sampled_coefficients, + out); + while (true) + { + if (!!done) + { + break; + } + uint8_t randomness[4U][168U]; + squeeze_block___4size_t(&xof_state, randomness); + uint8_t uu____2[4U][168U]; + memcpy(uu____2, randomness, (size_t)4U * sizeof (uint8_t [168U])); + done = + sample_from_uniform_distribution_next___4size_t_168size_t(uu____2, + sampled_coefficients, + out); + } + libcrux_kyber_hash_functions_free_state(xof_state); + memcpy(ret, out, (size_t)4U * sizeof (int32_t [256U])); +} + +static inline void +sample_matrix_A___4size_t(uint8_t seed[34U], bool transpose, int32_t ret[4U][4U][256U]) +{ + int32_t A_transpose[4U][4U][256U]; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + memcpy(A_transpose[i][0U], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + memcpy(A_transpose[i][1U], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + memcpy(A_transpose[i][2U], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + memcpy(A_transpose[i][3U], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for (size_t i0 = (size_t)0U; i0 < (size_t)4U; i0++) + { + size_t i1 = i0; + uint8_t uu____0[34U]; + memcpy(uu____0, seed, (size_t)34U * sizeof (uint8_t)); + uint8_t seeds[4U][34U]; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + memcpy(seeds[i], uu____0, (size_t)34U * sizeof (uint8_t)); + } + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + size_t j = i; + seeds[j][32U] = (uint8_t)i1; + seeds[j][33U] = (uint8_t)j; + } + uint8_t uu____1[4U][34U]; + memcpy(uu____1, seeds, (size_t)4U * sizeof (uint8_t [34U])); + int32_t sampled[4U][256U]; + sample_from_xof___4size_t(uu____1, sampled); + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + size_t j = i; + if (transpose) + { + memcpy(A_transpose[j][i1], sampled[j], (size_t)256U * sizeof (int32_t)); + } + else + { + memcpy(A_transpose[i1][j], sampled[j], (size_t)256U * sizeof (int32_t)); + } + } + } + memcpy(ret, A_transpose, (size_t)4U * sizeof (int32_t [4U][256U])); +} + +typedef struct __libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__uint8_t_s +{ + int32_t fst[4U][256U]; + uint8_t snd; +} +__libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__uint8_t; + +static inline __libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__uint8_t +sample_vector_cbd_then_ntt___4size_t_2size_t_128size_t( + uint8_t prf_input[33U], + uint8_t domain_separator +) +{ + int32_t re_as_ntt[4U][256U]; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + memcpy(re_as_ntt[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + size_t i0 = i; + prf_input[32U] = domain_separator; + domain_separator = (uint32_t)domain_separator + 1U; + uint8_t prf_output[128U]; + libcrux_kyber_hash_functions_PRF___128size_t(Eurydice_array_to_slice((size_t)33U, + prf_input, + uint8_t, + Eurydice_slice), + prf_output); + int32_t r[256U]; + libcrux_kyber_sampling_sample_from_binomial_distribution___2size_t(Eurydice_array_to_slice((size_t)128U, + prf_output, + 
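sample_matrix_A above expands one 32-byte seed into the whole k x k matrix by appending two domain-separation bytes per entry, seeds[j][32] = i and seeds[j][33] = j; the transpose flag only changes where each sampled element is stored, not the XOF input. A sketch of the 34-byte input layout:

#include <stdint.h>
#include <string.h>

/* Build the XOF input for matrix entry (i, j), mirroring the
   seeds[j][32U] = (uint8_t)i1; seeds[j][33U] = (uint8_t)j; writes above. */
static void
matrix_entry_seed(const uint8_t seed[32], uint8_t i, uint8_t j, uint8_t out[34])
{
  memcpy(out, seed, 32);
  out[32] = i; /* row index */
  out[33] = j; /* column index */
}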
uint8_t, + Eurydice_slice), + r); + int32_t uu____0[256U]; + libcrux_kyber_ntt_ntt_binomially_sampled_ring_element(r, uu____0); + memcpy(re_as_ntt[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + int32_t uu____1[4U][256U]; + memcpy(uu____1, re_as_ntt, (size_t)4U * sizeof (int32_t [256U])); + __libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__uint8_t lit; + memcpy(lit.fst, uu____1, (size_t)4U * sizeof (int32_t [256U])); + lit.snd = domain_separator; + return lit; +} + +static void +add_to_ring_element___4size_t(int32_t lhs[256U], int32_t (*rhs)[256U], int32_t ret[256U]) +{ + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)256U, lhs, int32_t, Eurydice_slice), + int32_t, + size_t); + i++) + { + size_t i0 = i; + size_t uu____0 = i0; + lhs[uu____0] = lhs[uu____0] + rhs[0U][i0]; + } + memcpy(ret, lhs, (size_t)256U * sizeof (int32_t)); +} + +static inline void +compute_As_plus_e___4size_t( + int32_t (*matrix_A)[4U][256U], + int32_t (*s_as_ntt)[256U], + int32_t (*error_as_ntt)[256U], + int32_t ret[4U][256U] +) +{ + int32_t result[4U][256U]; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + memcpy(result[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i0 = (size_t)0U; + i0 + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)4U, + matrix_A, + Eurydice_error_t_cg_array, + Eurydice_slice), + int32_t [4U][256U], + size_t); + i0++) + { + size_t i1 = i0; + int32_t (*row)[256U] = matrix_A[i1]; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)4U, + row, + int32_t [256U], + Eurydice_slice), + int32_t [256U], + size_t); + i++) + { + size_t j = i; + int32_t (*matrix_element)[256U] = &row[j]; + int32_t product[256U]; + libcrux_kyber_ntt_ntt_multiply(matrix_element, &s_as_ntt[j], product); + int32_t uu____0[256U]; + add_to_ring_element___4size_t(result[i1], &product, uu____0); + memcpy(result[i1], uu____0, (size_t)256U * sizeof (int32_t)); + } + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t j = i; + int32_t coefficient_normal_form = libcrux_kyber_arithmetic_to_standard_domain(result[i1][j]); + int32_t + uu____1 = + libcrux_kyber_arithmetic_barrett_reduce(coefficient_normal_form + error_as_ntt[i1][j]); + result[i1][j] = uu____1; + } + } + memcpy(ret, result, (size_t)4U * sizeof (int32_t [256U])); +} + +typedef struct +__libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__4size_t__s +{ + int32_t fst[4U][256U]; + int32_t snd[4U][256U]; + int32_t thd[4U][4U][256U]; +} +__libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__4size_t_; + +typedef struct +__libcrux_kyber_arithmetic_PolynomialRingElement_4size_t____libcrux_kyber_arithmetic_PolynomialRingElement_4size_t____libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__4size_t__uint8_t_1568size_t__s +{ + __libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__4size_t_ + fst; + uint8_t snd[1568U]; +} 
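compute_As_plus_e above evaluates t = A*s + e entirely in the NTT domain: the pointwise ntt_multiply products are accumulated row by row, each coefficient is mapped back to the standard domain, the error is added, and the sum is Barrett-reduced. A sketch of the final per-row step, with the helpers' prototypes assumed from their call sites above:

#include <stddef.h>
#include <stdint.h>

/* Prototypes assumed from the calls above. */
extern int32_t libcrux_kyber_arithmetic_to_standard_domain(int32_t x);
extern int32_t libcrux_kyber_arithmetic_barrett_reduce(int32_t x);

/* acc holds the accumulated NTT products for one row of A*s;
   e_row is the matching row of the error vector. */
static void
finish_row(int32_t acc[256], const int32_t e_row[256], int32_t t_row[256])
{
  for (size_t j = 0; j < 256; j++) {
    int32_t normal = libcrux_kyber_arithmetic_to_standard_domain(acc[j]);
    t_row[j] = libcrux_kyber_arithmetic_barrett_reduce(normal + e_row[j]);
  }
}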
+__libcrux_kyber_arithmetic_PolynomialRingElement_4size_t____libcrux_kyber_arithmetic_PolynomialRingElement_4size_t____libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__4size_t__uint8_t_1568size_t_; + +static __libcrux_kyber_arithmetic_PolynomialRingElement_4size_t____libcrux_kyber_arithmetic_PolynomialRingElement_4size_t____libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__4size_t__uint8_t_1568size_t_ +generate_keypair_unpacked___4size_t_1568size_t_1536size_t_2size_t_128size_t( + Eurydice_slice key_generation_seed +) +{ + uint8_t hashed[64U]; + libcrux_kyber_hash_functions_G(key_generation_seed, hashed); + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____0 = + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)64U, + hashed, + uint8_t, + Eurydice_slice), + (size_t)32U, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice seed_for_A = uu____0.fst; + Eurydice_slice seed_for_secret_and_error = uu____0.snd; + int32_t a_transpose[4U][4U][256U]; + uint8_t ret[34U]; + libcrux_kyber_ind_cpa_into_padded_array___34size_t(seed_for_A, ret); + sample_matrix_A___4size_t(ret, true, a_transpose); + uint8_t prf_input[33U]; + libcrux_kyber_ind_cpa_into_padded_array___33size_t(seed_for_secret_and_error, prf_input); + uint8_t uu____1[33U]; + memcpy(uu____1, prf_input, (size_t)33U * sizeof (uint8_t)); + __libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__uint8_t + uu____2 = sample_vector_cbd_then_ntt___4size_t_2size_t_128size_t(uu____1, 0U); + int32_t secret_as_ntt[4U][256U]; + memcpy(secret_as_ntt, uu____2.fst, (size_t)4U * sizeof (int32_t [256U])); + uint8_t domain_separator = uu____2.snd; + uint8_t uu____3[33U]; + memcpy(uu____3, prf_input, (size_t)33U * sizeof (uint8_t)); + int32_t error_as_ntt[4U][256U]; + memcpy(error_as_ntt, + sample_vector_cbd_then_ntt___4size_t_2size_t_128size_t(uu____3, domain_separator).fst, + (size_t)4U * sizeof (int32_t [256U])); + int32_t t_as_ntt[4U][256U]; + compute_As_plus_e___4size_t(a_transpose, secret_as_ntt, error_as_ntt, t_as_ntt); + int32_t uu____4[4U][256U]; + memcpy(uu____4, t_as_ntt, (size_t)4U * sizeof (int32_t [256U])); + uint8_t public_key_serialized[1568U]; + serialize_public_key___4size_t_1536size_t_1568size_t(uu____4, + seed_for_A, + public_key_serialized); + for (size_t i0 = (size_t)0U; i0 < (size_t)4U; i0++) + { + size_t i1 = i0; + for (size_t i = (size_t)0U; i < (size_t)256U; i++) + { + size_t j = i; + uint16_t uu____5 = libcrux_kyber_arithmetic_to_unsigned_representative(secret_as_ntt[i1][j]); + secret_as_ntt[i1][j] = (int32_t)uu____5; + uint16_t uu____6 = libcrux_kyber_arithmetic_to_unsigned_representative(t_as_ntt[i1][j]); + t_as_ntt[i1][j] = (int32_t)uu____6; + } + } + int32_t a_matrix[4U][4U][256U]; + memcpy(a_matrix, a_transpose, (size_t)4U * sizeof (int32_t [4U][256U])); + for (size_t i0 = (size_t)0U; i0 < (size_t)4U; i0++) + { + size_t i1 = i0; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + size_t j = i; + memcpy(a_matrix[i1][j], a_transpose[j][i1], (size_t)256U * sizeof (int32_t)); + } + } + int32_t uu____7[4U][256U]; + memcpy(uu____7, secret_as_ntt, (size_t)4U * sizeof (int32_t [256U])); + int32_t uu____8[4U][256U]; + memcpy(uu____8, t_as_ntt, (size_t)4U * sizeof (int32_t [256U])); + int32_t uu____9[4U][4U][256U]; + memcpy(uu____9, a_matrix, (size_t)4U * sizeof (int32_t [4U][256U])); + __libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__4size_t_ + 
uu____10; + memcpy(uu____10.fst, uu____7, (size_t)4U * sizeof (int32_t [256U])); + memcpy(uu____10.snd, uu____8, (size_t)4U * sizeof (int32_t [256U])); + memcpy(uu____10.thd, uu____9, (size_t)4U * sizeof (int32_t [4U][256U])); + uint8_t uu____11[1568U]; + memcpy(uu____11, public_key_serialized, (size_t)1568U * sizeof (uint8_t)); + __libcrux_kyber_arithmetic_PolynomialRingElement_4size_t____libcrux_kyber_arithmetic_PolynomialRingElement_4size_t____libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__4size_t__uint8_t_1568size_t_ + lit; + lit.fst = uu____10; + memcpy(lit.snd, uu____11, (size_t)1568U * sizeof (uint8_t)); + return lit; +} + +typedef struct __uint8_t_1536size_t__uint8_t_1568size_t__s +{ + uint8_t fst[1536U]; + uint8_t snd[1568U]; +} +__uint8_t_1536size_t__uint8_t_1568size_t_; + +static __uint8_t_1536size_t__uint8_t_1568size_t_ +generate_keypair___4size_t_1536size_t_1568size_t_1536size_t_2size_t_128size_t( + Eurydice_slice key_generation_seed +) +{ + __libcrux_kyber_arithmetic_PolynomialRingElement_4size_t____libcrux_kyber_arithmetic_PolynomialRingElement_4size_t____libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__4size_t__uint8_t_1568size_t_ + uu____0 = + generate_keypair_unpacked___4size_t_1568size_t_1536size_t_2size_t_128size_t(key_generation_seed); + int32_t secret_as_ntt[4U][256U]; + memcpy(secret_as_ntt, uu____0.fst.fst, (size_t)4U * sizeof (int32_t [256U])); + int32_t _t_as_ntt[4U][256U]; + memcpy(_t_as_ntt, uu____0.fst.snd, (size_t)4U * sizeof (int32_t [256U])); + int32_t _a_transpose[4U][4U][256U]; + memcpy(_a_transpose, uu____0.fst.thd, (size_t)4U * sizeof (int32_t [4U][256U])); + uint8_t public_key_serialized[1568U]; + memcpy(public_key_serialized, uu____0.snd, (size_t)1568U * sizeof (uint8_t)); + int32_t uu____1[4U][256U]; + memcpy(uu____1, secret_as_ntt, (size_t)4U * sizeof (int32_t [256U])); + uint8_t secret_key_serialized[1536U]; + serialize_secret_key___4size_t_1536size_t(uu____1, secret_key_serialized); + uint8_t uu____2[1536U]; + memcpy(uu____2, secret_key_serialized, (size_t)1536U * sizeof (uint8_t)); + uint8_t uu____3[1568U]; + memcpy(uu____3, public_key_serialized, (size_t)1568U * sizeof (uint8_t)); + __uint8_t_1536size_t__uint8_t_1568size_t_ lit; + memcpy(lit.fst, uu____2, (size_t)1536U * sizeof (uint8_t)); + memcpy(lit.snd, uu____3, (size_t)1568U * sizeof (uint8_t)); + return lit; +} + +static inline void +serialize_kem_secret_key___3168size_t( + Eurydice_slice private_key, + Eurydice_slice public_key, + Eurydice_slice implicit_rejection_value, + uint8_t ret[3168U] +) +{ + uint8_t out[3168U] = { 0U }; + size_t pointer = (size_t)0U; + uint8_t *uu____0 = out; + size_t uu____1 = pointer; + size_t uu____2 = pointer; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)3168U, + uu____0, + ( + (core_ops_range_Range__size_t){ + .start = uu____1, + .end = uu____2 + core_slice___Slice_T___len(private_key, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + private_key, + uint8_t, + void *); + pointer = pointer + core_slice___Slice_T___len(private_key, uint8_t, size_t); + uint8_t *uu____3 = out; + size_t uu____4 = pointer; + size_t uu____5 = pointer; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)3168U, + uu____3, + ( + (core_ops_range_Range__size_t){ + .start = uu____4, + .end = uu____5 + core_slice___Slice_T___len(public_key, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + public_key, + uint8_t, + void *); + pointer = pointer 
+ core_slice___Slice_T___len(public_key, uint8_t, size_t); + Eurydice_slice + uu____6 = + Eurydice_array_to_subslice((size_t)3168U, + out, + ( + (core_ops_range_Range__size_t){ + .start = pointer, + .end = pointer + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + uint8_t ret0[32U]; + libcrux_kyber_hash_functions_H(public_key, ret0); + core_slice___Slice_T___copy_from_slice(uu____6, + Eurydice_array_to_slice((size_t)32U, ret0, uint8_t, Eurydice_slice), + uint8_t, + void *); + pointer = pointer + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE; + uint8_t *uu____7 = out; + size_t uu____8 = pointer; + size_t uu____9 = pointer; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)3168U, + uu____7, + ( + (core_ops_range_Range__size_t){ + .start = uu____8, + .end = uu____9 + core_slice___Slice_T___len(implicit_rejection_value, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + implicit_rejection_value, + uint8_t, + void *); + memcpy(ret, out, (size_t)3168U * sizeof (uint8_t)); +} + +typedef uint8_t MlKemPrivateKey___3168size_t[3168U]; + +static void from___3168size_t(uint8_t value[3168U], uint8_t ret[3168U]) +{ + uint8_t uu____0[3168U]; + memcpy(uu____0, value, (size_t)3168U * sizeof (uint8_t)); + memcpy(ret, uu____0, (size_t)3168U * sizeof (uint8_t)); +} + +static libcrux_kyber_types_MlKemKeyPair___3168size_t_1568size_t +from___3168size_t_1568size_t(uint8_t sk[3168U], uint8_t pk[1568U]) +{ + libcrux_kyber_types_MlKemKeyPair___3168size_t_1568size_t lit; + memcpy(lit.sk, sk, (size_t)3168U * sizeof (uint8_t)); + memcpy(lit.pk, pk, (size_t)1568U * sizeof (uint8_t)); + return lit; +} + +static libcrux_kyber_types_MlKemKeyPair___3168size_t_1568size_t +generate_keypair___4size_t_1536size_t_3168size_t_1568size_t_1536size_t_2size_t_128size_t( + uint8_t randomness[64U] +) +{ + Eurydice_slice + ind_cpa_keypair_randomness = + Eurydice_array_to_subslice((size_t)64U, + randomness, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = LIBCRUX_KYBER_CONSTANTS_CPA_PKE_KEY_GENERATION_SEED_SIZE + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + Eurydice_slice + implicit_rejection_value = + Eurydice_array_to_subslice_from((size_t)64U, + randomness, + LIBCRUX_KYBER_CONSTANTS_CPA_PKE_KEY_GENERATION_SEED_SIZE, + uint8_t, + size_t, + Eurydice_slice); + __uint8_t_1536size_t__uint8_t_1568size_t_ + uu____0 = + generate_keypair___4size_t_1536size_t_1568size_t_1536size_t_2size_t_128size_t(ind_cpa_keypair_randomness); + uint8_t ind_cpa_private_key[1536U]; + memcpy(ind_cpa_private_key, uu____0.fst, (size_t)1536U * sizeof (uint8_t)); + uint8_t public_key[1568U]; + memcpy(public_key, uu____0.snd, (size_t)1568U * sizeof (uint8_t)); + Eurydice_slice + uu____1 = Eurydice_array_to_slice((size_t)1536U, ind_cpa_private_key, uint8_t, Eurydice_slice); + uint8_t secret_key_serialized[3168U]; + serialize_kem_secret_key___3168size_t(uu____1, + Eurydice_array_to_slice((size_t)1568U, public_key, uint8_t, Eurydice_slice), + implicit_rejection_value, + secret_key_serialized); + uint8_t uu____2[3168U]; + memcpy(uu____2, secret_key_serialized, (size_t)3168U * sizeof (uint8_t)); + uint8_t private_key[3168U]; + from___3168size_t(uu____2, private_key); + uint8_t uu____3[3168U]; + memcpy(uu____3, private_key, (size_t)3168U * sizeof (uint8_t)); + uint8_t uu____4[1568U]; + memcpy(uu____4, public_key, (size_t)1568U * sizeof (uint8_t)); + return from___3168size_t_1568size_t(uu____3, uu____4); +} + 
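serialize_kem_secret_key above concatenates four fields, so for this Kyber1024 instantiation the 3168-byte private key is laid out as [0, 1536) IND-CPA secret key, [1536, 3104) IND-CPA public key, [3104, 3136) H(public key), and [3136, 3168) the implicit-rejection value (1536 + 1568 + 32 + 32 = 3168). As illustration, a trivial accessor for the last field:

#include <stdint.h>

/* Offsets follow the copy order in serialize_kem_secret_key above. */
static const uint8_t *
kyber1024_sk_implicit_rejection_value(const uint8_t sk[3168])
{
  return sk + 3136; /* last 32 bytes: hashed into the secret on decapsulation failure */
}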
+libcrux_kyber_types_MlKemKeyPair___3168size_t_1568size_t +libcrux_kyber_kyber1024_generate_key_pair(uint8_t randomness[64U]) +{ + uint8_t uu____0[64U]; + memcpy(uu____0, randomness, (size_t)64U * sizeof (uint8_t)); + return + generate_keypair___4size_t_1536size_t_3168size_t_1568size_t_1536size_t_2size_t_128size_t(uu____0); +} + +static void from___1568size_t(uint8_t value[1568U], uint8_t ret[1568U]) +{ + uint8_t uu____0[1568U]; + memcpy(uu____0, value, (size_t)1568U * sizeof (uint8_t)); + memcpy(ret, uu____0, (size_t)1568U * sizeof (uint8_t)); +} + +static K___libcrux_kyber_MlKemState__4size_t___libcrux_kyber_types_MlKemPublicKey__1568size_t__ +generate_keypair_unpacked___4size_t_1536size_t_3168size_t_1568size_t_1536size_t_2size_t_128size_t( + uint8_t randomness[64U] +) +{ + Eurydice_slice + ind_cpa_keypair_randomness = + Eurydice_array_to_subslice((size_t)64U, + randomness, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = LIBCRUX_KYBER_CONSTANTS_CPA_PKE_KEY_GENERATION_SEED_SIZE + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + Eurydice_slice + implicit_rejection_value = + Eurydice_array_to_subslice_from((size_t)64U, + randomness, + LIBCRUX_KYBER_CONSTANTS_CPA_PKE_KEY_GENERATION_SEED_SIZE, + uint8_t, + size_t, + Eurydice_slice); + __libcrux_kyber_arithmetic_PolynomialRingElement_4size_t____libcrux_kyber_arithmetic_PolynomialRingElement_4size_t____libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__4size_t__uint8_t_1568size_t_ + uu____0 = + generate_keypair_unpacked___4size_t_1568size_t_1536size_t_2size_t_128size_t(ind_cpa_keypair_randomness); + int32_t secret_as_ntt[4U][256U]; + memcpy(secret_as_ntt, uu____0.fst.fst, (size_t)4U * sizeof (int32_t [256U])); + int32_t t_as_ntt[4U][256U]; + memcpy(t_as_ntt, uu____0.fst.snd, (size_t)4U * sizeof (int32_t [256U])); + int32_t a_transpose[4U][4U][256U]; + memcpy(a_transpose, uu____0.fst.thd, (size_t)4U * sizeof (int32_t [4U][256U])); + uint8_t ind_cpa_public_key[1568U]; + memcpy(ind_cpa_public_key, uu____0.snd, (size_t)1568U * sizeof (uint8_t)); + uint8_t ind_cpa_public_key_hash[32U]; + libcrux_kyber_hash_functions_H(Eurydice_array_to_slice((size_t)1568U, + ind_cpa_public_key, + uint8_t, + Eurydice_slice), + ind_cpa_public_key_hash); + uint8_t rej[32U]; + core_result_Result__uint8_t_32size_t__core_array_TryFromSliceError dst; + Eurydice_slice_to_array2(&dst, + implicit_rejection_value, + Eurydice_slice, + uint8_t [32U], + void *); + core_result__core__result__Result_T__E___unwrap__uint8_t_32size_t__core_array_TryFromSliceError(dst, + rej); + uint8_t uu____1[1568U]; + memcpy(uu____1, ind_cpa_public_key, (size_t)1568U * sizeof (uint8_t)); + uint8_t pubkey[1568U]; + from___1568size_t(uu____1, pubkey); + int32_t uu____2[4U][256U]; + memcpy(uu____2, secret_as_ntt, (size_t)4U * sizeof (int32_t [256U])); + int32_t uu____3[4U][256U]; + memcpy(uu____3, t_as_ntt, (size_t)4U * sizeof (int32_t [256U])); + int32_t uu____4[4U][4U][256U]; + memcpy(uu____4, a_transpose, (size_t)4U * sizeof (int32_t [4U][256U])); + uint8_t uu____5[32U]; + memcpy(uu____5, rej, (size_t)32U * sizeof (uint8_t)); + uint8_t uu____6[32U]; + memcpy(uu____6, ind_cpa_public_key_hash, (size_t)32U * sizeof (uint8_t)); + K___libcrux_kyber_MlKemState__4size_t___libcrux_kyber_types_MlKemPublicKey__1568size_t__ lit; + memcpy(lit.fst.secret_as_ntt, uu____2, (size_t)4U * sizeof (int32_t [256U])); + memcpy(lit.fst.t_as_ntt, uu____3, (size_t)4U * sizeof (int32_t [256U])); + memcpy(lit.fst.a_transpose, uu____4, (size_t)4U * sizeof (int32_t [4U][256U])); 
+ memcpy(lit.fst.rej, uu____5, (size_t)32U * sizeof (uint8_t)); + memcpy(lit.fst.ind_cpa_public_key_hash, uu____6, (size_t)32U * sizeof (uint8_t)); + memcpy(lit.snd, pubkey, (size_t)1568U * sizeof (uint8_t)); + return lit; +} + +K___libcrux_kyber_MlKemState__4size_t___libcrux_kyber_types_MlKemPublicKey__1568size_t__ +libcrux_kyber_kyber1024_generate_key_pair_unpacked(uint8_t randomness[64U]) +{ + uint8_t uu____0[64U]; + memcpy(uu____0, randomness, (size_t)64U * sizeof (uint8_t)); + return + generate_keypair_unpacked___4size_t_1536size_t_3168size_t_1568size_t_1536size_t_2size_t_128size_t(uu____0); +} + +static uint8_t *as_slice___1568size_t(uint8_t (*self)[1568U]) +{ + return self[0U]; +} + +static inline void +deserialize_ring_elements_reduced___1536size_t_4size_t( + Eurydice_slice public_key, + int32_t ret[4U][256U] +) +{ + int32_t deserialized_pk[4U][256U]; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + memcpy(deserialized_pk[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(public_key, + uint8_t, + size_t) + / LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT; + i++) + { + size_t i0 = i; + Eurydice_slice + ring_element = + Eurydice_slice_subslice(public_key, + ( + (core_ops_range_Range__size_t){ + .start = i0 * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT, + .end = i0 + * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + + LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t uu____0[256U]; + libcrux_kyber_serialize_deserialize_to_reduced_ring_element(ring_element, uu____0); + memcpy(deserialized_pk[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, deserialized_pk, (size_t)4U * sizeof (int32_t [256U])); +} + +static inline void +sample_ring_element_cbd___4size_t_128size_t_2size_t( + uint8_t *prf_input, + uint8_t *domain_separator, + int32_t ret[4U][256U] +) +{ + int32_t error_1[4U][256U]; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + memcpy(error_1[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + size_t i0 = i; + prf_input[32U] = domain_separator[0U]; + domain_separator[0U] = (uint32_t)domain_separator[0U] + 1U; + uint8_t prf_output[128U]; + libcrux_kyber_hash_functions_PRF___128size_t(Eurydice_array_to_slice((size_t)33U, + prf_input, + uint8_t, + Eurydice_slice), + prf_output); + int32_t uu____0[256U]; + libcrux_kyber_sampling_sample_from_binomial_distribution___2size_t(Eurydice_array_to_slice((size_t)128U, + prf_output, + uint8_t, + Eurydice_slice), + uu____0); + memcpy(error_1[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, error_1, (size_t)4U * sizeof (int32_t [256U])); +} + +static inline void invert_ntt_montgomery___4size_t(int32_t re[256U], int32_t ret[256U]) +{ + size_t zeta_i = LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT / (size_t)2U; + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)1U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)2U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)3U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)4U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)5U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)6U, re); + 
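The unpacked variant above returns the key material already in NTT form (secret_as_ntt, t_as_ntt, a_transpose, plus the implicit-rejection bytes and H(pk)), so repeated decapsulations can skip deserializing the secret key. A hypothetical caller follows; the state type name libcrux_kyber_MlKemState___4size_t is an assumption, by analogy with the ___3size_t variant declared for Kyber768 earlier in this diff.

#include <stdint.h>
#include <string.h>
#include "libcrux_kyber1024.h"

static void
kyber1024_keygen_unpacked(uint8_t randomness[64],
                          libcrux_kyber_MlKemState___4size_t *state, /* assumed name */
                          uint8_t pk[1568])
{
  K___libcrux_kyber_MlKemState__4size_t___libcrux_kyber_types_MlKemPublicKey__1568size_t__
    pair = libcrux_kyber_kyber1024_generate_key_pair_unpacked(randomness);
  *state = pair.fst; /* keep resident and reuse across decapsulations */
  memcpy(pk, pair.snd, 1568);
}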
libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)7U, re); + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t i0 = i; + int32_t uu____0 = libcrux_kyber_arithmetic_barrett_reduce(re[i0]); + re[i0] = uu____0; + } + memcpy(ret, re, (size_t)256U * sizeof (int32_t)); +} + +static inline void +compute_vector_u___4size_t( + int32_t (*a_as_ntt)[4U][256U], + int32_t (*r_as_ntt)[256U], + int32_t (*error_1)[256U], + int32_t ret[4U][256U] +) +{ + int32_t result[4U][256U]; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + memcpy(result[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i0 = (size_t)0U; + i0 + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)4U, + a_as_ntt, + Eurydice_error_t_cg_array, + Eurydice_slice), + int32_t [4U][256U], + size_t); + i0++) + { + size_t i1 = i0; + int32_t (*row)[256U] = a_as_ntt[i1]; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)4U, + row, + int32_t [256U], + Eurydice_slice), + int32_t [256U], + size_t); + i++) + { + size_t j = i; + int32_t (*a_element)[256U] = &row[j]; + int32_t product[256U]; + libcrux_kyber_ntt_ntt_multiply(a_element, &r_as_ntt[j], product); + int32_t uu____0[256U]; + add_to_ring_element___4size_t(result[i1], &product, uu____0); + memcpy(result[i1], uu____0, (size_t)256U * sizeof (int32_t)); + } + int32_t uu____1[256U]; + invert_ntt_montgomery___4size_t(result[i1], uu____1); + memcpy(result[i1], uu____1, (size_t)256U * sizeof (int32_t)); + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t j = i; + int32_t + coefficient_normal_form = + libcrux_kyber_arithmetic_montgomery_reduce(result[i1][j] * (int32_t)1441); + int32_t + uu____2 = libcrux_kyber_arithmetic_barrett_reduce(coefficient_normal_form + error_1[i1][j]); + result[i1][j] = uu____2; + } + } + memcpy(ret, result, (size_t)4U * sizeof (int32_t [256U])); +} + +static inline void +compute_ring_element_v___4size_t( + int32_t (*t_as_ntt)[256U], + int32_t (*r_as_ntt)[256U], + int32_t (*error_2)[256U], + int32_t (*message)[256U], + int32_t ret[256U] +) +{ + int32_t result[256U]; + memcpy(result, + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + size_t i0 = i; + int32_t product[256U]; + libcrux_kyber_ntt_ntt_multiply(&t_as_ntt[i0], &r_as_ntt[i0], product); + add_to_ring_element___4size_t(result, &product, result); + } + invert_ntt_montgomery___4size_t(result, result); + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t i0 = i; + int32_t + coefficient_normal_form = + libcrux_kyber_arithmetic_montgomery_reduce(result[i0] * (int32_t)1441); + int32_t + uu____0 = + libcrux_kyber_arithmetic_barrett_reduce(coefficient_normal_form + + error_2[0U][i0] + + message[0U][i0]); + result[i0] = uu____0; + } + memcpy(ret, result, (size_t)256U * sizeof (int32_t)); +} + +static inline void compress_then_serialize_10___352size_t(int32_t re[256U], uint8_t ret[352U]) +{ + uint8_t serialized[352U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)256U, re, int32_t, Eurydice_slice), + int32_t, + size_t) + / (size_t)4U; + i++) + { + size_t i0 = i; + Eurydice_slice + coefficients = + Eurydice_array_to_subslice((size_t)256U, + re, + ( + 
(core_ops_range_Range__size_t){ + .start = i0 * (size_t)4U, + .end = i0 * (size_t)4U + (size_t)4U + } + ), + int32_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t + coefficient1 = + libcrux_kyber_compress_compress_ciphertext_coefficient(10U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)0U, + int32_t, + int32_t))); + int32_t + coefficient2 = + libcrux_kyber_compress_compress_ciphertext_coefficient(10U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)1U, + int32_t, + int32_t))); + int32_t + coefficient3 = + libcrux_kyber_compress_compress_ciphertext_coefficient(10U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)2U, + int32_t, + int32_t))); + int32_t + coefficient4 = + libcrux_kyber_compress_compress_ciphertext_coefficient(10U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)3U, + int32_t, + int32_t))); + K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t + uu____0 = + libcrux_kyber_serialize_compress_coefficients_10(coefficient1, + coefficient2, + coefficient3, + coefficient4); + uint8_t coef1 = uu____0.fst; + uint8_t coef2 = uu____0.snd; + uint8_t coef3 = uu____0.thd; + uint8_t coef4 = uu____0.f3; + uint8_t coef5 = uu____0.f4; + serialized[(size_t)5U * i0] = coef1; + serialized[(size_t)5U * i0 + (size_t)1U] = coef2; + serialized[(size_t)5U * i0 + (size_t)2U] = coef3; + serialized[(size_t)5U * i0 + (size_t)3U] = coef4; + serialized[(size_t)5U * i0 + (size_t)4U] = coef5; + } + memcpy(ret, serialized, (size_t)352U * sizeof (uint8_t)); +} + +static inline void compress_then_serialize_11___352size_t(int32_t re[256U], uint8_t ret[352U]) +{ + uint8_t serialized[352U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)256U, re, int32_t, Eurydice_slice), + int32_t, + size_t) + / (size_t)8U; + i++) + { + size_t i0 = i; + Eurydice_slice + coefficients = + Eurydice_array_to_subslice((size_t)256U, + re, + ( + (core_ops_range_Range__size_t){ + .start = i0 * (size_t)8U, + .end = i0 * (size_t)8U + (size_t)8U + } + ), + int32_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t + coefficient1 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)0U, + int32_t, + int32_t))); + int32_t + coefficient2 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)1U, + int32_t, + int32_t))); + int32_t + coefficient3 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)2U, + int32_t, + int32_t))); + int32_t + coefficient4 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)3U, + int32_t, + int32_t))); + int32_t + coefficient5 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)4U, + int32_t, + int32_t))); + int32_t + coefficient6 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)5U, + 
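compress_then_serialize_10 above packs four 10-bit compressed coefficients into five bytes. The authoritative bit order is in libcrux_kyber_serialize_compress_coefficients_10, which is not part of this hunk; the sketch below shows a conventional little-endian packing of the same shape, not necessarily the exact layout used there.

#include <stdint.h>

/* Pack four 10-bit values (each < 1024) into 5 bytes: 4 * 10 = 40 bits. */
static void
pack4x10(const uint16_t v[4], uint8_t out[5])
{
  uint64_t acc = (uint64_t)v[0]
               | ((uint64_t)v[1] << 10)
               | ((uint64_t)v[2] << 20)
               | ((uint64_t)v[3] << 30);
  for (int i = 0; i < 5; i++)
    out[i] = (uint8_t)(acc >> (8 * i));
}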
int32_t, + int32_t))); + int32_t + coefficient7 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)6U, + int32_t, + int32_t))); + int32_t + coefficient8 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)7U, + int32_t, + int32_t))); + K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t + uu____0 = + libcrux_kyber_serialize_compress_coefficients_11(coefficient1, + coefficient2, + coefficient3, + coefficient4, + coefficient5, + coefficient6, + coefficient7, + coefficient8); + uint8_t coef1 = uu____0.fst; + uint8_t coef2 = uu____0.snd; + uint8_t coef3 = uu____0.thd; + uint8_t coef4 = uu____0.f3; + uint8_t coef5 = uu____0.f4; + uint8_t coef6 = uu____0.f5; + uint8_t coef7 = uu____0.f6; + uint8_t coef8 = uu____0.f7; + uint8_t coef9 = uu____0.f8; + uint8_t coef10 = uu____0.f9; + uint8_t coef11 = uu____0.f10; + serialized[(size_t)11U * i0] = coef1; + serialized[(size_t)11U * i0 + (size_t)1U] = coef2; + serialized[(size_t)11U * i0 + (size_t)2U] = coef3; + serialized[(size_t)11U * i0 + (size_t)3U] = coef4; + serialized[(size_t)11U * i0 + (size_t)4U] = coef5; + serialized[(size_t)11U * i0 + (size_t)5U] = coef6; + serialized[(size_t)11U * i0 + (size_t)6U] = coef7; + serialized[(size_t)11U * i0 + (size_t)7U] = coef8; + serialized[(size_t)11U * i0 + (size_t)8U] = coef9; + serialized[(size_t)11U * i0 + (size_t)9U] = coef10; + serialized[(size_t)11U * i0 + (size_t)10U] = coef11; + } + memcpy(ret, serialized, (size_t)352U * sizeof (uint8_t)); +} + +static inline void +compress_then_serialize_ring_element_u___11size_t_352size_t( + int32_t re[256U], + uint8_t ret[352U] +) +{ + uint8_t uu____0[352U]; + compress_then_serialize_11___352size_t(re, uu____0); + memcpy(ret, uu____0, (size_t)352U * sizeof (uint8_t)); +} + +static void +compress_then_serialize_u___4size_t_1408size_t_11size_t_352size_t( + int32_t input[4U][256U], + uint8_t ret[1408U] +) +{ + uint8_t out[1408U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)4U, + input, + int32_t [256U], + Eurydice_slice), + int32_t [256U], + size_t); + i++) + { + size_t i0 = i; + int32_t re[256U]; + memcpy(re, input[i0], (size_t)256U * sizeof (int32_t)); + Eurydice_slice + uu____0 = + Eurydice_array_to_subslice((size_t)1408U, + out, + ( + (core_ops_range_Range__size_t){ + .start = i0 * ((size_t)1408U / (size_t)4U), + .end = (i0 + (size_t)1U) * ((size_t)1408U / (size_t)4U) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + uint8_t ret0[352U]; + compress_then_serialize_ring_element_u___11size_t_352size_t(re, ret0); + core_slice___Slice_T___copy_from_slice(uu____0, + Eurydice_array_to_slice((size_t)352U, ret0, uint8_t, Eurydice_slice), + uint8_t, + void *); + } + memcpy(ret, out, (size_t)1408U * sizeof (uint8_t)); +} + +static inline void compress_then_serialize_4___160size_t(int32_t re[256U], uint8_t ret[160U]) +{ + uint8_t serialized[160U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)256U, re, int32_t, Eurydice_slice), + int32_t, + size_t) + / (size_t)2U; + i++) + { + size_t i0 = i; + Eurydice_slice + coefficients = + Eurydice_array_to_subslice((size_t)256U, + re, + ( + (core_ops_range_Range__size_t){ + .start = i0 * (size_t)2U, + .end = i0 * (size_t)2U + 
(size_t)2U + } + ), + int32_t, + core_ops_range_Range__size_t, + Eurydice_slice); + uint8_t + coefficient1 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(4U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)0U, + int32_t, + int32_t))); + uint8_t + coefficient2 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(4U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)1U, + int32_t, + int32_t))); + serialized[i0] = (uint32_t)coefficient2 << 4U | (uint32_t)coefficient1; + } + memcpy(ret, serialized, (size_t)160U * sizeof (uint8_t)); +} + +static inline void compress_then_serialize_5___160size_t(int32_t re[256U], uint8_t ret[160U]) +{ + uint8_t serialized[160U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)256U, re, int32_t, Eurydice_slice), + int32_t, + size_t) + / (size_t)8U; + i++) + { + size_t i0 = i; + Eurydice_slice + coefficients = + Eurydice_array_to_subslice((size_t)256U, + re, + ( + (core_ops_range_Range__size_t){ + .start = i0 * (size_t)8U, + .end = i0 * (size_t)8U + (size_t)8U + } + ), + int32_t, + core_ops_range_Range__size_t, + Eurydice_slice); + uint8_t + coefficient1 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)0U, + int32_t, + int32_t))); + uint8_t + coefficient2 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)1U, + int32_t, + int32_t))); + uint8_t + coefficient3 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)2U, + int32_t, + int32_t))); + uint8_t + coefficient4 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)3U, + int32_t, + int32_t))); + uint8_t + coefficient5 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)4U, + int32_t, + int32_t))); + uint8_t + coefficient6 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)5U, + int32_t, + int32_t))); + uint8_t + coefficient7 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)6U, + int32_t, + int32_t))); + uint8_t + coefficient8 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)7U, + int32_t, + int32_t))); + K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t + uu____0 = + libcrux_kyber_serialize_compress_coefficients_5(coefficient2, + coefficient1, + coefficient4, + coefficient3, + coefficient5, + coefficient7, + coefficient6, + coefficient8); + uint8_t coef1 = uu____0.fst; + uint8_t coef2 = uu____0.snd; + uint8_t coef3 = uu____0.thd; + uint8_t coef4 = uu____0.f3; + uint8_t coef5 = uu____0.f4; + serialized[(size_t)5U * i0] = coef1; + serialized[(size_t)5U * i0 + (size_t)1U] = coef2; + 
serialized[(size_t)5U * i0 + (size_t)2U] = coef3; + serialized[(size_t)5U * i0 + (size_t)3U] = coef4; + serialized[(size_t)5U * i0 + (size_t)4U] = coef5; + } + memcpy(ret, serialized, (size_t)160U * sizeof (uint8_t)); +} + +static inline void +compress_then_serialize_ring_element_v___5size_t_160size_t(int32_t re[256U], uint8_t ret[160U]) +{ + uint8_t uu____0[160U]; + compress_then_serialize_5___160size_t(re, uu____0); + memcpy(ret, uu____0, (size_t)160U * sizeof (uint8_t)); +} + +static inline void into_padded_array___1568size_t(Eurydice_slice slice, uint8_t ret[1568U]) +{ + uint8_t out[1568U] = { 0U }; + uint8_t *uu____0 = out; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)1568U, + uu____0, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = core_slice___Slice_T___len(slice, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + slice, + uint8_t, + void *); + memcpy(ret, out, (size_t)1568U * sizeof (uint8_t)); +} + +static void +encrypt_unpacked___4size_t_1568size_t_1536size_t_1408size_t_160size_t_11size_t_5size_t_352size_t_2size_t_128size_t_2size_t_128size_t( + int32_t (*t_as_ntt)[256U], + int32_t (*a_transpose)[4U][256U], + uint8_t message[32U], + Eurydice_slice randomness, + uint8_t ret[1568U] +) +{ + uint8_t prf_input[33U]; + libcrux_kyber_ind_cpa_into_padded_array___33size_t(randomness, prf_input); + uint8_t uu____0[33U]; + memcpy(uu____0, prf_input, (size_t)33U * sizeof (uint8_t)); + __libcrux_kyber_arithmetic_PolynomialRingElement_4size_t__uint8_t + uu____1 = sample_vector_cbd_then_ntt___4size_t_2size_t_128size_t(uu____0, 0U); + int32_t r_as_ntt[4U][256U]; + memcpy(r_as_ntt, uu____1.fst, (size_t)4U * sizeof (int32_t [256U])); + uint8_t domain_separator = uu____1.snd; + int32_t error_1[4U][256U]; + sample_ring_element_cbd___4size_t_128size_t_2size_t(prf_input, &domain_separator, error_1); + prf_input[32U] = domain_separator; + uint8_t prf_output[128U]; + libcrux_kyber_hash_functions_PRF___128size_t(Eurydice_array_to_slice((size_t)33U, + prf_input, + uint8_t, + Eurydice_slice), + prf_output); + int32_t error_2[256U]; + libcrux_kyber_sampling_sample_from_binomial_distribution___2size_t(Eurydice_array_to_slice((size_t)128U, + prf_output, + uint8_t, + Eurydice_slice), + error_2); + int32_t u[4U][256U]; + compute_vector_u___4size_t(a_transpose, r_as_ntt, error_1, u); + uint8_t uu____2[32U]; + memcpy(uu____2, message, (size_t)32U * sizeof (uint8_t)); + int32_t message_as_ring_element[256U]; + libcrux_kyber_serialize_deserialize_then_decompress_message(uu____2, message_as_ring_element); + int32_t v[256U]; + compute_ring_element_v___4size_t(t_as_ntt, r_as_ntt, &error_2, &message_as_ring_element, v); + int32_t uu____3[4U][256U]; + memcpy(uu____3, u, (size_t)4U * sizeof (int32_t [256U])); + uint8_t c1[1408U]; + compress_then_serialize_u___4size_t_1408size_t_11size_t_352size_t(uu____3, c1); + uint8_t c2[160U]; + compress_then_serialize_ring_element_v___5size_t_160size_t(v, c2); + uint8_t ciphertext[1568U]; + into_padded_array___1568size_t(Eurydice_array_to_slice((size_t)1408U, + c1, + uint8_t, + Eurydice_slice), + ciphertext); + Eurydice_slice + uu____4 = + Eurydice_array_to_subslice_from((size_t)1568U, + ciphertext, + (size_t)1408U, + uint8_t, + size_t, + Eurydice_slice); + core_slice___Slice_T___copy_from_slice(uu____4, + core_array___Array_T__N__23__as_slice((size_t)160U, c2, uint8_t, Eurydice_slice), + uint8_t, + void *); + memcpy(ret, ciphertext, (size_t)1568U * sizeof (uint8_t)); +} + +static void 
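encrypt_unpacked above is the IND-CPA core: sample r, e1, e2 from the PRF, compute u = InvNTT(A^T o r) + e1 and v = InvNTT(t^T o r) + e2 + Decompress_1(message), then serialize u at 11 bits and v at 5 bits per coefficient. The 1568-byte ciphertext size follows from those widths; the C11 checks below use the conventional parameter names k, du, dv, which are illustrative and not identifiers from this diff.

#include <assert.h>

#define KYBER1024_K  4   /* ring elements in u */
#define KYBER1024_DU 11  /* bits per u coefficient */
#define KYBER1024_DV 5   /* bits per v coefficient */

static_assert(KYBER1024_K * 256 * KYBER1024_DU / 8 == 1408, "compressed u (c1)");
static_assert(256 * KYBER1024_DV / 8 == 160, "compressed v (c2)");
static_assert(1408 + 160 == 1568, "Kyber1024 ciphertext bytes");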
+encrypt___4size_t_1568size_t_1536size_t_1408size_t_160size_t_11size_t_5size_t_352size_t_2size_t_128size_t_2size_t_128size_t( + Eurydice_slice public_key, + uint8_t message[32U], + Eurydice_slice randomness, + uint8_t ret[1568U] +) +{ + int32_t t_as_ntt[4U][256U]; + deserialize_ring_elements_reduced___1536size_t_4size_t(Eurydice_slice_subslice_to(public_key, + (size_t)1536U, + uint8_t, + size_t, + Eurydice_slice), + t_as_ntt); + Eurydice_slice + seed = Eurydice_slice_subslice_from(public_key, (size_t)1536U, uint8_t, size_t, Eurydice_slice); + int32_t a_transpose[4U][4U][256U]; + uint8_t ret0[34U]; + libcrux_kyber_ind_cpa_into_padded_array___34size_t(seed, ret0); + sample_matrix_A___4size_t(ret0, false, a_transpose); + int32_t (*uu____0)[256U] = t_as_ntt; + int32_t (*uu____1)[4U][256U] = a_transpose; + uint8_t uu____2[32U]; + memcpy(uu____2, message, (size_t)32U * sizeof (uint8_t)); + uint8_t ret1[1568U]; + encrypt_unpacked___4size_t_1568size_t_1536size_t_1408size_t_160size_t_11size_t_5size_t_352size_t_2size_t_128size_t_2size_t_128size_t(uu____0, + uu____1, + uu____2, + randomness, + ret1); + memcpy(ret, ret1, (size_t)1568U * sizeof (uint8_t)); +} + +typedef uint8_t MlKemCiphertext___1568size_t[1568U]; + +static K___libcrux_kyber_types_MlKemCiphertext__1568size_t___uint8_t_32size_t_ +encapsulate___4size_t_1568size_t_1568size_t_1536size_t_1408size_t_160size_t_11size_t_5size_t_352size_t_2size_t_128size_t_2size_t_128size_t( + uint8_t (*public_key)[1568U], + uint8_t randomness[32U] +) +{ + uint8_t to_hash[64U]; + libcrux_kyber_ind_cpa_into_padded_array___64size_t(Eurydice_array_to_slice((size_t)32U, + randomness, + uint8_t, + Eurydice_slice), + to_hash); + Eurydice_slice + uu____0 = + Eurydice_array_to_subslice_from((size_t)64U, + to_hash, + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE, + uint8_t, + size_t, + Eurydice_slice); + uint8_t ret[32U]; + libcrux_kyber_hash_functions_H(Eurydice_array_to_slice((size_t)1568U, + as_slice___1568size_t(public_key), + uint8_t, + Eurydice_slice), + ret); + core_slice___Slice_T___copy_from_slice(uu____0, + Eurydice_array_to_slice((size_t)32U, ret, uint8_t, Eurydice_slice), + uint8_t, + void *); + uint8_t hashed[64U]; + libcrux_kyber_hash_functions_G(Eurydice_array_to_slice((size_t)64U, + to_hash, + uint8_t, + Eurydice_slice), + hashed); + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____1 = + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)64U, + hashed, + uint8_t, + Eurydice_slice), + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice shared_secret = uu____1.fst; + Eurydice_slice pseudorandomness = uu____1.snd; + Eurydice_slice + uu____2 = + Eurydice_array_to_slice((size_t)1568U, + as_slice___1568size_t(public_key), + uint8_t, + Eurydice_slice); + uint8_t uu____3[32U]; + memcpy(uu____3, randomness, (size_t)32U * sizeof (uint8_t)); + uint8_t ciphertext[1568U]; + encrypt___4size_t_1568size_t_1536size_t_1408size_t_160size_t_11size_t_5size_t_352size_t_2size_t_128size_t_2size_t_128size_t(uu____2, + uu____3, + pseudorandomness, + ciphertext); + uint8_t shared_secret_array[32U] = { 0U }; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_slice((size_t)32U, + shared_secret_array, + uint8_t, + Eurydice_slice), + shared_secret, + uint8_t, + void *); + uint8_t uu____4[1568U]; + memcpy(uu____4, ciphertext, (size_t)1568U * sizeof (uint8_t)); + uint8_t uu____5[1568U]; + memcpy(uu____5, uu____4, (size_t)1568U * sizeof (uint8_t)); + uint8_t uu____6[32U]; + memcpy(uu____6, 
shared_secret_array, (size_t)32U * sizeof (uint8_t)); + K___libcrux_kyber_types_MlKemCiphertext__1568size_t___uint8_t_32size_t_ lit; + memcpy(lit.fst, uu____5, (size_t)1568U * sizeof (uint8_t)); + memcpy(lit.snd, uu____6, (size_t)32U * sizeof (uint8_t)); + return lit; +} + +K___libcrux_kyber_types_MlKemCiphertext__1568size_t___uint8_t_32size_t_ +libcrux_kyber_kyber1024_encapsulate(uint8_t (*public_key)[1568U], uint8_t randomness[32U]) +{ + uint8_t (*uu____0)[1568U] = public_key; + uint8_t uu____1[32U]; + memcpy(uu____1, randomness, (size_t)32U * sizeof (uint8_t)); + return + encapsulate___4size_t_1568size_t_1568size_t_1536size_t_1408size_t_160size_t_11size_t_5size_t_352size_t_2size_t_128size_t_2size_t_128size_t(uu____0, + uu____1); +} + +static K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t +split_at___3168size_t(uint8_t (*self)[3168U], size_t mid) +{ + return + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)3168U, + self[0U], + uint8_t, + Eurydice_slice), + mid, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); +} + +static inline void +deserialize_secret_key___4size_t(Eurydice_slice secret_key, int32_t ret[4U][256U]) +{ + int32_t secret_as_ntt[4U][256U]; + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + memcpy(secret_as_ntt[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(secret_key, + uint8_t, + size_t) + / LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT; + i++) + { + size_t i0 = i; + Eurydice_slice + secret_bytes = + Eurydice_slice_subslice(secret_key, + ( + (core_ops_range_Range__size_t){ + .start = i0 * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT, + .end = i0 + * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + + LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t uu____0[256U]; + libcrux_kyber_serialize_deserialize_to_uncompressed_ring_element(secret_bytes, uu____0); + memcpy(secret_as_ntt[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, secret_as_ntt, (size_t)4U * sizeof (int32_t [256U])); +} + +static inline void +deserialize_then_decompress_ring_element_u___11size_t( + Eurydice_slice serialized, + int32_t ret[256U] +) +{ + int32_t uu____0[256U]; + libcrux_kyber_serialize_deserialize_then_decompress_11(serialized, uu____0); + memcpy(ret, uu____0, (size_t)256U * sizeof (int32_t)); +} + +static inline void ntt_vector_u___11size_t(int32_t re[256U], int32_t ret[256U]) +{ + size_t zeta_i = (size_t)0U; + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)7U, re); + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)6U, re); + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)5U, re); + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)4U, re); + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)3U, re); + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)2U, re); + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)1U, re); + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t i0 = i; + int32_t uu____0 = libcrux_kyber_arithmetic_barrett_reduce(re[i0]); + re[i0] = uu____0; + } + memcpy(ret, re, (size_t)256U * sizeof (int32_t)); +} + +static inline void +deserialize_then_decompress_u___4size_t_1568size_t_11size_t( + uint8_t *ciphertext, + int32_t ret[4U][256U] +) +{ + int32_t u_as_ntt[4U][256U]; + for (size_t i 
= (size_t)0U; i < (size_t)4U; i++) + { + memcpy(u_as_ntt[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)1568U, + ciphertext, + uint8_t, + Eurydice_slice), + uint8_t, + size_t) + / (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * (size_t)11U / (size_t)8U); + i++) + { + size_t i0 = i; + Eurydice_slice + u_bytes = + Eurydice_array_to_subslice((size_t)1568U, + ciphertext, + ( + (core_ops_range_Range__size_t){ + .start = i0 + * (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * (size_t)11U / (size_t)8U), + .end = i0 + * (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * (size_t)11U / (size_t)8U) + + LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * (size_t)11U / (size_t)8U + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t u[256U]; + deserialize_then_decompress_ring_element_u___11size_t(u_bytes, u); + int32_t uu____0[256U]; + ntt_vector_u___11size_t(u, uu____0); + memcpy(u_as_ntt[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, u_as_ntt, (size_t)4U * sizeof (int32_t [256U])); +} + +static inline void +deserialize_then_decompress_ring_element_v___5size_t( + Eurydice_slice serialized, + int32_t ret[256U] +) +{ + int32_t uu____0[256U]; + libcrux_kyber_serialize_deserialize_then_decompress_5(serialized, uu____0); + memcpy(ret, uu____0, (size_t)256U * sizeof (int32_t)); +} + +static inline void +compute_message___4size_t( + int32_t (*v)[256U], + int32_t (*secret_as_ntt)[256U], + int32_t (*u_as_ntt)[256U], + int32_t ret[256U] +) +{ + int32_t result[256U]; + memcpy(result, + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + for (size_t i = (size_t)0U; i < (size_t)4U; i++) + { + size_t i0 = i; + int32_t product[256U]; + libcrux_kyber_ntt_ntt_multiply(&secret_as_ntt[i0], &u_as_ntt[i0], product); + add_to_ring_element___4size_t(result, &product, result); + } + invert_ntt_montgomery___4size_t(result, result); + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t i0 = i; + int32_t + coefficient_normal_form = + libcrux_kyber_arithmetic_montgomery_reduce(result[i0] * (int32_t)1441); + int32_t uu____0 = libcrux_kyber_arithmetic_barrett_reduce(v[0U][i0] - coefficient_normal_form); + result[i0] = uu____0; + } + memcpy(ret, result, (size_t)256U * sizeof (int32_t)); +} + +static void +decrypt_unpacked___4size_t_1568size_t_1408size_t_11size_t_5size_t( + int32_t (*secret_as_ntt)[256U], + uint8_t *ciphertext, + uint8_t ret[32U] +) +{ + int32_t u_as_ntt[4U][256U]; + deserialize_then_decompress_u___4size_t_1568size_t_11size_t(ciphertext, u_as_ntt); + int32_t v[256U]; + deserialize_then_decompress_ring_element_v___5size_t(Eurydice_array_to_subslice_from((size_t)1568U, + ciphertext, + (size_t)1408U, + uint8_t, + size_t, + Eurydice_slice), + v); + int32_t message[256U]; + compute_message___4size_t(&v, secret_as_ntt, u_as_ntt, message); + uint8_t ret0[32U]; + libcrux_kyber_serialize_compress_then_serialize_message(message, ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +static void +decrypt___4size_t_1568size_t_1408size_t_11size_t_5size_t( + Eurydice_slice secret_key, + uint8_t *ciphertext, + uint8_t ret[32U] +) +{ + int32_t secret_as_ntt[4U][256U]; + deserialize_secret_key___4size_t(secret_key, secret_as_ntt); + uint8_t ret0[32U]; + 
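+  /* The secret key has been deserialized into NTT form above; the unpacked routine performs the IND-CPA decryption proper. */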
decrypt_unpacked___4size_t_1568size_t_1408size_t_11size_t_5size_t(secret_as_ntt, + ciphertext, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +static inline void into_padded_array___1600size_t(Eurydice_slice slice, uint8_t ret[1600U]) +{ + uint8_t out[1600U] = { 0U }; + uint8_t *uu____0 = out; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)1600U, + uu____0, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = core_slice___Slice_T___len(slice, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + slice, + uint8_t, + void *); + memcpy(ret, out, (size_t)1600U * sizeof (uint8_t)); +} + +static Eurydice_slice as_ref___1568size_t(uint8_t (*self)[1568U]) +{ + return Eurydice_array_to_slice((size_t)1568U, self[0U], uint8_t, Eurydice_slice); +} + +static uint8_t +compare_ciphertexts_in_constant_time___1568size_t(Eurydice_slice lhs, Eurydice_slice rhs) +{ + uint8_t r = 0U; + for (size_t i = (size_t)0U; i < (size_t)1568U; i++) + { + size_t i0 = i; + uint8_t uu____0 = Eurydice_slice_index(lhs, i0, uint8_t, uint8_t); + r = + (uint32_t)r + | ((uint32_t)uu____0 ^ (uint32_t)Eurydice_slice_index(rhs, i0, uint8_t, uint8_t)); + } + return libcrux_kyber_constant_time_ops_is_non_zero(r); +} + +static void +decapsulate___4size_t_3168size_t_1536size_t_1568size_t_1568size_t_1536size_t_1408size_t_160size_t_11size_t_5size_t_352size_t_2size_t_128size_t_2size_t_128size_t_1600size_t( + uint8_t (*secret_key)[3168U], + uint8_t (*ciphertext)[1568U], + uint8_t ret[32U] +) +{ + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____0 = split_at___3168size_t(secret_key, (size_t)1536U); + Eurydice_slice ind_cpa_secret_key = uu____0.fst; + Eurydice_slice secret_key0 = uu____0.snd; + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____1 = + core_slice___Slice_T___split_at(secret_key0, + (size_t)1568U, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice ind_cpa_public_key = uu____1.fst; + Eurydice_slice secret_key1 = uu____1.snd; + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____2 = + core_slice___Slice_T___split_at(secret_key1, + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice ind_cpa_public_key_hash = uu____2.fst; + Eurydice_slice implicit_rejection_value = uu____2.snd; + uint8_t decrypted[32U]; + decrypt___4size_t_1568size_t_1408size_t_11size_t_5size_t(ind_cpa_secret_key, + ciphertext[0U], + decrypted); + uint8_t to_hash0[64U]; + libcrux_kyber_ind_cpa_into_padded_array___64size_t(Eurydice_array_to_slice((size_t)32U, + decrypted, + uint8_t, + Eurydice_slice), + to_hash0); + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice_from((size_t)64U, + to_hash0, + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + size_t, + Eurydice_slice), + ind_cpa_public_key_hash, + uint8_t, + void *); + uint8_t hashed[64U]; + libcrux_kyber_hash_functions_G(Eurydice_array_to_slice((size_t)64U, + to_hash0, + uint8_t, + Eurydice_slice), + hashed); + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____3 = + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)64U, + hashed, + uint8_t, + Eurydice_slice), + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice shared_secret = uu____3.fst; + Eurydice_slice pseudorandomness = uu____3.snd; + uint8_t to_hash[1600U]; + into_padded_array___1600size_t(implicit_rejection_value, 
to_hash); + Eurydice_slice + uu____4 = + Eurydice_array_to_subslice_from((size_t)1600U, + to_hash, + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + size_t, + Eurydice_slice); + core_slice___Slice_T___copy_from_slice(uu____4, + as_ref___1568size_t(ciphertext), + uint8_t, + void *); + uint8_t implicit_rejection_shared_secret[32U]; + libcrux_kyber_hash_functions_PRF___32size_t(Eurydice_array_to_slice((size_t)1600U, + to_hash, + uint8_t, + Eurydice_slice), + implicit_rejection_shared_secret); + Eurydice_slice uu____5 = ind_cpa_public_key; + uint8_t uu____6[32U]; + memcpy(uu____6, decrypted, (size_t)32U * sizeof (uint8_t)); + uint8_t expected_ciphertext[1568U]; + encrypt___4size_t_1568size_t_1536size_t_1408size_t_160size_t_11size_t_5size_t_352size_t_2size_t_128size_t_2size_t_128size_t(uu____5, + uu____6, + pseudorandomness, + expected_ciphertext); + Eurydice_slice uu____7 = as_ref___1568size_t(ciphertext); + uint8_t + selector = + compare_ciphertexts_in_constant_time___1568size_t(uu____7, + Eurydice_array_to_slice((size_t)1568U, expected_ciphertext, uint8_t, Eurydice_slice)); + Eurydice_slice uu____8 = shared_secret; + uint8_t ret0[32U]; + libcrux_kyber_constant_time_ops_select_shared_secret_in_constant_time(uu____8, + Eurydice_array_to_slice((size_t)32U, implicit_rejection_shared_secret, uint8_t, Eurydice_slice), + selector, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +void +libcrux_kyber_kyber1024_decapsulate( + uint8_t (*secret_key)[3168U], + uint8_t (*ciphertext)[1568U], + uint8_t ret[32U] +) +{ + uint8_t ret0[32U]; + decapsulate___4size_t_3168size_t_1536size_t_1568size_t_1568size_t_1536size_t_1408size_t_160size_t_11size_t_5size_t_352size_t_2size_t_128size_t_2size_t_128size_t_1600size_t(secret_key, + ciphertext, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +static void +decapsulate_unpacked___4size_t_3168size_t_1536size_t_1568size_t_1568size_t_1536size_t_1408size_t_160size_t_11size_t_5size_t_352size_t_2size_t_128size_t_2size_t_128size_t_1600size_t( + libcrux_kyber_MlKemState___4size_t *state, + uint8_t (*ciphertext)[1568U], + uint8_t ret[32U] +) +{ + int32_t (*secret_as_ntt)[256U] = state->secret_as_ntt; + int32_t (*t_as_ntt)[256U] = state->t_as_ntt; + int32_t (*a_transpose)[4U][256U] = state->a_transpose; + Eurydice_slice + implicit_rejection_value = + Eurydice_array_to_slice((size_t)32U, + state->rej, + uint8_t, + Eurydice_slice); + Eurydice_slice + ind_cpa_public_key_hash = + Eurydice_array_to_slice((size_t)32U, + state->ind_cpa_public_key_hash, + uint8_t, + Eurydice_slice); + uint8_t decrypted[32U]; + decrypt_unpacked___4size_t_1568size_t_1408size_t_11size_t_5size_t(secret_as_ntt, + ciphertext[0U], + decrypted); + uint8_t to_hash0[64U]; + libcrux_kyber_ind_cpa_into_padded_array___64size_t(Eurydice_array_to_slice((size_t)32U, + decrypted, + uint8_t, + Eurydice_slice), + to_hash0); + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice_from((size_t)64U, + to_hash0, + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + size_t, + Eurydice_slice), + ind_cpa_public_key_hash, + uint8_t, + void *); + uint8_t hashed[64U]; + libcrux_kyber_hash_functions_G(Eurydice_array_to_slice((size_t)64U, + to_hash0, + uint8_t, + Eurydice_slice), + hashed); + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____0 = + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)64U, + hashed, + uint8_t, + Eurydice_slice), + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); 
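+  /* G(m' || H(pk)) yields 64 bytes: the first 32 are the candidate shared secret, the remainder the pseudorandomness for re-encryption. */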
+ Eurydice_slice shared_secret = uu____0.fst; + Eurydice_slice pseudorandomness = uu____0.snd; + uint8_t to_hash[1600U]; + into_padded_array___1600size_t(implicit_rejection_value, to_hash); + Eurydice_slice + uu____1 = + Eurydice_array_to_subslice_from((size_t)1600U, + to_hash, + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + size_t, + Eurydice_slice); + core_slice___Slice_T___copy_from_slice(uu____1, + as_ref___1568size_t(ciphertext), + uint8_t, + void *); + uint8_t implicit_rejection_shared_secret[32U]; + libcrux_kyber_hash_functions_PRF___32size_t(Eurydice_array_to_slice((size_t)1600U, + to_hash, + uint8_t, + Eurydice_slice), + implicit_rejection_shared_secret); + int32_t (*uu____2)[256U] = t_as_ntt; + int32_t (*uu____3)[4U][256U] = a_transpose; + uint8_t uu____4[32U]; + memcpy(uu____4, decrypted, (size_t)32U * sizeof (uint8_t)); + uint8_t expected_ciphertext[1568U]; + encrypt_unpacked___4size_t_1568size_t_1536size_t_1408size_t_160size_t_11size_t_5size_t_352size_t_2size_t_128size_t_2size_t_128size_t(uu____2, + uu____3, + uu____4, + pseudorandomness, + expected_ciphertext); + Eurydice_slice uu____5 = as_ref___1568size_t(ciphertext); + uint8_t + selector = + compare_ciphertexts_in_constant_time___1568size_t(uu____5, + Eurydice_array_to_slice((size_t)1568U, expected_ciphertext, uint8_t, Eurydice_slice)); + Eurydice_slice uu____6 = shared_secret; + uint8_t ret0[32U]; + libcrux_kyber_constant_time_ops_select_shared_secret_in_constant_time(uu____6, + Eurydice_array_to_slice((size_t)32U, implicit_rejection_shared_secret, uint8_t, Eurydice_slice), + selector, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +void +libcrux_kyber_kyber1024_decapsulate_unpacked( + libcrux_kyber_MlKemState___4size_t *state, + uint8_t (*ciphertext)[1568U], + uint8_t ret[32U] +) +{ + uint8_t ret0[32U]; + decapsulate_unpacked___4size_t_3168size_t_1536size_t_1568size_t_1568size_t_1536size_t_1408size_t_160size_t_11size_t_5size_t_352size_t_2size_t_128size_t_2size_t_128size_t_1600size_t(state, + ciphertext, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + diff --git a/libcrux/src/libcrux_kyber512.c b/libcrux/src/libcrux_kyber512.c new file mode 100644 index 000000000..610611eaa --- /dev/null +++ b/libcrux/src/libcrux_kyber512.c @@ -0,0 +1,1989 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: ../../../eurydice/eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc + F* version: b5cb71b8 + KaRaMeL version: 1282f04f + */ + +#include "libcrux_kyber512.h" + +#include "internal/libcrux_kyber_common.h" +#include "internal/libcrux_kyber768.h" +#include "libcrux_hacl_glue.h" + +static inline void +deserialize_ring_elements_reduced___800size_t_2size_t( + Eurydice_slice public_key, + int32_t ret[2U][256U] +) +{ + int32_t deserialized_pk[2U][256U]; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + memcpy(deserialized_pk[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(public_key, + uint8_t, + size_t) + / LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT; + i++) + { + size_t i0 = i; + Eurydice_slice + ring_element = + Eurydice_slice_subslice(public_key, + ( + (core_ops_range_Range__size_t){ + .start = i0 * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT, + .end = i0 + * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + + LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + } + ), + uint8_t, + core_ops_range_Range__size_t, + 
Eurydice_slice); + int32_t uu____0[256U]; + libcrux_kyber_serialize_deserialize_to_reduced_ring_element(ring_element, uu____0); + memcpy(deserialized_pk[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, deserialized_pk, (size_t)2U * sizeof (int32_t [256U])); +} + +static inline void +serialize_secret_key___2size_t_768size_t(int32_t key[2U][256U], uint8_t ret[768U]) +{ + uint8_t out[768U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)2U, + key, + int32_t [256U], + Eurydice_slice), + int32_t [256U], + size_t); + i++) + { + size_t i0 = i; + int32_t re[256U]; + memcpy(re, key[i0], (size_t)256U * sizeof (int32_t)); + Eurydice_slice + uu____0 = + Eurydice_array_to_subslice((size_t)768U, + out, + ( + (core_ops_range_Range__size_t){ + .start = i0 * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT, + .end = (i0 + (size_t)1U) * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + uint8_t ret0[384U]; + libcrux_kyber_serialize_serialize_uncompressed_ring_element(re, ret0); + core_slice___Slice_T___copy_from_slice(uu____0, + Eurydice_array_to_slice((size_t)384U, ret0, uint8_t, Eurydice_slice), + uint8_t, + void *); + } + memcpy(ret, out, (size_t)768U * sizeof (uint8_t)); +} + +static inline void +serialize_public_key___2size_t_768size_t_800size_t( + int32_t t_as_ntt[2U][256U], + Eurydice_slice seed_for_a, + uint8_t ret[800U] +) +{ + uint8_t public_key_serialized[800U] = { 0U }; + Eurydice_slice + uu____0 = + Eurydice_array_to_subslice((size_t)800U, + public_key_serialized, + ((core_ops_range_Range__size_t){ .start = (size_t)0U, .end = (size_t)768U }), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t uu____1[2U][256U]; + memcpy(uu____1, t_as_ntt, (size_t)2U * sizeof (int32_t [256U])); + uint8_t ret0[768U]; + serialize_secret_key___2size_t_768size_t(uu____1, ret0); + core_slice___Slice_T___copy_from_slice(uu____0, + Eurydice_array_to_slice((size_t)768U, ret0, uint8_t, Eurydice_slice), + uint8_t, + void *); + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice_from((size_t)800U, + public_key_serialized, + (size_t)768U, + uint8_t, + size_t, + Eurydice_slice), + seed_for_a, + uint8_t, + void *); + memcpy(ret, public_key_serialized, (size_t)800U * sizeof (uint8_t)); +} + +static bool validate_public_key___2size_t_768size_t_800size_t(uint8_t *public_key) +{ + int32_t deserialized_pk[2U][256U]; + deserialize_ring_elements_reduced___800size_t_2size_t(Eurydice_array_to_subslice_to((size_t)800U, + public_key, + (size_t)768U, + uint8_t, + size_t, + Eurydice_slice), + deserialized_pk); + int32_t uu____0[2U][256U]; + memcpy(uu____0, deserialized_pk, (size_t)2U * sizeof (int32_t [256U])); + uint8_t public_key_serialized[800U]; + serialize_public_key___2size_t_768size_t_800size_t(uu____0, + Eurydice_array_to_subslice_from((size_t)800U, + public_key, + (size_t)768U, + uint8_t, + size_t, + Eurydice_slice), + public_key_serialized); + return + core_array_equality___core__cmp__PartialEq__Array_B__N___for__Array_A__N____eq((size_t)800U, + public_key, + public_key_serialized, + uint8_t, + uint8_t, + bool); +} + +core_option_Option__libcrux_kyber_types_MlKemPublicKey__800size_t__ +libcrux_kyber_kyber512_validate_public_key(uint8_t public_key[800U]) +{ + core_option_Option__libcrux_kyber_types_MlKemPublicKey__800size_t__ uu____0; + if (validate_public_key___2size_t_768size_t_800size_t(public_key)) + { + 
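+  /* Validation succeeded: the encoded key round-trips through deserialize/serialize, so return it wrapped in Some. */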
core_option_Option__libcrux_kyber_types_MlKemPublicKey__800size_t__ lit; + lit.tag = core_option_Some; + memcpy(lit.f0, public_key, (size_t)800U * sizeof (uint8_t)); + uu____0 = lit; + } + else + { + uu____0 = + ( + (core_option_Option__libcrux_kyber_types_MlKemPublicKey__800size_t__){ + .tag = core_option_None + } + ); + } + return uu____0; +} + +static inline libcrux_digest_incremental_x4_Shake128StateX4 +absorb___2size_t(uint8_t input[2U][34U]) +{ + libcrux_digest_incremental_x4_Shake128StateX4 + state = libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__new(); + Eurydice_slice data[2U]; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + uint8_t buf[1U] = { 0U }; + data[i] = Eurydice_array_to_slice((size_t)1U, buf, uint8_t, Eurydice_slice); + } + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t i0 = i; + Eurydice_slice + uu____0 = Eurydice_array_to_slice((size_t)34U, input[i0], uint8_t, Eurydice_slice); + data[i0] = uu____0; + } + libcrux_digest_incremental_x4_Shake128StateX4 *uu____1 = &state; + Eurydice_slice uu____2[2U]; + memcpy(uu____2, data, (size_t)2U * sizeof (Eurydice_slice)); + libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__absorb_final((size_t)2U, + uu____1, + uu____2, + void *); + return state; +} + +static inline void +squeeze_three_blocks___2size_t( + libcrux_digest_incremental_x4_Shake128StateX4 *xof_state, + uint8_t ret[2U][504U] +) +{ + uint8_t output[2U][504U]; + libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__squeeze_blocks((size_t)504U, + (size_t)2U, + xof_state, + output, + void *); + uint8_t out[2U][504U] = { { 0U } }; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t i0 = i; + uint8_t uu____0[504U]; + memcpy(uu____0, output[i0], (size_t)504U * sizeof (uint8_t)); + memcpy(out[i0], uu____0, (size_t)504U * sizeof (uint8_t)); + } + memcpy(ret, out, (size_t)2U * sizeof (uint8_t [504U])); +} + +static bool +sample_from_uniform_distribution_next___2size_t_504size_t( + uint8_t randomness[2U][504U], + size_t *sampled_coefficients, + int32_t (*out)[256U] +) +{ + bool done = true; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t i0 = i; + core_slice_iter_Chunks + iter = + core_iter_traits_collect___core__iter__traits__collect__IntoIterator_for_I___into_iter(core_slice___Slice_T___chunks(Eurydice_array_to_slice((size_t)504U, + randomness[i0], + uint8_t, + Eurydice_slice), + (size_t)3U, + uint8_t, + core_slice_iter_Chunks), + core_slice_iter_Chunks, + core_slice_iter_Chunks); + while (true) + { + core_option_Option__Eurydice_slice_uint8_t + uu____0 = + core_slice_iter___core__iter__traits__iterator__Iterator_for_core__slice__iter__Chunks__a__T___70__next(&iter, + uint8_t, + core_option_Option__Eurydice_slice_uint8_t); + if (uu____0.tag == core_option_None) + { + break; + } + else + { + Eurydice_slice bytes = uu____0.f0; + int32_t b1 = (int32_t)Eurydice_slice_index(bytes, (size_t)0U, uint8_t, uint8_t); + int32_t b2 = (int32_t)Eurydice_slice_index(bytes, (size_t)1U, uint8_t, uint8_t); + int32_t b3 = (int32_t)Eurydice_slice_index(bytes, (size_t)2U, uint8_t, uint8_t); + int32_t d1 = (b2 & (int32_t)15) << 8U | b1; + int32_t d2 = b3 << 4U | b2 >> 4U; + bool uu____1; + if (d1 < LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS) + { + uu____1 = sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; + } + else + { + uu____1 = false; + } + if (uu____1) + { + out[i0][sampled_coefficients[i0]] = d1; + size_t uu____2 = i0; + 
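+  /* The 12-bit candidate d1 was accepted by rejection sampling (d1 < q = 3329 and the ring element is not yet full); advance the write position. */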
sampled_coefficients[uu____2] = sampled_coefficients[uu____2] + (size_t)1U; + } + bool uu____3; + if (d2 < LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS) + { + uu____3 = sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; + } + else + { + uu____3 = false; + } + if (uu____3) + { + out[i0][sampled_coefficients[i0]] = d2; + size_t uu____4 = i0; + sampled_coefficients[uu____4] = sampled_coefficients[uu____4] + (size_t)1U; + } + } + } + if (sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT) + { + done = false; + } + } + return done; +} + +static inline void +squeeze_block___2size_t( + libcrux_digest_incremental_x4_Shake128StateX4 *xof_state, + uint8_t ret[2U][168U] +) +{ + uint8_t output[2U][168U]; + libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__squeeze_blocks((size_t)168U, + (size_t)2U, + xof_state, + output, + void *); + uint8_t out[2U][168U] = { { 0U } }; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t i0 = i; + uint8_t uu____0[168U]; + memcpy(uu____0, output[i0], (size_t)168U * sizeof (uint8_t)); + memcpy(out[i0], uu____0, (size_t)168U * sizeof (uint8_t)); + } + memcpy(ret, out, (size_t)2U * sizeof (uint8_t [168U])); +} + +static bool +sample_from_uniform_distribution_next___2size_t_168size_t( + uint8_t randomness[2U][168U], + size_t *sampled_coefficients, + int32_t (*out)[256U] +) +{ + bool done = true; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t i0 = i; + core_slice_iter_Chunks + iter = + core_iter_traits_collect___core__iter__traits__collect__IntoIterator_for_I___into_iter(core_slice___Slice_T___chunks(Eurydice_array_to_slice((size_t)168U, + randomness[i0], + uint8_t, + Eurydice_slice), + (size_t)3U, + uint8_t, + core_slice_iter_Chunks), + core_slice_iter_Chunks, + core_slice_iter_Chunks); + while (true) + { + core_option_Option__Eurydice_slice_uint8_t + uu____0 = + core_slice_iter___core__iter__traits__iterator__Iterator_for_core__slice__iter__Chunks__a__T___70__next(&iter, + uint8_t, + core_option_Option__Eurydice_slice_uint8_t); + if (uu____0.tag == core_option_None) + { + break; + } + else + { + Eurydice_slice bytes = uu____0.f0; + int32_t b1 = (int32_t)Eurydice_slice_index(bytes, (size_t)0U, uint8_t, uint8_t); + int32_t b2 = (int32_t)Eurydice_slice_index(bytes, (size_t)1U, uint8_t, uint8_t); + int32_t b3 = (int32_t)Eurydice_slice_index(bytes, (size_t)2U, uint8_t, uint8_t); + int32_t d1 = (b2 & (int32_t)15) << 8U | b1; + int32_t d2 = b3 << 4U | b2 >> 4U; + bool uu____1; + if (d1 < LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS) + { + uu____1 = sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; + } + else + { + uu____1 = false; + } + if (uu____1) + { + out[i0][sampled_coefficients[i0]] = d1; + size_t uu____2 = i0; + sampled_coefficients[uu____2] = sampled_coefficients[uu____2] + (size_t)1U; + } + bool uu____3; + if (d2 < LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS) + { + uu____3 = sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; + } + else + { + uu____3 = false; + } + if (uu____3) + { + out[i0][sampled_coefficients[i0]] = d2; + size_t uu____4 = i0; + sampled_coefficients[uu____4] = sampled_coefficients[uu____4] + (size_t)1U; + } + } + } + if (sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT) + { + done = false; + } + } + return done; +} + +static void sample_from_xof___2size_t(uint8_t seeds[2U][34U], int32_t ret[2U][256U]) +{ + size_t sampled_coefficients[2U] = { 0U }; + int32_t 
out[2U][256U]; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + memcpy(out[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + uint8_t uu____0[2U][34U]; + memcpy(uu____0, seeds, (size_t)2U * sizeof (uint8_t [34U])); + libcrux_digest_incremental_x4_Shake128StateX4 xof_state = absorb___2size_t(uu____0); + uint8_t randomness0[2U][504U]; + squeeze_three_blocks___2size_t(&xof_state, randomness0); + uint8_t uu____1[2U][504U]; + memcpy(uu____1, randomness0, (size_t)2U * sizeof (uint8_t [504U])); + bool + done = + sample_from_uniform_distribution_next___2size_t_504size_t(uu____1, + sampled_coefficients, + out); + while (true) + { + if (!!done) + { + break; + } + uint8_t randomness[2U][168U]; + squeeze_block___2size_t(&xof_state, randomness); + uint8_t uu____2[2U][168U]; + memcpy(uu____2, randomness, (size_t)2U * sizeof (uint8_t [168U])); + done = + sample_from_uniform_distribution_next___2size_t_168size_t(uu____2, + sampled_coefficients, + out); + } + libcrux_kyber_hash_functions_free_state(xof_state); + memcpy(ret, out, (size_t)2U * sizeof (int32_t [256U])); +} + +static inline void +sample_matrix_A___2size_t(uint8_t seed[34U], bool transpose, int32_t ret[2U][2U][256U]) +{ + int32_t A_transpose[2U][2U][256U]; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + memcpy(A_transpose[i][0U], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + memcpy(A_transpose[i][1U], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for (size_t i0 = (size_t)0U; i0 < (size_t)2U; i0++) + { + size_t i1 = i0; + uint8_t uu____0[34U]; + memcpy(uu____0, seed, (size_t)34U * sizeof (uint8_t)); + uint8_t seeds[2U][34U]; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + memcpy(seeds[i], uu____0, (size_t)34U * sizeof (uint8_t)); + } + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t j = i; + seeds[j][32U] = (uint8_t)i1; + seeds[j][33U] = (uint8_t)j; + } + uint8_t uu____1[2U][34U]; + memcpy(uu____1, seeds, (size_t)2U * sizeof (uint8_t [34U])); + int32_t sampled[2U][256U]; + sample_from_xof___2size_t(uu____1, sampled); + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t j = i; + if (transpose) + { + memcpy(A_transpose[j][i1], sampled[j], (size_t)256U * sizeof (int32_t)); + } + else + { + memcpy(A_transpose[i1][j], sampled[j], (size_t)256U * sizeof (int32_t)); + } + } + } + memcpy(ret, A_transpose, (size_t)2U * sizeof (int32_t [2U][256U])); +} + +static void PRF___192size_t(Eurydice_slice input, uint8_t ret[192U]) +{ + uint8_t ret0[192U]; + libcrux_digest_shake256((size_t)192U, input, ret0, void *); + memcpy(ret, ret0, (size_t)192U * sizeof (uint8_t)); +} + +static inline void +sample_from_binomial_distribution___3size_t(Eurydice_slice randomness, int32_t ret[256U]) +{ + int32_t uu____0[256U]; + libcrux_kyber_sampling_sample_from_binomial_distribution_3(randomness, uu____0); + memcpy(ret, uu____0, (size_t)256U * sizeof (int32_t)); +} + +typedef struct __libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__uint8_t_s +{ + int32_t fst[2U][256U]; + uint8_t snd; +} +__libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__uint8_t; + +static inline __libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__uint8_t +sample_vector_cbd_then_ntt___2size_t_3size_t_192size_t( + uint8_t prf_input[33U], + uint8_t domain_separator +) +{ + int32_t re_as_ntt[2U][256U]; + for 
(size_t i = (size_t)0U; i < (size_t)2U; i++) + { + memcpy(re_as_ntt[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t i0 = i; + prf_input[32U] = domain_separator; + domain_separator = (uint32_t)domain_separator + 1U; + uint8_t prf_output[192U]; + PRF___192size_t(Eurydice_array_to_slice((size_t)33U, prf_input, uint8_t, Eurydice_slice), + prf_output); + int32_t r[256U]; + sample_from_binomial_distribution___3size_t(Eurydice_array_to_slice((size_t)192U, + prf_output, + uint8_t, + Eurydice_slice), + r); + int32_t uu____0[256U]; + libcrux_kyber_ntt_ntt_binomially_sampled_ring_element(r, uu____0); + memcpy(re_as_ntt[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + int32_t uu____1[2U][256U]; + memcpy(uu____1, re_as_ntt, (size_t)2U * sizeof (int32_t [256U])); + __libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__uint8_t lit; + memcpy(lit.fst, uu____1, (size_t)2U * sizeof (int32_t [256U])); + lit.snd = domain_separator; + return lit; +} + +static void +add_to_ring_element___2size_t(int32_t lhs[256U], int32_t (*rhs)[256U], int32_t ret[256U]) +{ + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)256U, lhs, int32_t, Eurydice_slice), + int32_t, + size_t); + i++) + { + size_t i0 = i; + size_t uu____0 = i0; + lhs[uu____0] = lhs[uu____0] + rhs[0U][i0]; + } + memcpy(ret, lhs, (size_t)256U * sizeof (int32_t)); +} + +static inline void +compute_As_plus_e___2size_t( + int32_t (*matrix_A)[2U][256U], + int32_t (*s_as_ntt)[256U], + int32_t (*error_as_ntt)[256U], + int32_t ret[2U][256U] +) +{ + int32_t result[2U][256U]; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + memcpy(result[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i0 = (size_t)0U; + i0 + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)2U, + matrix_A, + Eurydice_error_t_cg_array, + Eurydice_slice), + int32_t [2U][256U], + size_t); + i0++) + { + size_t i1 = i0; + int32_t (*row)[256U] = matrix_A[i1]; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)2U, + row, + int32_t [256U], + Eurydice_slice), + int32_t [256U], + size_t); + i++) + { + size_t j = i; + int32_t (*matrix_element)[256U] = &row[j]; + int32_t product[256U]; + libcrux_kyber_ntt_ntt_multiply(matrix_element, &s_as_ntt[j], product); + int32_t uu____0[256U]; + add_to_ring_element___2size_t(result[i1], &product, uu____0); + memcpy(result[i1], uu____0, (size_t)256U * sizeof (int32_t)); + } + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t j = i; + int32_t coefficient_normal_form = libcrux_kyber_arithmetic_to_standard_domain(result[i1][j]); + int32_t + uu____1 = + libcrux_kyber_arithmetic_barrett_reduce(coefficient_normal_form + error_as_ntt[i1][j]); + result[i1][j] = uu____1; + } + } + memcpy(ret, result, (size_t)2U * sizeof (int32_t [256U])); +} + +typedef struct +__libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__2size_t__s +{ + int32_t fst[2U][256U]; + int32_t snd[2U][256U]; + int32_t thd[2U][2U][256U]; +} 
+__libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__2size_t_; + +typedef struct +__libcrux_kyber_arithmetic_PolynomialRingElement_2size_t____libcrux_kyber_arithmetic_PolynomialRingElement_2size_t____libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__2size_t__uint8_t_800size_t__s +{ + __libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__2size_t_ + fst; + uint8_t snd[800U]; +} +__libcrux_kyber_arithmetic_PolynomialRingElement_2size_t____libcrux_kyber_arithmetic_PolynomialRingElement_2size_t____libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__2size_t__uint8_t_800size_t_; + +static __libcrux_kyber_arithmetic_PolynomialRingElement_2size_t____libcrux_kyber_arithmetic_PolynomialRingElement_2size_t____libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__2size_t__uint8_t_800size_t_ +generate_keypair_unpacked___2size_t_800size_t_768size_t_3size_t_192size_t( + Eurydice_slice key_generation_seed +) +{ + uint8_t hashed[64U]; + libcrux_kyber_hash_functions_G(key_generation_seed, hashed); + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____0 = + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)64U, + hashed, + uint8_t, + Eurydice_slice), + (size_t)32U, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice seed_for_A = uu____0.fst; + Eurydice_slice seed_for_secret_and_error = uu____0.snd; + int32_t a_transpose[2U][2U][256U]; + uint8_t ret[34U]; + libcrux_kyber_ind_cpa_into_padded_array___34size_t(seed_for_A, ret); + sample_matrix_A___2size_t(ret, true, a_transpose); + uint8_t prf_input[33U]; + libcrux_kyber_ind_cpa_into_padded_array___33size_t(seed_for_secret_and_error, prf_input); + uint8_t uu____1[33U]; + memcpy(uu____1, prf_input, (size_t)33U * sizeof (uint8_t)); + __libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__uint8_t + uu____2 = sample_vector_cbd_then_ntt___2size_t_3size_t_192size_t(uu____1, 0U); + int32_t secret_as_ntt[2U][256U]; + memcpy(secret_as_ntt, uu____2.fst, (size_t)2U * sizeof (int32_t [256U])); + uint8_t domain_separator = uu____2.snd; + uint8_t uu____3[33U]; + memcpy(uu____3, prf_input, (size_t)33U * sizeof (uint8_t)); + int32_t error_as_ntt[2U][256U]; + memcpy(error_as_ntt, + sample_vector_cbd_then_ntt___2size_t_3size_t_192size_t(uu____3, domain_separator).fst, + (size_t)2U * sizeof (int32_t [256U])); + int32_t t_as_ntt[2U][256U]; + compute_As_plus_e___2size_t(a_transpose, secret_as_ntt, error_as_ntt, t_as_ntt); + int32_t uu____4[2U][256U]; + memcpy(uu____4, t_as_ntt, (size_t)2U * sizeof (int32_t [256U])); + uint8_t public_key_serialized[800U]; + serialize_public_key___2size_t_768size_t_800size_t(uu____4, seed_for_A, public_key_serialized); + for (size_t i0 = (size_t)0U; i0 < (size_t)2U; i0++) + { + size_t i1 = i0; + for (size_t i = (size_t)0U; i < (size_t)256U; i++) + { + size_t j = i; + uint16_t uu____5 = libcrux_kyber_arithmetic_to_unsigned_representative(secret_as_ntt[i1][j]); + secret_as_ntt[i1][j] = (int32_t)uu____5; + uint16_t uu____6 = libcrux_kyber_arithmetic_to_unsigned_representative(t_as_ntt[i1][j]); + t_as_ntt[i1][j] = (int32_t)uu____6; + } + } + int32_t a_matrix[2U][2U][256U]; + memcpy(a_matrix, a_transpose, (size_t)2U * sizeof (int32_t [2U][256U])); + for (size_t i0 = (size_t)0U; i0 < (size_t)2U; i0++) + { + size_t i1 = i0; + for (size_t i = (size_t)0U; i < 
(size_t)2U; i++) + { + size_t j = i; + memcpy(a_matrix[i1][j], a_transpose[j][i1], (size_t)256U * sizeof (int32_t)); + } + } + int32_t uu____7[2U][256U]; + memcpy(uu____7, secret_as_ntt, (size_t)2U * sizeof (int32_t [256U])); + int32_t uu____8[2U][256U]; + memcpy(uu____8, t_as_ntt, (size_t)2U * sizeof (int32_t [256U])); + int32_t uu____9[2U][2U][256U]; + memcpy(uu____9, a_matrix, (size_t)2U * sizeof (int32_t [2U][256U])); + __libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__2size_t_ + uu____10; + memcpy(uu____10.fst, uu____7, (size_t)2U * sizeof (int32_t [256U])); + memcpy(uu____10.snd, uu____8, (size_t)2U * sizeof (int32_t [256U])); + memcpy(uu____10.thd, uu____9, (size_t)2U * sizeof (int32_t [2U][256U])); + uint8_t uu____11[800U]; + memcpy(uu____11, public_key_serialized, (size_t)800U * sizeof (uint8_t)); + __libcrux_kyber_arithmetic_PolynomialRingElement_2size_t____libcrux_kyber_arithmetic_PolynomialRingElement_2size_t____libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__2size_t__uint8_t_800size_t_ + lit; + lit.fst = uu____10; + memcpy(lit.snd, uu____11, (size_t)800U * sizeof (uint8_t)); + return lit; +} + +typedef struct __uint8_t_768size_t__uint8_t_800size_t__s +{ + uint8_t fst[768U]; + uint8_t snd[800U]; +} +__uint8_t_768size_t__uint8_t_800size_t_; + +static __uint8_t_768size_t__uint8_t_800size_t_ +generate_keypair___2size_t_768size_t_800size_t_768size_t_3size_t_192size_t( + Eurydice_slice key_generation_seed +) +{ + __libcrux_kyber_arithmetic_PolynomialRingElement_2size_t____libcrux_kyber_arithmetic_PolynomialRingElement_2size_t____libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__2size_t__uint8_t_800size_t_ + uu____0 = + generate_keypair_unpacked___2size_t_800size_t_768size_t_3size_t_192size_t(key_generation_seed); + int32_t secret_as_ntt[2U][256U]; + memcpy(secret_as_ntt, uu____0.fst.fst, (size_t)2U * sizeof (int32_t [256U])); + int32_t _t_as_ntt[2U][256U]; + memcpy(_t_as_ntt, uu____0.fst.snd, (size_t)2U * sizeof (int32_t [256U])); + int32_t _a_transpose[2U][2U][256U]; + memcpy(_a_transpose, uu____0.fst.thd, (size_t)2U * sizeof (int32_t [2U][256U])); + uint8_t public_key_serialized[800U]; + memcpy(public_key_serialized, uu____0.snd, (size_t)800U * sizeof (uint8_t)); + int32_t uu____1[2U][256U]; + memcpy(uu____1, secret_as_ntt, (size_t)2U * sizeof (int32_t [256U])); + uint8_t secret_key_serialized[768U]; + serialize_secret_key___2size_t_768size_t(uu____1, secret_key_serialized); + uint8_t uu____2[768U]; + memcpy(uu____2, secret_key_serialized, (size_t)768U * sizeof (uint8_t)); + uint8_t uu____3[800U]; + memcpy(uu____3, public_key_serialized, (size_t)800U * sizeof (uint8_t)); + __uint8_t_768size_t__uint8_t_800size_t_ lit; + memcpy(lit.fst, uu____2, (size_t)768U * sizeof (uint8_t)); + memcpy(lit.snd, uu____3, (size_t)800U * sizeof (uint8_t)); + return lit; +} + +static inline void +serialize_kem_secret_key___1632size_t( + Eurydice_slice private_key, + Eurydice_slice public_key, + Eurydice_slice implicit_rejection_value, + uint8_t ret[1632U] +) +{ + uint8_t out[1632U] = { 0U }; + size_t pointer = (size_t)0U; + uint8_t *uu____0 = out; + size_t uu____1 = pointer; + size_t uu____2 = pointer; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)1632U, + uu____0, + ( + (core_ops_range_Range__size_t){ + .start = uu____1, + .end = uu____2 + core_slice___Slice_T___len(private_key, uint8_t, size_t) + } + ), + uint8_t, + 
core_ops_range_Range__size_t, + Eurydice_slice), + private_key, + uint8_t, + void *); + pointer = pointer + core_slice___Slice_T___len(private_key, uint8_t, size_t); + uint8_t *uu____3 = out; + size_t uu____4 = pointer; + size_t uu____5 = pointer; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)1632U, + uu____3, + ( + (core_ops_range_Range__size_t){ + .start = uu____4, + .end = uu____5 + core_slice___Slice_T___len(public_key, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + public_key, + uint8_t, + void *); + pointer = pointer + core_slice___Slice_T___len(public_key, uint8_t, size_t); + Eurydice_slice + uu____6 = + Eurydice_array_to_subslice((size_t)1632U, + out, + ( + (core_ops_range_Range__size_t){ + .start = pointer, + .end = pointer + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + uint8_t ret0[32U]; + libcrux_kyber_hash_functions_H(public_key, ret0); + core_slice___Slice_T___copy_from_slice(uu____6, + Eurydice_array_to_slice((size_t)32U, ret0, uint8_t, Eurydice_slice), + uint8_t, + void *); + pointer = pointer + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE; + uint8_t *uu____7 = out; + size_t uu____8 = pointer; + size_t uu____9 = pointer; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)1632U, + uu____7, + ( + (core_ops_range_Range__size_t){ + .start = uu____8, + .end = uu____9 + core_slice___Slice_T___len(implicit_rejection_value, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + implicit_rejection_value, + uint8_t, + void *); + memcpy(ret, out, (size_t)1632U * sizeof (uint8_t)); +} + +typedef uint8_t MlKemPrivateKey___1632size_t[1632U]; + +static void from___1632size_t(uint8_t value[1632U], uint8_t ret[1632U]) +{ + uint8_t uu____0[1632U]; + memcpy(uu____0, value, (size_t)1632U * sizeof (uint8_t)); + memcpy(ret, uu____0, (size_t)1632U * sizeof (uint8_t)); +} + +static libcrux_kyber_types_MlKemKeyPair___1632size_t_800size_t +from___1632size_t_800size_t(uint8_t sk[1632U], uint8_t pk[800U]) +{ + libcrux_kyber_types_MlKemKeyPair___1632size_t_800size_t lit; + memcpy(lit.sk, sk, (size_t)1632U * sizeof (uint8_t)); + memcpy(lit.pk, pk, (size_t)800U * sizeof (uint8_t)); + return lit; +} + +static libcrux_kyber_types_MlKemKeyPair___1632size_t_800size_t +generate_keypair___2size_t_768size_t_1632size_t_800size_t_768size_t_3size_t_192size_t( + uint8_t randomness[64U] +) +{ + Eurydice_slice + ind_cpa_keypair_randomness = + Eurydice_array_to_subslice((size_t)64U, + randomness, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = LIBCRUX_KYBER_CONSTANTS_CPA_PKE_KEY_GENERATION_SEED_SIZE + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + Eurydice_slice + implicit_rejection_value = + Eurydice_array_to_subslice_from((size_t)64U, + randomness, + LIBCRUX_KYBER_CONSTANTS_CPA_PKE_KEY_GENERATION_SEED_SIZE, + uint8_t, + size_t, + Eurydice_slice); + __uint8_t_768size_t__uint8_t_800size_t_ + uu____0 = + generate_keypair___2size_t_768size_t_800size_t_768size_t_3size_t_192size_t(ind_cpa_keypair_randomness); + uint8_t ind_cpa_private_key[768U]; + memcpy(ind_cpa_private_key, uu____0.fst, (size_t)768U * sizeof (uint8_t)); + uint8_t public_key[800U]; + memcpy(public_key, uu____0.snd, (size_t)800U * sizeof (uint8_t)); + Eurydice_slice + uu____1 = Eurydice_array_to_slice((size_t)768U, ind_cpa_private_key, uint8_t, Eurydice_slice); + uint8_t secret_key_serialized[1632U]; + 
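+  /* Serialized secret-key layout: IND-CPA secret key (768) || public key (800) || H(public key) (32) || implicit-rejection value (32) = 1632 bytes. */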
serialize_kem_secret_key___1632size_t(uu____1, + Eurydice_array_to_slice((size_t)800U, public_key, uint8_t, Eurydice_slice), + implicit_rejection_value, + secret_key_serialized); + uint8_t uu____2[1632U]; + memcpy(uu____2, secret_key_serialized, (size_t)1632U * sizeof (uint8_t)); + uint8_t private_key[1632U]; + from___1632size_t(uu____2, private_key); + uint8_t uu____3[1632U]; + memcpy(uu____3, private_key, (size_t)1632U * sizeof (uint8_t)); + uint8_t uu____4[800U]; + memcpy(uu____4, public_key, (size_t)800U * sizeof (uint8_t)); + return from___1632size_t_800size_t(uu____3, uu____4); +} + +libcrux_kyber_types_MlKemKeyPair___1632size_t_800size_t +libcrux_kyber_kyber512_generate_key_pair(uint8_t randomness[64U]) +{ + uint8_t uu____0[64U]; + memcpy(uu____0, randomness, (size_t)64U * sizeof (uint8_t)); + return + generate_keypair___2size_t_768size_t_1632size_t_800size_t_768size_t_3size_t_192size_t(uu____0); +} + +static void from___800size_t(uint8_t value[800U], uint8_t ret[800U]) +{ + uint8_t uu____0[800U]; + memcpy(uu____0, value, (size_t)800U * sizeof (uint8_t)); + memcpy(ret, uu____0, (size_t)800U * sizeof (uint8_t)); +} + +static K___libcrux_kyber_MlKemState__2size_t___libcrux_kyber_types_MlKemPublicKey__800size_t__ +generate_keypair_unpacked___2size_t_768size_t_1632size_t_800size_t_768size_t_3size_t_192size_t( + uint8_t randomness[64U] +) +{ + Eurydice_slice + ind_cpa_keypair_randomness = + Eurydice_array_to_subslice((size_t)64U, + randomness, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = LIBCRUX_KYBER_CONSTANTS_CPA_PKE_KEY_GENERATION_SEED_SIZE + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + Eurydice_slice + implicit_rejection_value = + Eurydice_array_to_subslice_from((size_t)64U, + randomness, + LIBCRUX_KYBER_CONSTANTS_CPA_PKE_KEY_GENERATION_SEED_SIZE, + uint8_t, + size_t, + Eurydice_slice); + __libcrux_kyber_arithmetic_PolynomialRingElement_2size_t____libcrux_kyber_arithmetic_PolynomialRingElement_2size_t____libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__2size_t__uint8_t_800size_t_ + uu____0 = + generate_keypair_unpacked___2size_t_800size_t_768size_t_3size_t_192size_t(ind_cpa_keypair_randomness); + int32_t secret_as_ntt[2U][256U]; + memcpy(secret_as_ntt, uu____0.fst.fst, (size_t)2U * sizeof (int32_t [256U])); + int32_t t_as_ntt[2U][256U]; + memcpy(t_as_ntt, uu____0.fst.snd, (size_t)2U * sizeof (int32_t [256U])); + int32_t a_transpose[2U][2U][256U]; + memcpy(a_transpose, uu____0.fst.thd, (size_t)2U * sizeof (int32_t [2U][256U])); + uint8_t ind_cpa_public_key[800U]; + memcpy(ind_cpa_public_key, uu____0.snd, (size_t)800U * sizeof (uint8_t)); + uint8_t ind_cpa_public_key_hash[32U]; + libcrux_kyber_hash_functions_H(Eurydice_array_to_slice((size_t)800U, + ind_cpa_public_key, + uint8_t, + Eurydice_slice), + ind_cpa_public_key_hash); + uint8_t rej[32U]; + core_result_Result__uint8_t_32size_t__core_array_TryFromSliceError dst; + Eurydice_slice_to_array2(&dst, + implicit_rejection_value, + Eurydice_slice, + uint8_t [32U], + void *); + core_result__core__result__Result_T__E___unwrap__uint8_t_32size_t__core_array_TryFromSliceError(dst, + rej); + uint8_t uu____1[800U]; + memcpy(uu____1, ind_cpa_public_key, (size_t)800U * sizeof (uint8_t)); + uint8_t pubkey[800U]; + from___800size_t(uu____1, pubkey); + int32_t uu____2[2U][256U]; + memcpy(uu____2, secret_as_ntt, (size_t)2U * sizeof (int32_t [256U])); + int32_t uu____3[2U][256U]; + memcpy(uu____3, t_as_ntt, (size_t)2U * sizeof (int32_t [256U])); + int32_t uu____4[2U][2U][256U]; + 
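+  /* Stage copies of the unpacked key material (secret, t, A) before packing them into the returned state struct. */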
memcpy(uu____4, a_transpose, (size_t)2U * sizeof (int32_t [2U][256U])); + uint8_t uu____5[32U]; + memcpy(uu____5, rej, (size_t)32U * sizeof (uint8_t)); + uint8_t uu____6[32U]; + memcpy(uu____6, ind_cpa_public_key_hash, (size_t)32U * sizeof (uint8_t)); + K___libcrux_kyber_MlKemState__2size_t___libcrux_kyber_types_MlKemPublicKey__800size_t__ lit; + memcpy(lit.fst.secret_as_ntt, uu____2, (size_t)2U * sizeof (int32_t [256U])); + memcpy(lit.fst.t_as_ntt, uu____3, (size_t)2U * sizeof (int32_t [256U])); + memcpy(lit.fst.a_transpose, uu____4, (size_t)2U * sizeof (int32_t [2U][256U])); + memcpy(lit.fst.rej, uu____5, (size_t)32U * sizeof (uint8_t)); + memcpy(lit.fst.ind_cpa_public_key_hash, uu____6, (size_t)32U * sizeof (uint8_t)); + memcpy(lit.snd, pubkey, (size_t)800U * sizeof (uint8_t)); + return lit; +} + +K___libcrux_kyber_MlKemState__2size_t___libcrux_kyber_types_MlKemPublicKey__800size_t__ +libcrux_kyber_kyber512_generate_key_pair_unpacked(uint8_t randomness[64U]) +{ + uint8_t uu____0[64U]; + memcpy(uu____0, randomness, (size_t)64U * sizeof (uint8_t)); + return + generate_keypair_unpacked___2size_t_768size_t_1632size_t_800size_t_768size_t_3size_t_192size_t(uu____0); +} + +static uint8_t *as_slice___800size_t(uint8_t (*self)[800U]) +{ + return self[0U]; +} + +static inline void +deserialize_ring_elements_reduced___768size_t_2size_t( + Eurydice_slice public_key, + int32_t ret[2U][256U] +) +{ + int32_t deserialized_pk[2U][256U]; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + memcpy(deserialized_pk[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(public_key, + uint8_t, + size_t) + / LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT; + i++) + { + size_t i0 = i; + Eurydice_slice + ring_element = + Eurydice_slice_subslice(public_key, + ( + (core_ops_range_Range__size_t){ + .start = i0 * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT, + .end = i0 + * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + + LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t uu____0[256U]; + libcrux_kyber_serialize_deserialize_to_reduced_ring_element(ring_element, uu____0); + memcpy(deserialized_pk[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, deserialized_pk, (size_t)2U * sizeof (int32_t [256U])); +} + +static inline void +sample_ring_element_cbd___2size_t_128size_t_2size_t( + uint8_t *prf_input, + uint8_t *domain_separator, + int32_t ret[2U][256U] +) +{ + int32_t error_1[2U][256U]; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + memcpy(error_1[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t i0 = i; + prf_input[32U] = domain_separator[0U]; + domain_separator[0U] = (uint32_t)domain_separator[0U] + 1U; + uint8_t prf_output[128U]; + libcrux_kyber_hash_functions_PRF___128size_t(Eurydice_array_to_slice((size_t)33U, + prf_input, + uint8_t, + Eurydice_slice), + prf_output); + int32_t uu____0[256U]; + libcrux_kyber_sampling_sample_from_binomial_distribution___2size_t(Eurydice_array_to_slice((size_t)128U, + prf_output, + uint8_t, + Eurydice_slice), + uu____0); + memcpy(error_1[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, error_1, (size_t)2U * sizeof (int32_t [256U])); +} + +static inline void 
invert_ntt_montgomery___2size_t(int32_t re[256U], int32_t ret[256U]) +{ + size_t zeta_i = LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT / (size_t)2U; + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)1U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)2U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)3U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)4U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)5U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)6U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)7U, re); + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t i0 = i; + int32_t uu____0 = libcrux_kyber_arithmetic_barrett_reduce(re[i0]); + re[i0] = uu____0; + } + memcpy(ret, re, (size_t)256U * sizeof (int32_t)); +} + +static inline void +compute_vector_u___2size_t( + int32_t (*a_as_ntt)[2U][256U], + int32_t (*r_as_ntt)[256U], + int32_t (*error_1)[256U], + int32_t ret[2U][256U] +) +{ + int32_t result[2U][256U]; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + memcpy(result[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i0 = (size_t)0U; + i0 + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)2U, + a_as_ntt, + Eurydice_error_t_cg_array, + Eurydice_slice), + int32_t [2U][256U], + size_t); + i0++) + { + size_t i1 = i0; + int32_t (*row)[256U] = a_as_ntt[i1]; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)2U, + row, + int32_t [256U], + Eurydice_slice), + int32_t [256U], + size_t); + i++) + { + size_t j = i; + int32_t (*a_element)[256U] = &row[j]; + int32_t product[256U]; + libcrux_kyber_ntt_ntt_multiply(a_element, &r_as_ntt[j], product); + int32_t uu____0[256U]; + add_to_ring_element___2size_t(result[i1], &product, uu____0); + memcpy(result[i1], uu____0, (size_t)256U * sizeof (int32_t)); + } + int32_t uu____1[256U]; + invert_ntt_montgomery___2size_t(result[i1], uu____1); + memcpy(result[i1], uu____1, (size_t)256U * sizeof (int32_t)); + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t j = i; + int32_t + coefficient_normal_form = + libcrux_kyber_arithmetic_montgomery_reduce(result[i1][j] * (int32_t)1441); + int32_t + uu____2 = libcrux_kyber_arithmetic_barrett_reduce(coefficient_normal_form + error_1[i1][j]); + result[i1][j] = uu____2; + } + } + memcpy(ret, result, (size_t)2U * sizeof (int32_t [256U])); +} + +static inline void +compute_ring_element_v___2size_t( + int32_t (*t_as_ntt)[256U], + int32_t (*r_as_ntt)[256U], + int32_t (*error_2)[256U], + int32_t (*message)[256U], + int32_t ret[256U] +) +{ + int32_t result[256U]; + memcpy(result, + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t i0 = i; + int32_t product[256U]; + libcrux_kyber_ntt_ntt_multiply(&t_as_ntt[i0], &r_as_ntt[i0], product); + add_to_ring_element___2size_t(result, &product, result); + } + invert_ntt_montgomery___2size_t(result, result); + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t i0 = i; + int32_t + coefficient_normal_form = + libcrux_kyber_arithmetic_montgomery_reduce(result[i0] * (int32_t)1441); + int32_t + uu____0 = + 
libcrux_kyber_arithmetic_barrett_reduce(coefficient_normal_form + + error_2[0U][i0] + + message[0U][i0]); + result[i0] = uu____0; + } + memcpy(ret, result, (size_t)256U * sizeof (int32_t)); +} + +static void +compress_then_serialize_u___2size_t_640size_t_10size_t_320size_t( + int32_t input[2U][256U], + uint8_t ret[640U] +) +{ + uint8_t out[640U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)2U, + input, + int32_t [256U], + Eurydice_slice), + int32_t [256U], + size_t); + i++) + { + size_t i0 = i; + int32_t re[256U]; + memcpy(re, input[i0], (size_t)256U * sizeof (int32_t)); + Eurydice_slice + uu____0 = + Eurydice_array_to_subslice((size_t)640U, + out, + ( + (core_ops_range_Range__size_t){ + .start = i0 * ((size_t)640U / (size_t)2U), + .end = (i0 + (size_t)1U) * ((size_t)640U / (size_t)2U) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + uint8_t ret0[320U]; + libcrux_kyber_serialize_compress_then_serialize_ring_element_u___10size_t_320size_t(re, ret0); + core_slice___Slice_T___copy_from_slice(uu____0, + Eurydice_array_to_slice((size_t)320U, ret0, uint8_t, Eurydice_slice), + uint8_t, + void *); + } + memcpy(ret, out, (size_t)640U * sizeof (uint8_t)); +} + +static inline void into_padded_array___768size_t(Eurydice_slice slice, uint8_t ret[768U]) +{ + uint8_t out[768U] = { 0U }; + uint8_t *uu____0 = out; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)768U, + uu____0, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = core_slice___Slice_T___len(slice, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + slice, + uint8_t, + void *); + memcpy(ret, out, (size_t)768U * sizeof (uint8_t)); +} + +static void +encrypt_unpacked___2size_t_768size_t_768size_t_640size_t_128size_t_10size_t_4size_t_320size_t_3size_t_192size_t_2size_t_128size_t( + int32_t (*t_as_ntt)[256U], + int32_t (*a_transpose)[2U][256U], + uint8_t message[32U], + Eurydice_slice randomness, + uint8_t ret[768U] +) +{ + uint8_t prf_input[33U]; + libcrux_kyber_ind_cpa_into_padded_array___33size_t(randomness, prf_input); + uint8_t uu____0[33U]; + memcpy(uu____0, prf_input, (size_t)33U * sizeof (uint8_t)); + __libcrux_kyber_arithmetic_PolynomialRingElement_2size_t__uint8_t + uu____1 = sample_vector_cbd_then_ntt___2size_t_3size_t_192size_t(uu____0, 0U); + int32_t r_as_ntt[2U][256U]; + memcpy(r_as_ntt, uu____1.fst, (size_t)2U * sizeof (int32_t [256U])); + uint8_t domain_separator = uu____1.snd; + int32_t error_1[2U][256U]; + sample_ring_element_cbd___2size_t_128size_t_2size_t(prf_input, &domain_separator, error_1); + prf_input[32U] = domain_separator; + uint8_t prf_output[128U]; + libcrux_kyber_hash_functions_PRF___128size_t(Eurydice_array_to_slice((size_t)33U, + prf_input, + uint8_t, + Eurydice_slice), + prf_output); + int32_t error_2[256U]; + libcrux_kyber_sampling_sample_from_binomial_distribution___2size_t(Eurydice_array_to_slice((size_t)128U, + prf_output, + uint8_t, + Eurydice_slice), + error_2); + int32_t u[2U][256U]; + compute_vector_u___2size_t(a_transpose, r_as_ntt, error_1, u); + uint8_t uu____2[32U]; + memcpy(uu____2, message, (size_t)32U * sizeof (uint8_t)); + int32_t message_as_ring_element[256U]; + libcrux_kyber_serialize_deserialize_then_decompress_message(uu____2, message_as_ring_element); + int32_t v[256U]; + compute_ring_element_v___2size_t(t_as_ntt, r_as_ntt, &error_2, &message_as_ring_element, v); + int32_t uu____3[2U][256U]; + memcpy(uu____3, u, 
(size_t)2U * sizeof (int32_t [256U])); + uint8_t c1[640U]; + compress_then_serialize_u___2size_t_640size_t_10size_t_320size_t(uu____3, c1); + uint8_t c2[128U]; + libcrux_kyber_serialize_compress_then_serialize_ring_element_v___4size_t_128size_t(v, c2); + uint8_t ciphertext[768U]; + into_padded_array___768size_t(Eurydice_array_to_slice((size_t)640U, + c1, + uint8_t, + Eurydice_slice), + ciphertext); + Eurydice_slice + uu____4 = + Eurydice_array_to_subslice_from((size_t)768U, + ciphertext, + (size_t)640U, + uint8_t, + size_t, + Eurydice_slice); + core_slice___Slice_T___copy_from_slice(uu____4, + core_array___Array_T__N__23__as_slice((size_t)128U, c2, uint8_t, Eurydice_slice), + uint8_t, + void *); + memcpy(ret, ciphertext, (size_t)768U * sizeof (uint8_t)); +} + +static void +encrypt___2size_t_768size_t_768size_t_640size_t_128size_t_10size_t_4size_t_320size_t_3size_t_192size_t_2size_t_128size_t( + Eurydice_slice public_key, + uint8_t message[32U], + Eurydice_slice randomness, + uint8_t ret[768U] +) +{ + int32_t t_as_ntt[2U][256U]; + deserialize_ring_elements_reduced___768size_t_2size_t(Eurydice_slice_subslice_to(public_key, + (size_t)768U, + uint8_t, + size_t, + Eurydice_slice), + t_as_ntt); + Eurydice_slice + seed = Eurydice_slice_subslice_from(public_key, (size_t)768U, uint8_t, size_t, Eurydice_slice); + int32_t a_transpose[2U][2U][256U]; + uint8_t ret0[34U]; + libcrux_kyber_ind_cpa_into_padded_array___34size_t(seed, ret0); + sample_matrix_A___2size_t(ret0, false, a_transpose); + int32_t (*uu____0)[256U] = t_as_ntt; + int32_t (*uu____1)[2U][256U] = a_transpose; + uint8_t uu____2[32U]; + memcpy(uu____2, message, (size_t)32U * sizeof (uint8_t)); + uint8_t ret1[768U]; + encrypt_unpacked___2size_t_768size_t_768size_t_640size_t_128size_t_10size_t_4size_t_320size_t_3size_t_192size_t_2size_t_128size_t(uu____0, + uu____1, + uu____2, + randomness, + ret1); + memcpy(ret, ret1, (size_t)768U * sizeof (uint8_t)); +} + +typedef uint8_t MlKemCiphertext___768size_t[768U]; + +static K___libcrux_kyber_types_MlKemCiphertext__768size_t___uint8_t_32size_t_ +encapsulate___2size_t_768size_t_800size_t_768size_t_640size_t_128size_t_10size_t_4size_t_320size_t_3size_t_192size_t_2size_t_128size_t( + uint8_t (*public_key)[800U], + uint8_t randomness[32U] +) +{ + uint8_t to_hash[64U]; + libcrux_kyber_ind_cpa_into_padded_array___64size_t(Eurydice_array_to_slice((size_t)32U, + randomness, + uint8_t, + Eurydice_slice), + to_hash); + Eurydice_slice + uu____0 = + Eurydice_array_to_subslice_from((size_t)64U, + to_hash, + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE, + uint8_t, + size_t, + Eurydice_slice); + uint8_t ret[32U]; + libcrux_kyber_hash_functions_H(Eurydice_array_to_slice((size_t)800U, + as_slice___800size_t(public_key), + uint8_t, + Eurydice_slice), + ret); + core_slice___Slice_T___copy_from_slice(uu____0, + Eurydice_array_to_slice((size_t)32U, ret, uint8_t, Eurydice_slice), + uint8_t, + void *); + uint8_t hashed[64U]; + libcrux_kyber_hash_functions_G(Eurydice_array_to_slice((size_t)64U, + to_hash, + uint8_t, + Eurydice_slice), + hashed); + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____1 = + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)64U, + hashed, + uint8_t, + Eurydice_slice), + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice shared_secret = uu____1.fst; + Eurydice_slice pseudorandomness = uu____1.snd; + Eurydice_slice + uu____2 = + Eurydice_array_to_slice((size_t)800U, + as_slice___800size_t(public_key), + 
uint8_t, + Eurydice_slice); + uint8_t uu____3[32U]; + memcpy(uu____3, randomness, (size_t)32U * sizeof (uint8_t)); + uint8_t ciphertext[768U]; + encrypt___2size_t_768size_t_768size_t_640size_t_128size_t_10size_t_4size_t_320size_t_3size_t_192size_t_2size_t_128size_t(uu____2, + uu____3, + pseudorandomness, + ciphertext); + uint8_t shared_secret_array[32U] = { 0U }; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_slice((size_t)32U, + shared_secret_array, + uint8_t, + Eurydice_slice), + shared_secret, + uint8_t, + void *); + uint8_t uu____4[768U]; + memcpy(uu____4, ciphertext, (size_t)768U * sizeof (uint8_t)); + uint8_t uu____5[768U]; + memcpy(uu____5, uu____4, (size_t)768U * sizeof (uint8_t)); + uint8_t uu____6[32U]; + memcpy(uu____6, shared_secret_array, (size_t)32U * sizeof (uint8_t)); + K___libcrux_kyber_types_MlKemCiphertext__768size_t___uint8_t_32size_t_ lit; + memcpy(lit.fst, uu____5, (size_t)768U * sizeof (uint8_t)); + memcpy(lit.snd, uu____6, (size_t)32U * sizeof (uint8_t)); + return lit; +} + +K___libcrux_kyber_types_MlKemCiphertext__768size_t___uint8_t_32size_t_ +libcrux_kyber_kyber512_encapsulate(uint8_t (*public_key)[800U], uint8_t randomness[32U]) +{ + uint8_t (*uu____0)[800U] = public_key; + uint8_t uu____1[32U]; + memcpy(uu____1, randomness, (size_t)32U * sizeof (uint8_t)); + return + encapsulate___2size_t_768size_t_800size_t_768size_t_640size_t_128size_t_10size_t_4size_t_320size_t_3size_t_192size_t_2size_t_128size_t(uu____0, + uu____1); +} + +static K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t +split_at___1632size_t(uint8_t (*self)[1632U], size_t mid) +{ + return + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)1632U, + self[0U], + uint8_t, + Eurydice_slice), + mid, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); +} + +static inline void +deserialize_secret_key___2size_t(Eurydice_slice secret_key, int32_t ret[2U][256U]) +{ + int32_t secret_as_ntt[2U][256U]; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + memcpy(secret_as_ntt[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(secret_key, + uint8_t, + size_t) + / LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT; + i++) + { + size_t i0 = i; + Eurydice_slice + secret_bytes = + Eurydice_slice_subslice(secret_key, + ( + (core_ops_range_Range__size_t){ + .start = i0 * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT, + .end = i0 + * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + + LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t uu____0[256U]; + libcrux_kyber_serialize_deserialize_to_uncompressed_ring_element(secret_bytes, uu____0); + memcpy(secret_as_ntt[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, secret_as_ntt, (size_t)2U * sizeof (int32_t [256U])); +} + +static inline void +deserialize_then_decompress_u___2size_t_768size_t_10size_t( + uint8_t *ciphertext, + int32_t ret[2U][256U] +) +{ + int32_t u_as_ntt[2U][256U]; + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + memcpy(u_as_ntt[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)768U, + ciphertext, + uint8_t, + Eurydice_slice), + uint8_t, + size_t) + / 
(LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * (size_t)10U / (size_t)8U); + i++) + { + size_t i0 = i; + Eurydice_slice + u_bytes = + Eurydice_array_to_subslice((size_t)768U, + ciphertext, + ( + (core_ops_range_Range__size_t){ + .start = i0 + * (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * (size_t)10U / (size_t)8U), + .end = i0 + * (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * (size_t)10U / (size_t)8U) + + LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * (size_t)10U / (size_t)8U + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t u[256U]; + libcrux_kyber_serialize_deserialize_then_decompress_ring_element_u___10size_t(u_bytes, u); + int32_t uu____0[256U]; + libcrux_kyber_ntt_ntt_vector_u___10size_t(u, uu____0); + memcpy(u_as_ntt[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, u_as_ntt, (size_t)2U * sizeof (int32_t [256U])); +} + +static inline void +compute_message___2size_t( + int32_t (*v)[256U], + int32_t (*secret_as_ntt)[256U], + int32_t (*u_as_ntt)[256U], + int32_t ret[256U] +) +{ + int32_t result[256U]; + memcpy(result, + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t i0 = i; + int32_t product[256U]; + libcrux_kyber_ntt_ntt_multiply(&secret_as_ntt[i0], &u_as_ntt[i0], product); + add_to_ring_element___2size_t(result, &product, result); + } + invert_ntt_montgomery___2size_t(result, result); + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t i0 = i; + int32_t + coefficient_normal_form = + libcrux_kyber_arithmetic_montgomery_reduce(result[i0] * (int32_t)1441); + int32_t uu____0 = libcrux_kyber_arithmetic_barrett_reduce(v[0U][i0] - coefficient_normal_form); + result[i0] = uu____0; + } + memcpy(ret, result, (size_t)256U * sizeof (int32_t)); +} + +static void +decrypt_unpacked___2size_t_768size_t_640size_t_10size_t_4size_t( + int32_t (*secret_as_ntt)[256U], + uint8_t *ciphertext, + uint8_t ret[32U] +) +{ + int32_t u_as_ntt[2U][256U]; + deserialize_then_decompress_u___2size_t_768size_t_10size_t(ciphertext, u_as_ntt); + int32_t v[256U]; + libcrux_kyber_serialize_deserialize_then_decompress_ring_element_v___4size_t(Eurydice_array_to_subslice_from((size_t)768U, + ciphertext, + (size_t)640U, + uint8_t, + size_t, + Eurydice_slice), + v); + int32_t message[256U]; + compute_message___2size_t(&v, secret_as_ntt, u_as_ntt, message); + uint8_t ret0[32U]; + libcrux_kyber_serialize_compress_then_serialize_message(message, ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +static void +decrypt___2size_t_768size_t_640size_t_10size_t_4size_t( + Eurydice_slice secret_key, + uint8_t *ciphertext, + uint8_t ret[32U] +) +{ + int32_t secret_as_ntt[2U][256U]; + deserialize_secret_key___2size_t(secret_key, secret_as_ntt); + uint8_t ret0[32U]; + decrypt_unpacked___2size_t_768size_t_640size_t_10size_t_4size_t(secret_as_ntt, + ciphertext, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +static inline void into_padded_array___800size_t(Eurydice_slice slice, uint8_t ret[800U]) +{ + uint8_t out[800U] = { 0U }; + uint8_t *uu____0 = out; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)800U, + uu____0, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = core_slice___Slice_T___len(slice, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + slice, 
+ uint8_t, + void *); + memcpy(ret, out, (size_t)800U * sizeof (uint8_t)); +} + +static Eurydice_slice as_ref___768size_t(uint8_t (*self)[768U]) +{ + return Eurydice_array_to_slice((size_t)768U, self[0U], uint8_t, Eurydice_slice); +} + +static uint8_t +compare_ciphertexts_in_constant_time___768size_t(Eurydice_slice lhs, Eurydice_slice rhs) +{ + uint8_t r = 0U; + for (size_t i = (size_t)0U; i < (size_t)768U; i++) + { + size_t i0 = i; + uint8_t uu____0 = Eurydice_slice_index(lhs, i0, uint8_t, uint8_t); + r = + (uint32_t)r + | ((uint32_t)uu____0 ^ (uint32_t)Eurydice_slice_index(rhs, i0, uint8_t, uint8_t)); + } + return libcrux_kyber_constant_time_ops_is_non_zero(r); +} + +static void +decapsulate___2size_t_1632size_t_768size_t_800size_t_768size_t_768size_t_640size_t_128size_t_10size_t_4size_t_320size_t_3size_t_192size_t_2size_t_128size_t_800size_t( + uint8_t (*secret_key)[1632U], + uint8_t (*ciphertext)[768U], + uint8_t ret[32U] +) +{ + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____0 = split_at___1632size_t(secret_key, (size_t)768U); + Eurydice_slice ind_cpa_secret_key = uu____0.fst; + Eurydice_slice secret_key0 = uu____0.snd; + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____1 = + core_slice___Slice_T___split_at(secret_key0, + (size_t)800U, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice ind_cpa_public_key = uu____1.fst; + Eurydice_slice secret_key1 = uu____1.snd; + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____2 = + core_slice___Slice_T___split_at(secret_key1, + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice ind_cpa_public_key_hash = uu____2.fst; + Eurydice_slice implicit_rejection_value = uu____2.snd; + uint8_t decrypted[32U]; + decrypt___2size_t_768size_t_640size_t_10size_t_4size_t(ind_cpa_secret_key, + ciphertext[0U], + decrypted); + uint8_t to_hash0[64U]; + libcrux_kyber_ind_cpa_into_padded_array___64size_t(Eurydice_array_to_slice((size_t)32U, + decrypted, + uint8_t, + Eurydice_slice), + to_hash0); + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice_from((size_t)64U, + to_hash0, + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + size_t, + Eurydice_slice), + ind_cpa_public_key_hash, + uint8_t, + void *); + uint8_t hashed[64U]; + libcrux_kyber_hash_functions_G(Eurydice_array_to_slice((size_t)64U, + to_hash0, + uint8_t, + Eurydice_slice), + hashed); + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____3 = + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)64U, + hashed, + uint8_t, + Eurydice_slice), + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice shared_secret = uu____3.fst; + Eurydice_slice pseudorandomness = uu____3.snd; + uint8_t to_hash[800U]; + into_padded_array___800size_t(implicit_rejection_value, to_hash); + Eurydice_slice + uu____4 = + Eurydice_array_to_subslice_from((size_t)800U, + to_hash, + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + size_t, + Eurydice_slice); + core_slice___Slice_T___copy_from_slice(uu____4, + as_ref___768size_t(ciphertext), + uint8_t, + void *); + uint8_t implicit_rejection_shared_secret[32U]; + libcrux_kyber_hash_functions_PRF___32size_t(Eurydice_array_to_slice((size_t)800U, + to_hash, + uint8_t, + Eurydice_slice), + implicit_rejection_shared_secret); + Eurydice_slice uu____5 = ind_cpa_public_key; + uint8_t uu____6[32U]; + memcpy(uu____6, decrypted, (size_t)32U * sizeof (uint8_t)); 
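+ /* FO transform: re-encrypt the decrypted message under the IND-CPA public key and compare the result with the received ciphertext in constant time; on mismatch the implicit-rejection PRF output is returned instead of the real shared secret. */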
+ uint8_t expected_ciphertext[768U]; + encrypt___2size_t_768size_t_768size_t_640size_t_128size_t_10size_t_4size_t_320size_t_3size_t_192size_t_2size_t_128size_t(uu____5, + uu____6, + pseudorandomness, + expected_ciphertext); + Eurydice_slice uu____7 = as_ref___768size_t(ciphertext); + uint8_t + selector = + compare_ciphertexts_in_constant_time___768size_t(uu____7, + Eurydice_array_to_slice((size_t)768U, expected_ciphertext, uint8_t, Eurydice_slice)); + Eurydice_slice uu____8 = shared_secret; + uint8_t ret0[32U]; + libcrux_kyber_constant_time_ops_select_shared_secret_in_constant_time(uu____8, + Eurydice_array_to_slice((size_t)32U, implicit_rejection_shared_secret, uint8_t, Eurydice_slice), + selector, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +void +libcrux_kyber_kyber512_decapsulate( + uint8_t (*secret_key)[1632U], + uint8_t (*ciphertext)[768U], + uint8_t ret[32U] +) +{ + uint8_t ret0[32U]; + decapsulate___2size_t_1632size_t_768size_t_800size_t_768size_t_768size_t_640size_t_128size_t_10size_t_4size_t_320size_t_3size_t_192size_t_2size_t_128size_t_800size_t(secret_key, + ciphertext, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +static void +decapsulate_unpacked___2size_t_1632size_t_768size_t_800size_t_768size_t_768size_t_640size_t_128size_t_10size_t_4size_t_320size_t_3size_t_192size_t_2size_t_128size_t_800size_t( + libcrux_kyber_MlKemState___2size_t *state, + uint8_t (*ciphertext)[768U], + uint8_t ret[32U] +) +{ + int32_t (*secret_as_ntt)[256U] = state->secret_as_ntt; + int32_t (*t_as_ntt)[256U] = state->t_as_ntt; + int32_t (*a_transpose)[2U][256U] = state->a_transpose; + Eurydice_slice + implicit_rejection_value = + Eurydice_array_to_slice((size_t)32U, + state->rej, + uint8_t, + Eurydice_slice); + Eurydice_slice + ind_cpa_public_key_hash = + Eurydice_array_to_slice((size_t)32U, + state->ind_cpa_public_key_hash, + uint8_t, + Eurydice_slice); + uint8_t decrypted[32U]; + decrypt_unpacked___2size_t_768size_t_640size_t_10size_t_4size_t(secret_as_ntt, + ciphertext[0U], + decrypted); + uint8_t to_hash0[64U]; + libcrux_kyber_ind_cpa_into_padded_array___64size_t(Eurydice_array_to_slice((size_t)32U, + decrypted, + uint8_t, + Eurydice_slice), + to_hash0); + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice_from((size_t)64U, + to_hash0, + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + size_t, + Eurydice_slice), + ind_cpa_public_key_hash, + uint8_t, + void *); + uint8_t hashed[64U]; + libcrux_kyber_hash_functions_G(Eurydice_array_to_slice((size_t)64U, + to_hash0, + uint8_t, + Eurydice_slice), + hashed); + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____0 = + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)64U, + hashed, + uint8_t, + Eurydice_slice), + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice shared_secret = uu____0.fst; + Eurydice_slice pseudorandomness = uu____0.snd; + uint8_t to_hash[800U]; + into_padded_array___800size_t(implicit_rejection_value, to_hash); + Eurydice_slice + uu____1 = + Eurydice_array_to_subslice_from((size_t)800U, + to_hash, + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + size_t, + Eurydice_slice); + core_slice___Slice_T___copy_from_slice(uu____1, + as_ref___768size_t(ciphertext), + uint8_t, + void *); + uint8_t implicit_rejection_shared_secret[32U]; + libcrux_kyber_hash_functions_PRF___32size_t(Eurydice_array_to_slice((size_t)800U, + to_hash, + uint8_t, + Eurydice_slice), + 
implicit_rejection_shared_secret); + int32_t (*uu____2)[256U] = t_as_ntt; + int32_t (*uu____3)[2U][256U] = a_transpose; + uint8_t uu____4[32U]; + memcpy(uu____4, decrypted, (size_t)32U * sizeof (uint8_t)); + uint8_t expected_ciphertext[768U]; + encrypt_unpacked___2size_t_768size_t_768size_t_640size_t_128size_t_10size_t_4size_t_320size_t_3size_t_192size_t_2size_t_128size_t(uu____2, + uu____3, + uu____4, + pseudorandomness, + expected_ciphertext); + Eurydice_slice uu____5 = as_ref___768size_t(ciphertext); + uint8_t + selector = + compare_ciphertexts_in_constant_time___768size_t(uu____5, + Eurydice_array_to_slice((size_t)768U, expected_ciphertext, uint8_t, Eurydice_slice)); + Eurydice_slice uu____6 = shared_secret; + uint8_t ret0[32U]; + libcrux_kyber_constant_time_ops_select_shared_secret_in_constant_time(uu____6, + Eurydice_array_to_slice((size_t)32U, implicit_rejection_shared_secret, uint8_t, Eurydice_slice), + selector, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +void +libcrux_kyber_kyber512_decapsulate_unpacked( + libcrux_kyber_MlKemState___2size_t *state, + uint8_t (*ciphertext)[768U], + uint8_t ret[32U] +) +{ + uint8_t ret0[32U]; + decapsulate_unpacked___2size_t_1632size_t_768size_t_800size_t_768size_t_768size_t_640size_t_128size_t_10size_t_4size_t_320size_t_3size_t_192size_t_2size_t_128size_t_800size_t(state, + ciphertext, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + diff --git a/libcrux/src/libcrux_kyber768.c b/libcrux/src/libcrux_kyber768.c new file mode 100644 index 000000000..537fd3d43 --- /dev/null +++ b/libcrux/src/libcrux_kyber768.c @@ -0,0 +1,2502 @@ +/* + This file was generated by KaRaMeL + KaRaMeL invocation: ../../../eurydice/eurydice --config ../../kyber-c.yaml ../libcrux_kyber.llbc + F* version: b5cb71b8 + KaRaMeL version: 1282f04f + */ + +#include "internal/libcrux_kyber768.h" + +#include "internal/libcrux_kyber_common.h" +#include "libcrux_hacl_glue.h" + +static inline void +deserialize_ring_elements_reduced___1184size_t_3size_t( + Eurydice_slice public_key, + int32_t ret[3U][256U] +) +{ + int32_t deserialized_pk[3U][256U]; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + memcpy(deserialized_pk[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(public_key, + uint8_t, + size_t) + / LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT; + i++) + { + size_t i0 = i; + Eurydice_slice + ring_element = + Eurydice_slice_subslice(public_key, + ( + (core_ops_range_Range__size_t){ + .start = i0 * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT, + .end = i0 + * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + + LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t uu____0[256U]; + libcrux_kyber_serialize_deserialize_to_reduced_ring_element(ring_element, uu____0); + memcpy(deserialized_pk[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, deserialized_pk, (size_t)3U * sizeof (int32_t [256U])); +} + +static inline void +serialize_secret_key___3size_t_1152size_t(int32_t key[3U][256U], uint8_t ret[1152U]) +{ + uint8_t out[1152U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)3U, + key, + int32_t [256U], + Eurydice_slice), + int32_t [256U], + size_t); + i++) + { + size_t i0 = i; + int32_t re[256U]; + memcpy(re, key[i0], (size_t)256U * sizeof 
(int32_t)); + Eurydice_slice + uu____0 = + Eurydice_array_to_subslice((size_t)1152U, + out, + ( + (core_ops_range_Range__size_t){ + .start = i0 * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT, + .end = (i0 + (size_t)1U) * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + uint8_t ret0[384U]; + libcrux_kyber_serialize_serialize_uncompressed_ring_element(re, ret0); + core_slice___Slice_T___copy_from_slice(uu____0, + Eurydice_array_to_slice((size_t)384U, ret0, uint8_t, Eurydice_slice), + uint8_t, + void *); + } + memcpy(ret, out, (size_t)1152U * sizeof (uint8_t)); +} + +static inline void +serialize_public_key___3size_t_1152size_t_1184size_t( + int32_t t_as_ntt[3U][256U], + Eurydice_slice seed_for_a, + uint8_t ret[1184U] +) +{ + uint8_t public_key_serialized[1184U] = { 0U }; + Eurydice_slice + uu____0 = + Eurydice_array_to_subslice((size_t)1184U, + public_key_serialized, + ((core_ops_range_Range__size_t){ .start = (size_t)0U, .end = (size_t)1152U }), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t uu____1[3U][256U]; + memcpy(uu____1, t_as_ntt, (size_t)3U * sizeof (int32_t [256U])); + uint8_t ret0[1152U]; + serialize_secret_key___3size_t_1152size_t(uu____1, ret0); + core_slice___Slice_T___copy_from_slice(uu____0, + Eurydice_array_to_slice((size_t)1152U, ret0, uint8_t, Eurydice_slice), + uint8_t, + void *); + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice_from((size_t)1184U, + public_key_serialized, + (size_t)1152U, + uint8_t, + size_t, + Eurydice_slice), + seed_for_a, + uint8_t, + void *); + memcpy(ret, public_key_serialized, (size_t)1184U * sizeof (uint8_t)); +} + +static bool validate_public_key___3size_t_1152size_t_1184size_t(uint8_t *public_key) +{ + int32_t deserialized_pk[3U][256U]; + deserialize_ring_elements_reduced___1184size_t_3size_t(Eurydice_array_to_subslice_to((size_t)1184U, + public_key, + (size_t)1152U, + uint8_t, + size_t, + Eurydice_slice), + deserialized_pk); + int32_t uu____0[3U][256U]; + memcpy(uu____0, deserialized_pk, (size_t)3U * sizeof (int32_t [256U])); + uint8_t public_key_serialized[1184U]; + serialize_public_key___3size_t_1152size_t_1184size_t(uu____0, + Eurydice_array_to_subslice_from((size_t)1184U, + public_key, + (size_t)1152U, + uint8_t, + size_t, + Eurydice_slice), + public_key_serialized); + return + core_array_equality___core__cmp__PartialEq__Array_B__N___for__Array_A__N____eq((size_t)1184U, + public_key, + public_key_serialized, + uint8_t, + uint8_t, + bool); +} + +core_option_Option__libcrux_kyber_types_MlKemPublicKey__1184size_t__ +libcrux_kyber_kyber768_validate_public_key(uint8_t public_key[1184U]) +{ + core_option_Option__libcrux_kyber_types_MlKemPublicKey__1184size_t__ uu____0; + if (validate_public_key___3size_t_1152size_t_1184size_t(public_key)) + { + core_option_Option__libcrux_kyber_types_MlKemPublicKey__1184size_t__ lit; + lit.tag = core_option_Some; + memcpy(lit.f0, public_key, (size_t)1184U * sizeof (uint8_t)); + uu____0 = lit; + } + else + { + uu____0 = + ( + (core_option_Option__libcrux_kyber_types_MlKemPublicKey__1184size_t__){ + .tag = core_option_None + } + ); + } + return uu____0; +} + +static inline libcrux_digest_incremental_x4_Shake128StateX4 +absorb___3size_t(uint8_t input[3U][34U]) +{ + libcrux_digest_incremental_x4_Shake128StateX4 + state = libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__new(); + Eurydice_slice data[3U]; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + uint8_t 
buf[1U] = { 0U }; + data[i] = Eurydice_array_to_slice((size_t)1U, buf, uint8_t, Eurydice_slice); + } + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + size_t i0 = i; + Eurydice_slice + uu____0 = Eurydice_array_to_slice((size_t)34U, input[i0], uint8_t, Eurydice_slice); + data[i0] = uu____0; + } + libcrux_digest_incremental_x4_Shake128StateX4 *uu____1 = &state; + Eurydice_slice uu____2[3U]; + memcpy(uu____2, data, (size_t)3U * sizeof (Eurydice_slice)); + libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__absorb_final((size_t)3U, + uu____1, + uu____2, + void *); + return state; +} + +static inline void +squeeze_three_blocks___3size_t( + libcrux_digest_incremental_x4_Shake128StateX4 *xof_state, + uint8_t ret[3U][504U] +) +{ + uint8_t output[3U][504U]; + libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__squeeze_blocks((size_t)504U, + (size_t)3U, + xof_state, + output, + void *); + uint8_t out[3U][504U] = { { 0U } }; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + size_t i0 = i; + uint8_t uu____0[504U]; + memcpy(uu____0, output[i0], (size_t)504U * sizeof (uint8_t)); + memcpy(out[i0], uu____0, (size_t)504U * sizeof (uint8_t)); + } + memcpy(ret, out, (size_t)3U * sizeof (uint8_t [504U])); +} + +static bool +sample_from_uniform_distribution_next___3size_t_504size_t( + uint8_t randomness[3U][504U], + size_t *sampled_coefficients, + int32_t (*out)[256U] +) +{ + bool done = true; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + size_t i0 = i; + core_slice_iter_Chunks + iter = + core_iter_traits_collect___core__iter__traits__collect__IntoIterator_for_I___into_iter(core_slice___Slice_T___chunks(Eurydice_array_to_slice((size_t)504U, + randomness[i0], + uint8_t, + Eurydice_slice), + (size_t)3U, + uint8_t, + core_slice_iter_Chunks), + core_slice_iter_Chunks, + core_slice_iter_Chunks); + while (true) + { + core_option_Option__Eurydice_slice_uint8_t + uu____0 = + core_slice_iter___core__iter__traits__iterator__Iterator_for_core__slice__iter__Chunks__a__T___70__next(&iter, + uint8_t, + core_option_Option__Eurydice_slice_uint8_t); + if (uu____0.tag == core_option_None) + { + break; + } + else + { + Eurydice_slice bytes = uu____0.f0; + int32_t b1 = (int32_t)Eurydice_slice_index(bytes, (size_t)0U, uint8_t, uint8_t); + int32_t b2 = (int32_t)Eurydice_slice_index(bytes, (size_t)1U, uint8_t, uint8_t); + int32_t b3 = (int32_t)Eurydice_slice_index(bytes, (size_t)2U, uint8_t, uint8_t); + int32_t d1 = (b2 & (int32_t)15) << 8U | b1; + int32_t d2 = b3 << 4U | b2 >> 4U; + bool uu____1; + if (d1 < LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS) + { + uu____1 = sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; + } + else + { + uu____1 = false; + } + if (uu____1) + { + out[i0][sampled_coefficients[i0]] = d1; + size_t uu____2 = i0; + sampled_coefficients[uu____2] = sampled_coefficients[uu____2] + (size_t)1U; + } + bool uu____3; + if (d2 < LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS) + { + uu____3 = sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; + } + else + { + uu____3 = false; + } + if (uu____3) + { + out[i0][sampled_coefficients[i0]] = d2; + size_t uu____4 = i0; + sampled_coefficients[uu____4] = sampled_coefficients[uu____4] + (size_t)1U; + } + } + } + if (sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT) + { + done = false; + } + } + return done; +} + +static inline void +squeeze_block___3size_t( + libcrux_digest_incremental_x4_Shake128StateX4 *xof_state, + 
uint8_t ret[3U][168U] +) +{ + uint8_t output[3U][168U]; + libcrux_digest_incremental_x4__libcrux__digest__incremental_x4__Shake128StateX4__squeeze_blocks((size_t)168U, + (size_t)3U, + xof_state, + output, + void *); + uint8_t out[3U][168U] = { { 0U } }; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + size_t i0 = i; + uint8_t uu____0[168U]; + memcpy(uu____0, output[i0], (size_t)168U * sizeof (uint8_t)); + memcpy(out[i0], uu____0, (size_t)168U * sizeof (uint8_t)); + } + memcpy(ret, out, (size_t)3U * sizeof (uint8_t [168U])); +} + +static bool +sample_from_uniform_distribution_next___3size_t_168size_t( + uint8_t randomness[3U][168U], + size_t *sampled_coefficients, + int32_t (*out)[256U] +) +{ + bool done = true; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + size_t i0 = i; + core_slice_iter_Chunks + iter = + core_iter_traits_collect___core__iter__traits__collect__IntoIterator_for_I___into_iter(core_slice___Slice_T___chunks(Eurydice_array_to_slice((size_t)168U, + randomness[i0], + uint8_t, + Eurydice_slice), + (size_t)3U, + uint8_t, + core_slice_iter_Chunks), + core_slice_iter_Chunks, + core_slice_iter_Chunks); + while (true) + { + core_option_Option__Eurydice_slice_uint8_t + uu____0 = + core_slice_iter___core__iter__traits__iterator__Iterator_for_core__slice__iter__Chunks__a__T___70__next(&iter, + uint8_t, + core_option_Option__Eurydice_slice_uint8_t); + if (uu____0.tag == core_option_None) + { + break; + } + else + { + Eurydice_slice bytes = uu____0.f0; + int32_t b1 = (int32_t)Eurydice_slice_index(bytes, (size_t)0U, uint8_t, uint8_t); + int32_t b2 = (int32_t)Eurydice_slice_index(bytes, (size_t)1U, uint8_t, uint8_t); + int32_t b3 = (int32_t)Eurydice_slice_index(bytes, (size_t)2U, uint8_t, uint8_t); + int32_t d1 = (b2 & (int32_t)15) << 8U | b1; + int32_t d2 = b3 << 4U | b2 >> 4U; + bool uu____1; + if (d1 < LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS) + { + uu____1 = sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; + } + else + { + uu____1 = false; + } + if (uu____1) + { + out[i0][sampled_coefficients[i0]] = d1; + size_t uu____2 = i0; + sampled_coefficients[uu____2] = sampled_coefficients[uu____2] + (size_t)1U; + } + bool uu____3; + if (d2 < LIBCRUX_KYBER_CONSTANTS_FIELD_MODULUS) + { + uu____3 = sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; + } + else + { + uu____3 = false; + } + if (uu____3) + { + out[i0][sampled_coefficients[i0]] = d2; + size_t uu____4 = i0; + sampled_coefficients[uu____4] = sampled_coefficients[uu____4] + (size_t)1U; + } + } + } + if (sampled_coefficients[i0] < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT) + { + done = false; + } + } + return done; +} + +static void sample_from_xof___3size_t(uint8_t seeds[3U][34U], int32_t ret[3U][256U]) +{ + size_t sampled_coefficients[3U] = { 0U }; + int32_t out[3U][256U]; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + memcpy(out[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + uint8_t uu____0[3U][34U]; + memcpy(uu____0, seeds, (size_t)3U * sizeof (uint8_t [34U])); + libcrux_digest_incremental_x4_Shake128StateX4 xof_state = absorb___3size_t(uu____0); + uint8_t randomness0[3U][504U]; + squeeze_three_blocks___3size_t(&xof_state, randomness0); + uint8_t uu____1[3U][504U]; + memcpy(uu____1, randomness0, (size_t)3U * sizeof (uint8_t [504U])); + bool + done = + sample_from_uniform_distribution_next___3size_t_504size_t(uu____1, + sampled_coefficients, + out); + while 
(true) + { + if (!!done) + { + break; + } + uint8_t randomness[3U][168U]; + squeeze_block___3size_t(&xof_state, randomness); + uint8_t uu____2[3U][168U]; + memcpy(uu____2, randomness, (size_t)3U * sizeof (uint8_t [168U])); + done = + sample_from_uniform_distribution_next___3size_t_168size_t(uu____2, + sampled_coefficients, + out); + } + libcrux_kyber_hash_functions_free_state(xof_state); + memcpy(ret, out, (size_t)3U * sizeof (int32_t [256U])); +} + +static inline void +sample_matrix_A___3size_t(uint8_t seed[34U], bool transpose, int32_t ret[3U][3U][256U]) +{ + int32_t A_transpose[3U][3U][256U]; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + memcpy(A_transpose[i][0U], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + memcpy(A_transpose[i][1U], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + memcpy(A_transpose[i][2U], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for (size_t i0 = (size_t)0U; i0 < (size_t)3U; i0++) + { + size_t i1 = i0; + uint8_t uu____0[34U]; + memcpy(uu____0, seed, (size_t)34U * sizeof (uint8_t)); + uint8_t seeds[3U][34U]; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + memcpy(seeds[i], uu____0, (size_t)34U * sizeof (uint8_t)); + } + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + size_t j = i; + seeds[j][32U] = (uint8_t)i1; + seeds[j][33U] = (uint8_t)j; + } + uint8_t uu____1[3U][34U]; + memcpy(uu____1, seeds, (size_t)3U * sizeof (uint8_t [34U])); + int32_t sampled[3U][256U]; + sample_from_xof___3size_t(uu____1, sampled); + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + size_t j = i; + if (transpose) + { + memcpy(A_transpose[j][i1], sampled[j], (size_t)256U * sizeof (int32_t)); + } + else + { + memcpy(A_transpose[i1][j], sampled[j], (size_t)256U * sizeof (int32_t)); + } + } + } + memcpy(ret, A_transpose, (size_t)3U * sizeof (int32_t [3U][256U])); +} + +void libcrux_kyber_ind_cpa_into_padded_array___34size_t(Eurydice_slice slice, uint8_t ret[34U]) +{ + uint8_t out[34U] = { 0U }; + uint8_t *uu____0 = out; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)34U, + uu____0, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = core_slice___Slice_T___len(slice, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + slice, + uint8_t, + void *); + memcpy(ret, out, (size_t)34U * sizeof (uint8_t)); +} + +void libcrux_kyber_ind_cpa_into_padded_array___33size_t(Eurydice_slice slice, uint8_t ret[33U]) +{ + uint8_t out[33U] = { 0U }; + uint8_t *uu____0 = out; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)33U, + uu____0, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = core_slice___Slice_T___len(slice, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + slice, + uint8_t, + void *); + memcpy(ret, out, (size_t)33U * sizeof (uint8_t)); +} + +void libcrux_kyber_hash_functions_PRF___128size_t(Eurydice_slice input, uint8_t ret[128U]) +{ + uint8_t ret0[128U]; + libcrux_digest_shake256((size_t)128U, input, ret0, void *); + memcpy(ret, ret0, (size_t)128U * sizeof (uint8_t)); +} + +void +libcrux_kyber_sampling_sample_from_binomial_distribution___2size_t( + Eurydice_slice randomness, + int32_t ret[256U] +) +{ + int32_t uu____0[256U]; + 
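/* eta = 2 centered binomial sampling: 128 bytes of PRF output give 4 bits per coefficient for all 256 coefficients */ +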
libcrux_kyber_sampling_sample_from_binomial_distribution_2(randomness, uu____0); + memcpy(ret, uu____0, (size_t)256U * sizeof (int32_t)); +} + +typedef struct __libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__uint8_t_s +{ + int32_t fst[3U][256U]; + uint8_t snd; +} +__libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__uint8_t; + +static inline __libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__uint8_t +sample_vector_cbd_then_ntt___3size_t_2size_t_128size_t( + uint8_t prf_input[33U], + uint8_t domain_separator +) +{ + int32_t re_as_ntt[3U][256U]; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + memcpy(re_as_ntt[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + size_t i0 = i; + prf_input[32U] = domain_separator; + domain_separator = (uint32_t)domain_separator + 1U; + uint8_t prf_output[128U]; + libcrux_kyber_hash_functions_PRF___128size_t(Eurydice_array_to_slice((size_t)33U, + prf_input, + uint8_t, + Eurydice_slice), + prf_output); + int32_t r[256U]; + libcrux_kyber_sampling_sample_from_binomial_distribution___2size_t(Eurydice_array_to_slice((size_t)128U, + prf_output, + uint8_t, + Eurydice_slice), + r); + int32_t uu____0[256U]; + libcrux_kyber_ntt_ntt_binomially_sampled_ring_element(r, uu____0); + memcpy(re_as_ntt[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + int32_t uu____1[3U][256U]; + memcpy(uu____1, re_as_ntt, (size_t)3U * sizeof (int32_t [256U])); + __libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__uint8_t lit; + memcpy(lit.fst, uu____1, (size_t)3U * sizeof (int32_t [256U])); + lit.snd = domain_separator; + return lit; +} + +static void +add_to_ring_element___3size_t(int32_t lhs[256U], int32_t (*rhs)[256U], int32_t ret[256U]) +{ + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)256U, lhs, int32_t, Eurydice_slice), + int32_t, + size_t); + i++) + { + size_t i0 = i; + size_t uu____0 = i0; + lhs[uu____0] = lhs[uu____0] + rhs[0U][i0]; + } + memcpy(ret, lhs, (size_t)256U * sizeof (int32_t)); +} + +static inline void +compute_As_plus_e___3size_t( + int32_t (*matrix_A)[3U][256U], + int32_t (*s_as_ntt)[256U], + int32_t (*error_as_ntt)[256U], + int32_t ret[3U][256U] +) +{ + int32_t result[3U][256U]; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + memcpy(result[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i0 = (size_t)0U; + i0 + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)3U, + matrix_A, + Eurydice_error_t_cg_array, + Eurydice_slice), + int32_t [3U][256U], + size_t); + i0++) + { + size_t i1 = i0; + int32_t (*row)[256U] = matrix_A[i1]; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)3U, + row, + int32_t [256U], + Eurydice_slice), + int32_t [256U], + size_t); + i++) + { + size_t j = i; + int32_t (*matrix_element)[256U] = &row[j]; + int32_t product[256U]; + libcrux_kyber_ntt_ntt_multiply(matrix_element, &s_as_ntt[j], product); + int32_t uu____0[256U]; + add_to_ring_element___3size_t(result[i1], &product, uu____0); + memcpy(result[i1], uu____0, (size_t)256U * sizeof (int32_t)); + } + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t j = i; + int32_t coefficient_normal_form = libcrux_kyber_arithmetic_to_standard_domain(result[i1][j]); + 
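/* t = A o s + e: bring the accumulated NTT product back to the standard domain, then add the error coefficient and Barrett-reduce */ +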
int32_t + uu____1 = + libcrux_kyber_arithmetic_barrett_reduce(coefficient_normal_form + error_as_ntt[i1][j]); + result[i1][j] = uu____1; + } + } + memcpy(ret, result, (size_t)3U * sizeof (int32_t [256U])); +} + +typedef struct +__libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__3size_t__s +{ + int32_t fst[3U][256U]; + int32_t snd[3U][256U]; + int32_t thd[3U][3U][256U]; +} +__libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__3size_t_; + +typedef struct +__libcrux_kyber_arithmetic_PolynomialRingElement_3size_t____libcrux_kyber_arithmetic_PolynomialRingElement_3size_t____libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__3size_t__uint8_t_1184size_t__s +{ + __libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__3size_t_ + fst; + uint8_t snd[1184U]; +} +__libcrux_kyber_arithmetic_PolynomialRingElement_3size_t____libcrux_kyber_arithmetic_PolynomialRingElement_3size_t____libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__3size_t__uint8_t_1184size_t_; + +static __libcrux_kyber_arithmetic_PolynomialRingElement_3size_t____libcrux_kyber_arithmetic_PolynomialRingElement_3size_t____libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__3size_t__uint8_t_1184size_t_ +generate_keypair_unpacked___3size_t_1184size_t_1152size_t_2size_t_128size_t( + Eurydice_slice key_generation_seed +) +{ + uint8_t hashed[64U]; + libcrux_kyber_hash_functions_G(key_generation_seed, hashed); + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____0 = + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)64U, + hashed, + uint8_t, + Eurydice_slice), + (size_t)32U, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice seed_for_A = uu____0.fst; + Eurydice_slice seed_for_secret_and_error = uu____0.snd; + int32_t a_transpose[3U][3U][256U]; + uint8_t ret[34U]; + libcrux_kyber_ind_cpa_into_padded_array___34size_t(seed_for_A, ret); + sample_matrix_A___3size_t(ret, true, a_transpose); + uint8_t prf_input[33U]; + libcrux_kyber_ind_cpa_into_padded_array___33size_t(seed_for_secret_and_error, prf_input); + uint8_t uu____1[33U]; + memcpy(uu____1, prf_input, (size_t)33U * sizeof (uint8_t)); + __libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__uint8_t + uu____2 = sample_vector_cbd_then_ntt___3size_t_2size_t_128size_t(uu____1, 0U); + int32_t secret_as_ntt[3U][256U]; + memcpy(secret_as_ntt, uu____2.fst, (size_t)3U * sizeof (int32_t [256U])); + uint8_t domain_separator = uu____2.snd; + uint8_t uu____3[33U]; + memcpy(uu____3, prf_input, (size_t)33U * sizeof (uint8_t)); + int32_t error_as_ntt[3U][256U]; + memcpy(error_as_ntt, + sample_vector_cbd_then_ntt___3size_t_2size_t_128size_t(uu____3, domain_separator).fst, + (size_t)3U * sizeof (int32_t [256U])); + int32_t t_as_ntt[3U][256U]; + compute_As_plus_e___3size_t(a_transpose, secret_as_ntt, error_as_ntt, t_as_ntt); + int32_t uu____4[3U][256U]; + memcpy(uu____4, t_as_ntt, (size_t)3U * sizeof (int32_t [256U])); + uint8_t public_key_serialized[1184U]; + serialize_public_key___3size_t_1152size_t_1184size_t(uu____4, + seed_for_A, + public_key_serialized); + for (size_t i0 = (size_t)0U; i0 < (size_t)3U; i0++) + { + size_t i1 = i0; + for (size_t i = (size_t)0U; i < (size_t)256U; i++) + { + size_t j = 
i; + uint16_t uu____5 = libcrux_kyber_arithmetic_to_unsigned_representative(secret_as_ntt[i1][j]); + secret_as_ntt[i1][j] = (int32_t)uu____5; + uint16_t uu____6 = libcrux_kyber_arithmetic_to_unsigned_representative(t_as_ntt[i1][j]); + t_as_ntt[i1][j] = (int32_t)uu____6; + } + } + int32_t a_matrix[3U][3U][256U]; + memcpy(a_matrix, a_transpose, (size_t)3U * sizeof (int32_t [3U][256U])); + for (size_t i0 = (size_t)0U; i0 < (size_t)3U; i0++) + { + size_t i1 = i0; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + size_t j = i; + memcpy(a_matrix[i1][j], a_transpose[j][i1], (size_t)256U * sizeof (int32_t)); + } + } + int32_t uu____7[3U][256U]; + memcpy(uu____7, secret_as_ntt, (size_t)3U * sizeof (int32_t [256U])); + int32_t uu____8[3U][256U]; + memcpy(uu____8, t_as_ntt, (size_t)3U * sizeof (int32_t [256U])); + int32_t uu____9[3U][3U][256U]; + memcpy(uu____9, a_matrix, (size_t)3U * sizeof (int32_t [3U][256U])); + __libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__3size_t_ + uu____10; + memcpy(uu____10.fst, uu____7, (size_t)3U * sizeof (int32_t [256U])); + memcpy(uu____10.snd, uu____8, (size_t)3U * sizeof (int32_t [256U])); + memcpy(uu____10.thd, uu____9, (size_t)3U * sizeof (int32_t [3U][256U])); + uint8_t uu____11[1184U]; + memcpy(uu____11, public_key_serialized, (size_t)1184U * sizeof (uint8_t)); + __libcrux_kyber_arithmetic_PolynomialRingElement_3size_t____libcrux_kyber_arithmetic_PolynomialRingElement_3size_t____libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__3size_t__uint8_t_1184size_t_ + lit; + lit.fst = uu____10; + memcpy(lit.snd, uu____11, (size_t)1184U * sizeof (uint8_t)); + return lit; +} + +typedef struct __uint8_t_1152size_t__uint8_t_1184size_t__s +{ + uint8_t fst[1152U]; + uint8_t snd[1184U]; +} +__uint8_t_1152size_t__uint8_t_1184size_t_; + +static __uint8_t_1152size_t__uint8_t_1184size_t_ +generate_keypair___3size_t_1152size_t_1184size_t_1152size_t_2size_t_128size_t( + Eurydice_slice key_generation_seed +) +{ + __libcrux_kyber_arithmetic_PolynomialRingElement_3size_t____libcrux_kyber_arithmetic_PolynomialRingElement_3size_t____libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__3size_t__uint8_t_1184size_t_ + uu____0 = + generate_keypair_unpacked___3size_t_1184size_t_1152size_t_2size_t_128size_t(key_generation_seed); + int32_t secret_as_ntt[3U][256U]; + memcpy(secret_as_ntt, uu____0.fst.fst, (size_t)3U * sizeof (int32_t [256U])); + int32_t _t_as_ntt[3U][256U]; + memcpy(_t_as_ntt, uu____0.fst.snd, (size_t)3U * sizeof (int32_t [256U])); + int32_t _a_transpose[3U][3U][256U]; + memcpy(_a_transpose, uu____0.fst.thd, (size_t)3U * sizeof (int32_t [3U][256U])); + uint8_t public_key_serialized[1184U]; + memcpy(public_key_serialized, uu____0.snd, (size_t)1184U * sizeof (uint8_t)); + int32_t uu____1[3U][256U]; + memcpy(uu____1, secret_as_ntt, (size_t)3U * sizeof (int32_t [256U])); + uint8_t secret_key_serialized[1152U]; + serialize_secret_key___3size_t_1152size_t(uu____1, secret_key_serialized); + uint8_t uu____2[1152U]; + memcpy(uu____2, secret_key_serialized, (size_t)1152U * sizeof (uint8_t)); + uint8_t uu____3[1184U]; + memcpy(uu____3, public_key_serialized, (size_t)1184U * sizeof (uint8_t)); + __uint8_t_1152size_t__uint8_t_1184size_t_ lit; + memcpy(lit.fst, uu____2, (size_t)1152U * sizeof (uint8_t)); + memcpy(lit.snd, uu____3, (size_t)1184U * sizeof (uint8_t)); + return lit; +} + +static inline void +serialize_kem_secret_key___2400size_t( + Eurydice_slice 
private_key, + Eurydice_slice public_key, + Eurydice_slice implicit_rejection_value, + uint8_t ret[2400U] +) +{ + uint8_t out[2400U] = { 0U }; + size_t pointer = (size_t)0U; + uint8_t *uu____0 = out; + size_t uu____1 = pointer; + size_t uu____2 = pointer; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)2400U, + uu____0, + ( + (core_ops_range_Range__size_t){ + .start = uu____1, + .end = uu____2 + core_slice___Slice_T___len(private_key, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + private_key, + uint8_t, + void *); + pointer = pointer + core_slice___Slice_T___len(private_key, uint8_t, size_t); + uint8_t *uu____3 = out; + size_t uu____4 = pointer; + size_t uu____5 = pointer; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)2400U, + uu____3, + ( + (core_ops_range_Range__size_t){ + .start = uu____4, + .end = uu____5 + core_slice___Slice_T___len(public_key, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + public_key, + uint8_t, + void *); + pointer = pointer + core_slice___Slice_T___len(public_key, uint8_t, size_t); + Eurydice_slice + uu____6 = + Eurydice_array_to_subslice((size_t)2400U, + out, + ( + (core_ops_range_Range__size_t){ + .start = pointer, + .end = pointer + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + uint8_t ret0[32U]; + libcrux_kyber_hash_functions_H(public_key, ret0); + core_slice___Slice_T___copy_from_slice(uu____6, + Eurydice_array_to_slice((size_t)32U, ret0, uint8_t, Eurydice_slice), + uint8_t, + void *); + pointer = pointer + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE; + uint8_t *uu____7 = out; + size_t uu____8 = pointer; + size_t uu____9 = pointer; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)2400U, + uu____7, + ( + (core_ops_range_Range__size_t){ + .start = uu____8, + .end = uu____9 + core_slice___Slice_T___len(implicit_rejection_value, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + implicit_rejection_value, + uint8_t, + void *); + memcpy(ret, out, (size_t)2400U * sizeof (uint8_t)); +} + +typedef uint8_t MlKemPrivateKey___2400size_t[2400U]; + +static void from___2400size_t(uint8_t value[2400U], uint8_t ret[2400U]) +{ + uint8_t uu____0[2400U]; + memcpy(uu____0, value, (size_t)2400U * sizeof (uint8_t)); + memcpy(ret, uu____0, (size_t)2400U * sizeof (uint8_t)); +} + +static libcrux_kyber_types_MlKemKeyPair___2400size_t_1184size_t +from___2400size_t_1184size_t(uint8_t sk[2400U], uint8_t pk[1184U]) +{ + libcrux_kyber_types_MlKemKeyPair___2400size_t_1184size_t lit; + memcpy(lit.sk, sk, (size_t)2400U * sizeof (uint8_t)); + memcpy(lit.pk, pk, (size_t)1184U * sizeof (uint8_t)); + return lit; +} + +static libcrux_kyber_types_MlKemKeyPair___2400size_t_1184size_t +generate_keypair___3size_t_1152size_t_2400size_t_1184size_t_1152size_t_2size_t_128size_t( + uint8_t randomness[64U] +) +{ + Eurydice_slice + ind_cpa_keypair_randomness = + Eurydice_array_to_subslice((size_t)64U, + randomness, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = LIBCRUX_KYBER_CONSTANTS_CPA_PKE_KEY_GENERATION_SEED_SIZE + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + Eurydice_slice + implicit_rejection_value = + Eurydice_array_to_subslice_from((size_t)64U, + randomness, + LIBCRUX_KYBER_CONSTANTS_CPA_PKE_KEY_GENERATION_SEED_SIZE, + uint8_t, + size_t, + Eurydice_slice); + 
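/* split the 64 bytes of input randomness: the first 32 seed the IND-CPA keypair, the last 32 become the implicit-rejection value stored in the private key */ +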
__uint8_t_1152size_t__uint8_t_1184size_t_ + uu____0 = + generate_keypair___3size_t_1152size_t_1184size_t_1152size_t_2size_t_128size_t(ind_cpa_keypair_randomness); + uint8_t ind_cpa_private_key[1152U]; + memcpy(ind_cpa_private_key, uu____0.fst, (size_t)1152U * sizeof (uint8_t)); + uint8_t public_key[1184U]; + memcpy(public_key, uu____0.snd, (size_t)1184U * sizeof (uint8_t)); + Eurydice_slice + uu____1 = Eurydice_array_to_slice((size_t)1152U, ind_cpa_private_key, uint8_t, Eurydice_slice); + uint8_t secret_key_serialized[2400U]; + serialize_kem_secret_key___2400size_t(uu____1, + Eurydice_array_to_slice((size_t)1184U, public_key, uint8_t, Eurydice_slice), + implicit_rejection_value, + secret_key_serialized); + uint8_t uu____2[2400U]; + memcpy(uu____2, secret_key_serialized, (size_t)2400U * sizeof (uint8_t)); + uint8_t private_key[2400U]; + from___2400size_t(uu____2, private_key); + uint8_t uu____3[2400U]; + memcpy(uu____3, private_key, (size_t)2400U * sizeof (uint8_t)); + uint8_t uu____4[1184U]; + memcpy(uu____4, public_key, (size_t)1184U * sizeof (uint8_t)); + return from___2400size_t_1184size_t(uu____3, uu____4); +} + +libcrux_kyber_types_MlKemKeyPair___2400size_t_1184size_t +libcrux_kyber_kyber768_generate_key_pair(uint8_t randomness[64U]) +{ + uint8_t uu____0[64U]; + memcpy(uu____0, randomness, (size_t)64U * sizeof (uint8_t)); + return + generate_keypair___3size_t_1152size_t_2400size_t_1184size_t_1152size_t_2size_t_128size_t(uu____0); +} + +void +core_result__core__result__Result_T__E___unwrap__uint8_t_32size_t__core_array_TryFromSliceError( + core_result_Result__uint8_t_32size_t__core_array_TryFromSliceError self, + uint8_t ret[32U] +) +{ + if (self.tag == core_result_Ok) + { + uint8_t f0[32U]; + memcpy(f0, self.val.case_Ok, (size_t)32U * sizeof (uint8_t)); + memcpy(ret, f0, (size_t)32U * sizeof (uint8_t)); + } + else + { + KRML_HOST_EPRINTF("KaRaMeL abort at %s:%d\n%s\n", __FILE__, __LINE__, "unwrap not Ok"); + KRML_HOST_EXIT(255U); + } +} + +static void from___1184size_t(uint8_t value[1184U], uint8_t ret[1184U]) +{ + uint8_t uu____0[1184U]; + memcpy(uu____0, value, (size_t)1184U * sizeof (uint8_t)); + memcpy(ret, uu____0, (size_t)1184U * sizeof (uint8_t)); +} + +static K___libcrux_kyber_MlKemState__3size_t___libcrux_kyber_types_MlKemPublicKey__1184size_t__ +generate_keypair_unpacked___3size_t_1152size_t_2400size_t_1184size_t_1152size_t_2size_t_128size_t( + uint8_t randomness[64U] +) +{ + Eurydice_slice + ind_cpa_keypair_randomness = + Eurydice_array_to_subslice((size_t)64U, + randomness, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = LIBCRUX_KYBER_CONSTANTS_CPA_PKE_KEY_GENERATION_SEED_SIZE + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + Eurydice_slice + implicit_rejection_value = + Eurydice_array_to_subslice_from((size_t)64U, + randomness, + LIBCRUX_KYBER_CONSTANTS_CPA_PKE_KEY_GENERATION_SEED_SIZE, + uint8_t, + size_t, + Eurydice_slice); + __libcrux_kyber_arithmetic_PolynomialRingElement_3size_t____libcrux_kyber_arithmetic_PolynomialRingElement_3size_t____libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__3size_t__uint8_t_1184size_t_ + uu____0 = + generate_keypair_unpacked___3size_t_1184size_t_1152size_t_2size_t_128size_t(ind_cpa_keypair_randomness); + int32_t secret_as_ntt[3U][256U]; + memcpy(secret_as_ntt, uu____0.fst.fst, (size_t)3U * sizeof (int32_t [256U])); + int32_t t_as_ntt[3U][256U]; + memcpy(t_as_ntt, uu____0.fst.snd, (size_t)3U * sizeof (int32_t [256U])); + int32_t a_transpose[3U][3U][256U]; + memcpy(a_transpose, 
uu____0.fst.thd, (size_t)3U * sizeof (int32_t [3U][256U])); + uint8_t ind_cpa_public_key[1184U]; + memcpy(ind_cpa_public_key, uu____0.snd, (size_t)1184U * sizeof (uint8_t)); + uint8_t ind_cpa_public_key_hash[32U]; + libcrux_kyber_hash_functions_H(Eurydice_array_to_slice((size_t)1184U, + ind_cpa_public_key, + uint8_t, + Eurydice_slice), + ind_cpa_public_key_hash); + uint8_t rej[32U]; + core_result_Result__uint8_t_32size_t__core_array_TryFromSliceError dst; + Eurydice_slice_to_array2(&dst, + implicit_rejection_value, + Eurydice_slice, + uint8_t [32U], + void *); + core_result__core__result__Result_T__E___unwrap__uint8_t_32size_t__core_array_TryFromSliceError(dst, + rej); + uint8_t uu____1[1184U]; + memcpy(uu____1, ind_cpa_public_key, (size_t)1184U * sizeof (uint8_t)); + uint8_t pubkey[1184U]; + from___1184size_t(uu____1, pubkey); + int32_t uu____2[3U][256U]; + memcpy(uu____2, secret_as_ntt, (size_t)3U * sizeof (int32_t [256U])); + int32_t uu____3[3U][256U]; + memcpy(uu____3, t_as_ntt, (size_t)3U * sizeof (int32_t [256U])); + int32_t uu____4[3U][3U][256U]; + memcpy(uu____4, a_transpose, (size_t)3U * sizeof (int32_t [3U][256U])); + uint8_t uu____5[32U]; + memcpy(uu____5, rej, (size_t)32U * sizeof (uint8_t)); + uint8_t uu____6[32U]; + memcpy(uu____6, ind_cpa_public_key_hash, (size_t)32U * sizeof (uint8_t)); + K___libcrux_kyber_MlKemState__3size_t___libcrux_kyber_types_MlKemPublicKey__1184size_t__ lit; + memcpy(lit.fst.secret_as_ntt, uu____2, (size_t)3U * sizeof (int32_t [256U])); + memcpy(lit.fst.t_as_ntt, uu____3, (size_t)3U * sizeof (int32_t [256U])); + memcpy(lit.fst.a_transpose, uu____4, (size_t)3U * sizeof (int32_t [3U][256U])); + memcpy(lit.fst.rej, uu____5, (size_t)32U * sizeof (uint8_t)); + memcpy(lit.fst.ind_cpa_public_key_hash, uu____6, (size_t)32U * sizeof (uint8_t)); + memcpy(lit.snd, pubkey, (size_t)1184U * sizeof (uint8_t)); + return lit; +} + +K___libcrux_kyber_MlKemState__3size_t___libcrux_kyber_types_MlKemPublicKey__1184size_t__ +libcrux_kyber_kyber768_generate_key_pair_unpacked(uint8_t randomness[64U]) +{ + uint8_t uu____0[64U]; + memcpy(uu____0, randomness, (size_t)64U * sizeof (uint8_t)); + return + generate_keypair_unpacked___3size_t_1152size_t_2400size_t_1184size_t_1152size_t_2size_t_128size_t(uu____0); +} + +void libcrux_kyber_ind_cpa_into_padded_array___64size_t(Eurydice_slice slice, uint8_t ret[64U]) +{ + uint8_t out[64U] = { 0U }; + uint8_t *uu____0 = out; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)64U, + uu____0, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = core_slice___Slice_T___len(slice, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + slice, + uint8_t, + void *); + memcpy(ret, out, (size_t)64U * sizeof (uint8_t)); +} + +static uint8_t *as_slice___1184size_t(uint8_t (*self)[1184U]) +{ + return self[0U]; +} + +static inline void +deserialize_ring_elements_reduced___1152size_t_3size_t( + Eurydice_slice public_key, + int32_t ret[3U][256U] +) +{ + int32_t deserialized_pk[3U][256U]; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + memcpy(deserialized_pk[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(public_key, + uint8_t, + size_t) + / LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT; + i++) + { + size_t i0 = i; + Eurydice_slice + ring_element = + Eurydice_slice_subslice(public_key, + ( + 
(core_ops_range_Range__size_t){ + .start = i0 * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT, + .end = i0 + * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + + LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t uu____0[256U]; + libcrux_kyber_serialize_deserialize_to_reduced_ring_element(ring_element, uu____0); + memcpy(deserialized_pk[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, deserialized_pk, (size_t)3U * sizeof (int32_t [256U])); +} + +static inline void +sample_ring_element_cbd___3size_t_128size_t_2size_t( + uint8_t *prf_input, + uint8_t *domain_separator, + int32_t ret[3U][256U] +) +{ + int32_t error_1[3U][256U]; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + memcpy(error_1[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + size_t i0 = i; + prf_input[32U] = domain_separator[0U]; + domain_separator[0U] = (uint32_t)domain_separator[0U] + 1U; + uint8_t prf_output[128U]; + libcrux_kyber_hash_functions_PRF___128size_t(Eurydice_array_to_slice((size_t)33U, + prf_input, + uint8_t, + Eurydice_slice), + prf_output); + int32_t uu____0[256U]; + libcrux_kyber_sampling_sample_from_binomial_distribution___2size_t(Eurydice_array_to_slice((size_t)128U, + prf_output, + uint8_t, + Eurydice_slice), + uu____0); + memcpy(error_1[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, error_1, (size_t)3U * sizeof (int32_t [256U])); +} + +static inline void invert_ntt_montgomery___3size_t(int32_t re[256U], int32_t ret[256U]) +{ + size_t zeta_i = LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT / (size_t)2U; + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)1U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)2U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)3U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)4U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)5U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)6U, re); + libcrux_kyber_ntt_invert_ntt_at_layer(&zeta_i, re, (size_t)7U, re); + for (size_t i = (size_t)0U; i < (size_t)2U; i++) + { + size_t i0 = i; + int32_t uu____0 = libcrux_kyber_arithmetic_barrett_reduce(re[i0]); + re[i0] = uu____0; + } + memcpy(ret, re, (size_t)256U * sizeof (int32_t)); +} + +static inline void +compute_vector_u___3size_t( + int32_t (*a_as_ntt)[3U][256U], + int32_t (*r_as_ntt)[256U], + int32_t (*error_1)[256U], + int32_t ret[3U][256U] +) +{ + int32_t result[3U][256U]; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + memcpy(result[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i0 = (size_t)0U; + i0 + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)3U, + a_as_ntt, + Eurydice_error_t_cg_array, + Eurydice_slice), + int32_t [3U][256U], + size_t); + i0++) + { + size_t i1 = i0; + int32_t (*row)[256U] = a_as_ntt[i1]; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)3U, + row, + int32_t [256U], + Eurydice_slice), + int32_t [256U], + size_t); + i++) + { + size_t j = i; + int32_t (*a_element)[256U] = &row[j]; + int32_t product[256U]; + libcrux_kyber_ntt_ntt_multiply(a_element, &r_as_ntt[j], product); + int32_t uu____0[256U]; + 
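+ /* Accumulate the NTT-domain products row by row: result[i1] += a_as_ntt[i1][j] * r_as_ntt[j];
+ the finished row is taken out of the NTT domain and combined with error_1 further below. */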
add_to_ring_element___3size_t(result[i1], &product, uu____0); + memcpy(result[i1], uu____0, (size_t)256U * sizeof (int32_t)); + } + int32_t uu____1[256U]; + invert_ntt_montgomery___3size_t(result[i1], uu____1); + memcpy(result[i1], uu____1, (size_t)256U * sizeof (int32_t)); + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t j = i; + int32_t + coefficient_normal_form = + libcrux_kyber_arithmetic_montgomery_reduce(result[i1][j] * (int32_t)1441); + int32_t + uu____2 = libcrux_kyber_arithmetic_barrett_reduce(coefficient_normal_form + error_1[i1][j]); + result[i1][j] = uu____2; + } + } + memcpy(ret, result, (size_t)3U * sizeof (int32_t [256U])); +} + +static inline void +compute_ring_element_v___3size_t( + int32_t (*t_as_ntt)[256U], + int32_t (*r_as_ntt)[256U], + int32_t (*error_2)[256U], + int32_t (*message)[256U], + int32_t ret[256U] +) +{ + int32_t result[256U]; + memcpy(result, + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + size_t i0 = i; + int32_t product[256U]; + libcrux_kyber_ntt_ntt_multiply(&t_as_ntt[i0], &r_as_ntt[i0], product); + add_to_ring_element___3size_t(result, &product, result); + } + invert_ntt_montgomery___3size_t(result, result); + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t i0 = i; + int32_t + coefficient_normal_form = + libcrux_kyber_arithmetic_montgomery_reduce(result[i0] * (int32_t)1441); + int32_t + uu____0 = + libcrux_kyber_arithmetic_barrett_reduce(coefficient_normal_form + + error_2[0U][i0] + + message[0U][i0]); + result[i0] = uu____0; + } + memcpy(ret, result, (size_t)256U * sizeof (int32_t)); +} + +static inline void compress_then_serialize_10___320size_t(int32_t re[256U], uint8_t ret[320U]) +{ + uint8_t serialized[320U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)256U, re, int32_t, Eurydice_slice), + int32_t, + size_t) + / (size_t)4U; + i++) + { + size_t i0 = i; + Eurydice_slice + coefficients = + Eurydice_array_to_subslice((size_t)256U, + re, + ( + (core_ops_range_Range__size_t){ + .start = i0 * (size_t)4U, + .end = i0 * (size_t)4U + (size_t)4U + } + ), + int32_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t + coefficient1 = + libcrux_kyber_compress_compress_ciphertext_coefficient(10U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)0U, + int32_t, + int32_t))); + int32_t + coefficient2 = + libcrux_kyber_compress_compress_ciphertext_coefficient(10U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)1U, + int32_t, + int32_t))); + int32_t + coefficient3 = + libcrux_kyber_compress_compress_ciphertext_coefficient(10U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)2U, + int32_t, + int32_t))); + int32_t + coefficient4 = + libcrux_kyber_compress_compress_ciphertext_coefficient(10U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)3U, + int32_t, + int32_t))); + K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t + uu____0 = + libcrux_kyber_serialize_compress_coefficients_10(coefficient1, + coefficient2, + coefficient3, + coefficient4); + uint8_t coef1 = uu____0.fst; + uint8_t coef2 = uu____0.snd; + uint8_t coef3 = uu____0.thd; + uint8_t coef4 = uu____0.f3; 
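+ /* 4 coefficients * 10 bits = 40 bits, i.e. 5 packed bytes per block. */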
+ uint8_t coef5 = uu____0.f4; + serialized[(size_t)5U * i0] = coef1; + serialized[(size_t)5U * i0 + (size_t)1U] = coef2; + serialized[(size_t)5U * i0 + (size_t)2U] = coef3; + serialized[(size_t)5U * i0 + (size_t)3U] = coef4; + serialized[(size_t)5U * i0 + (size_t)4U] = coef5; + } + memcpy(ret, serialized, (size_t)320U * sizeof (uint8_t)); +} + +static inline void compress_then_serialize_11___320size_t(int32_t re[256U], uint8_t ret[320U]) +{ + uint8_t serialized[320U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)256U, re, int32_t, Eurydice_slice), + int32_t, + size_t) + / (size_t)8U; + i++) + { + size_t i0 = i; + Eurydice_slice + coefficients = + Eurydice_array_to_subslice((size_t)256U, + re, + ( + (core_ops_range_Range__size_t){ + .start = i0 * (size_t)8U, + .end = i0 * (size_t)8U + (size_t)8U + } + ), + int32_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t + coefficient1 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)0U, + int32_t, + int32_t))); + int32_t + coefficient2 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)1U, + int32_t, + int32_t))); + int32_t + coefficient3 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)2U, + int32_t, + int32_t))); + int32_t + coefficient4 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)3U, + int32_t, + int32_t))); + int32_t + coefficient5 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)4U, + int32_t, + int32_t))); + int32_t + coefficient6 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)5U, + int32_t, + int32_t))); + int32_t + coefficient7 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)6U, + int32_t, + int32_t))); + int32_t + coefficient8 = + libcrux_kyber_compress_compress_ciphertext_coefficient(11U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)7U, + int32_t, + int32_t))); + K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t_uint8_t + uu____0 = + libcrux_kyber_serialize_compress_coefficients_11(coefficient1, + coefficient2, + coefficient3, + coefficient4, + coefficient5, + coefficient6, + coefficient7, + coefficient8); + uint8_t coef1 = uu____0.fst; + uint8_t coef2 = uu____0.snd; + uint8_t coef3 = uu____0.thd; + uint8_t coef4 = uu____0.f3; + uint8_t coef5 = uu____0.f4; + uint8_t coef6 = uu____0.f5; + uint8_t coef7 = uu____0.f6; + uint8_t coef8 = uu____0.f7; + uint8_t coef9 = uu____0.f8; + uint8_t coef10 = uu____0.f9; + uint8_t coef11 = uu____0.f10; + serialized[(size_t)11U * i0] = coef1; + serialized[(size_t)11U * i0 + (size_t)1U] = coef2; + serialized[(size_t)11U * i0 + (size_t)2U] = coef3; + serialized[(size_t)11U * i0 + (size_t)3U] = coef4; + serialized[(size_t)11U * i0 + (size_t)4U] = coef5; + 
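+ /* 8 coefficients * 11 bits = 88 bits, i.e. 11 packed bytes per block. */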
serialized[(size_t)11U * i0 + (size_t)5U] = coef6; + serialized[(size_t)11U * i0 + (size_t)6U] = coef7; + serialized[(size_t)11U * i0 + (size_t)7U] = coef8; + serialized[(size_t)11U * i0 + (size_t)8U] = coef9; + serialized[(size_t)11U * i0 + (size_t)9U] = coef10; + serialized[(size_t)11U * i0 + (size_t)10U] = coef11; + } + memcpy(ret, serialized, (size_t)320U * sizeof (uint8_t)); +} + +void +libcrux_kyber_serialize_compress_then_serialize_ring_element_u___10size_t_320size_t( + int32_t re[256U], + uint8_t ret[320U] +) +{ + uint8_t uu____0[320U]; + compress_then_serialize_10___320size_t(re, uu____0); + memcpy(ret, uu____0, (size_t)320U * sizeof (uint8_t)); +} + +static void +compress_then_serialize_u___3size_t_960size_t_10size_t_320size_t( + int32_t input[3U][256U], + uint8_t ret[960U] +) +{ + uint8_t out[960U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)3U, + input, + int32_t [256U], + Eurydice_slice), + int32_t [256U], + size_t); + i++) + { + size_t i0 = i; + int32_t re[256U]; + memcpy(re, input[i0], (size_t)256U * sizeof (int32_t)); + Eurydice_slice + uu____0 = + Eurydice_array_to_subslice((size_t)960U, + out, + ( + (core_ops_range_Range__size_t){ + .start = i0 * ((size_t)960U / (size_t)3U), + .end = (i0 + (size_t)1U) * ((size_t)960U / (size_t)3U) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + uint8_t ret0[320U]; + libcrux_kyber_serialize_compress_then_serialize_ring_element_u___10size_t_320size_t(re, ret0); + core_slice___Slice_T___copy_from_slice(uu____0, + Eurydice_array_to_slice((size_t)320U, ret0, uint8_t, Eurydice_slice), + uint8_t, + void *); + } + memcpy(ret, out, (size_t)960U * sizeof (uint8_t)); +} + +static inline void compress_then_serialize_4___128size_t(int32_t re[256U], uint8_t ret[128U]) +{ + uint8_t serialized[128U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)256U, re, int32_t, Eurydice_slice), + int32_t, + size_t) + / (size_t)2U; + i++) + { + size_t i0 = i; + Eurydice_slice + coefficients = + Eurydice_array_to_subslice((size_t)256U, + re, + ( + (core_ops_range_Range__size_t){ + .start = i0 * (size_t)2U, + .end = i0 * (size_t)2U + (size_t)2U + } + ), + int32_t, + core_ops_range_Range__size_t, + Eurydice_slice); + uint8_t + coefficient1 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(4U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)0U, + int32_t, + int32_t))); + uint8_t + coefficient2 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(4U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)1U, + int32_t, + int32_t))); + serialized[i0] = (uint32_t)coefficient2 << 4U | (uint32_t)coefficient1; + } + memcpy(ret, serialized, (size_t)128U * sizeof (uint8_t)); +} + +static inline void compress_then_serialize_5___128size_t(int32_t re[256U], uint8_t ret[128U]) +{ + uint8_t serialized[128U] = { 0U }; + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)256U, re, int32_t, Eurydice_slice), + int32_t, + size_t) + / (size_t)8U; + i++) + { + size_t i0 = i; + Eurydice_slice + coefficients = + Eurydice_array_to_subslice((size_t)256U, + re, + ( + (core_ops_range_Range__size_t){ + .start = i0 * (size_t)8U, + .end = i0 * (size_t)8U + (size_t)8U + } + ), + int32_t, + core_ops_range_Range__size_t, + Eurydice_slice); + uint8_t + coefficient1 = + 
(uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)0U, + int32_t, + int32_t))); + uint8_t + coefficient2 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)1U, + int32_t, + int32_t))); + uint8_t + coefficient3 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)2U, + int32_t, + int32_t))); + uint8_t + coefficient4 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)3U, + int32_t, + int32_t))); + uint8_t + coefficient5 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)4U, + int32_t, + int32_t))); + uint8_t + coefficient6 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)5U, + int32_t, + int32_t))); + uint8_t + coefficient7 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)6U, + int32_t, + int32_t))); + uint8_t + coefficient8 = + (uint8_t)libcrux_kyber_compress_compress_ciphertext_coefficient(5U, + libcrux_kyber_arithmetic_to_unsigned_representative(Eurydice_slice_index(coefficients, + (size_t)7U, + int32_t, + int32_t))); + K___uint8_t_uint8_t_uint8_t_uint8_t_uint8_t + uu____0 = + libcrux_kyber_serialize_compress_coefficients_5(coefficient2, + coefficient1, + coefficient4, + coefficient3, + coefficient5, + coefficient7, + coefficient6, + coefficient8); + uint8_t coef1 = uu____0.fst; + uint8_t coef2 = uu____0.snd; + uint8_t coef3 = uu____0.thd; + uint8_t coef4 = uu____0.f3; + uint8_t coef5 = uu____0.f4; + serialized[(size_t)5U * i0] = coef1; + serialized[(size_t)5U * i0 + (size_t)1U] = coef2; + serialized[(size_t)5U * i0 + (size_t)2U] = coef3; + serialized[(size_t)5U * i0 + (size_t)3U] = coef4; + serialized[(size_t)5U * i0 + (size_t)4U] = coef5; + } + memcpy(ret, serialized, (size_t)128U * sizeof (uint8_t)); +} + +void +libcrux_kyber_serialize_compress_then_serialize_ring_element_v___4size_t_128size_t( + int32_t re[256U], + uint8_t ret[128U] +) +{ + uint8_t uu____0[128U]; + compress_then_serialize_4___128size_t(re, uu____0); + memcpy(ret, uu____0, (size_t)128U * sizeof (uint8_t)); +} + +static inline void into_padded_array___1088size_t(Eurydice_slice slice, uint8_t ret[1088U]) +{ + uint8_t out[1088U] = { 0U }; + uint8_t *uu____0 = out; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)1088U, + uu____0, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = core_slice___Slice_T___len(slice, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + slice, + uint8_t, + void *); + memcpy(ret, out, (size_t)1088U * sizeof (uint8_t)); +} + +static void +encrypt_unpacked___3size_t_1088size_t_1152size_t_960size_t_128size_t_10size_t_4size_t_320size_t_2size_t_128size_t_2size_t_128size_t( + int32_t (*t_as_ntt)[256U], + int32_t (*a_transpose)[3U][256U], + uint8_t message[32U], + Eurydice_slice randomness, + 
uint8_t ret[1088U] +) +{ + uint8_t prf_input[33U]; + libcrux_kyber_ind_cpa_into_padded_array___33size_t(randomness, prf_input); + uint8_t uu____0[33U]; + memcpy(uu____0, prf_input, (size_t)33U * sizeof (uint8_t)); + __libcrux_kyber_arithmetic_PolynomialRingElement_3size_t__uint8_t + uu____1 = sample_vector_cbd_then_ntt___3size_t_2size_t_128size_t(uu____0, 0U); + int32_t r_as_ntt[3U][256U]; + memcpy(r_as_ntt, uu____1.fst, (size_t)3U * sizeof (int32_t [256U])); + uint8_t domain_separator = uu____1.snd; + int32_t error_1[3U][256U]; + sample_ring_element_cbd___3size_t_128size_t_2size_t(prf_input, &domain_separator, error_1); + prf_input[32U] = domain_separator; + uint8_t prf_output[128U]; + libcrux_kyber_hash_functions_PRF___128size_t(Eurydice_array_to_slice((size_t)33U, + prf_input, + uint8_t, + Eurydice_slice), + prf_output); + int32_t error_2[256U]; + libcrux_kyber_sampling_sample_from_binomial_distribution___2size_t(Eurydice_array_to_slice((size_t)128U, + prf_output, + uint8_t, + Eurydice_slice), + error_2); + int32_t u[3U][256U]; + compute_vector_u___3size_t(a_transpose, r_as_ntt, error_1, u); + uint8_t uu____2[32U]; + memcpy(uu____2, message, (size_t)32U * sizeof (uint8_t)); + int32_t message_as_ring_element[256U]; + libcrux_kyber_serialize_deserialize_then_decompress_message(uu____2, message_as_ring_element); + int32_t v[256U]; + compute_ring_element_v___3size_t(t_as_ntt, r_as_ntt, &error_2, &message_as_ring_element, v); + int32_t uu____3[3U][256U]; + memcpy(uu____3, u, (size_t)3U * sizeof (int32_t [256U])); + uint8_t c1[960U]; + compress_then_serialize_u___3size_t_960size_t_10size_t_320size_t(uu____3, c1); + uint8_t c2[128U]; + libcrux_kyber_serialize_compress_then_serialize_ring_element_v___4size_t_128size_t(v, c2); + uint8_t ciphertext[1088U]; + into_padded_array___1088size_t(Eurydice_array_to_slice((size_t)960U, + c1, + uint8_t, + Eurydice_slice), + ciphertext); + Eurydice_slice + uu____4 = + Eurydice_array_to_subslice_from((size_t)1088U, + ciphertext, + (size_t)960U, + uint8_t, + size_t, + Eurydice_slice); + core_slice___Slice_T___copy_from_slice(uu____4, + core_array___Array_T__N__23__as_slice((size_t)128U, c2, uint8_t, Eurydice_slice), + uint8_t, + void *); + memcpy(ret, ciphertext, (size_t)1088U * sizeof (uint8_t)); +} + +static void +encrypt___3size_t_1088size_t_1152size_t_960size_t_128size_t_10size_t_4size_t_320size_t_2size_t_128size_t_2size_t_128size_t( + Eurydice_slice public_key, + uint8_t message[32U], + Eurydice_slice randomness, + uint8_t ret[1088U] +) +{ + int32_t t_as_ntt[3U][256U]; + deserialize_ring_elements_reduced___1152size_t_3size_t(Eurydice_slice_subslice_to(public_key, + (size_t)1152U, + uint8_t, + size_t, + Eurydice_slice), + t_as_ntt); + Eurydice_slice + seed = Eurydice_slice_subslice_from(public_key, (size_t)1152U, uint8_t, size_t, Eurydice_slice); + int32_t a_transpose[3U][3U][256U]; + uint8_t ret0[34U]; + libcrux_kyber_ind_cpa_into_padded_array___34size_t(seed, ret0); + sample_matrix_A___3size_t(ret0, false, a_transpose); + int32_t (*uu____0)[256U] = t_as_ntt; + int32_t (*uu____1)[3U][256U] = a_transpose; + uint8_t uu____2[32U]; + memcpy(uu____2, message, (size_t)32U * sizeof (uint8_t)); + uint8_t ret1[1088U]; + encrypt_unpacked___3size_t_1088size_t_1152size_t_960size_t_128size_t_10size_t_4size_t_320size_t_2size_t_128size_t_2size_t_128size_t(uu____0, + uu____1, + uu____2, + randomness, + ret1); + memcpy(ret, ret1, (size_t)1088U * sizeof (uint8_t)); +} + +typedef uint8_t MlKemCiphertext___1088size_t[1088U]; + +static 
K___libcrux_kyber_types_MlKemCiphertext__1088size_t___uint8_t_32size_t_ +encapsulate___3size_t_1088size_t_1184size_t_1152size_t_960size_t_128size_t_10size_t_4size_t_320size_t_2size_t_128size_t_2size_t_128size_t( + uint8_t (*public_key)[1184U], + uint8_t randomness[32U] +) +{ + uint8_t to_hash[64U]; + libcrux_kyber_ind_cpa_into_padded_array___64size_t(Eurydice_array_to_slice((size_t)32U, + randomness, + uint8_t, + Eurydice_slice), + to_hash); + Eurydice_slice + uu____0 = + Eurydice_array_to_subslice_from((size_t)64U, + to_hash, + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE, + uint8_t, + size_t, + Eurydice_slice); + uint8_t ret[32U]; + libcrux_kyber_hash_functions_H(Eurydice_array_to_slice((size_t)1184U, + as_slice___1184size_t(public_key), + uint8_t, + Eurydice_slice), + ret); + core_slice___Slice_T___copy_from_slice(uu____0, + Eurydice_array_to_slice((size_t)32U, ret, uint8_t, Eurydice_slice), + uint8_t, + void *); + uint8_t hashed[64U]; + libcrux_kyber_hash_functions_G(Eurydice_array_to_slice((size_t)64U, + to_hash, + uint8_t, + Eurydice_slice), + hashed); + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____1 = + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)64U, + hashed, + uint8_t, + Eurydice_slice), + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice shared_secret = uu____1.fst; + Eurydice_slice pseudorandomness = uu____1.snd; + Eurydice_slice + uu____2 = + Eurydice_array_to_slice((size_t)1184U, + as_slice___1184size_t(public_key), + uint8_t, + Eurydice_slice); + uint8_t uu____3[32U]; + memcpy(uu____3, randomness, (size_t)32U * sizeof (uint8_t)); + uint8_t ciphertext[1088U]; + encrypt___3size_t_1088size_t_1152size_t_960size_t_128size_t_10size_t_4size_t_320size_t_2size_t_128size_t_2size_t_128size_t(uu____2, + uu____3, + pseudorandomness, + ciphertext); + uint8_t shared_secret_array[32U] = { 0U }; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_slice((size_t)32U, + shared_secret_array, + uint8_t, + Eurydice_slice), + shared_secret, + uint8_t, + void *); + uint8_t uu____4[1088U]; + memcpy(uu____4, ciphertext, (size_t)1088U * sizeof (uint8_t)); + uint8_t uu____5[1088U]; + memcpy(uu____5, uu____4, (size_t)1088U * sizeof (uint8_t)); + uint8_t uu____6[32U]; + memcpy(uu____6, shared_secret_array, (size_t)32U * sizeof (uint8_t)); + K___libcrux_kyber_types_MlKemCiphertext__1088size_t___uint8_t_32size_t_ lit; + memcpy(lit.fst, uu____5, (size_t)1088U * sizeof (uint8_t)); + memcpy(lit.snd, uu____6, (size_t)32U * sizeof (uint8_t)); + return lit; +} + +K___libcrux_kyber_types_MlKemCiphertext__1088size_t___uint8_t_32size_t_ +libcrux_kyber_kyber768_encapsulate(uint8_t (*public_key)[1184U], uint8_t randomness[32U]) +{ + uint8_t (*uu____0)[1184U] = public_key; + uint8_t uu____1[32U]; + memcpy(uu____1, randomness, (size_t)32U * sizeof (uint8_t)); + return + encapsulate___3size_t_1088size_t_1184size_t_1152size_t_960size_t_128size_t_10size_t_4size_t_320size_t_2size_t_128size_t_2size_t_128size_t(uu____0, + uu____1); +} + +static K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t +split_at___2400size_t(uint8_t (*self)[2400U], size_t mid) +{ + return + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)2400U, + self[0U], + uint8_t, + Eurydice_slice), + mid, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); +} + +static inline void +deserialize_secret_key___3size_t(Eurydice_slice secret_key, int32_t ret[3U][256U]) +{ + int32_t secret_as_ntt[3U][256U]; + for (size_t i = 
(size_t)0U; i < (size_t)3U; i++) + { + memcpy(secret_as_ntt[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(secret_key, + uint8_t, + size_t) + / LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT; + i++) + { + size_t i0 = i; + Eurydice_slice + secret_bytes = + Eurydice_slice_subslice(secret_key, + ( + (core_ops_range_Range__size_t){ + .start = i0 * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT, + .end = i0 + * LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + + LIBCRUX_KYBER_CONSTANTS_BYTES_PER_RING_ELEMENT + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t uu____0[256U]; + libcrux_kyber_serialize_deserialize_to_uncompressed_ring_element(secret_bytes, uu____0); + memcpy(secret_as_ntt[i0], uu____0, (size_t)256U * sizeof (int32_t)); + } + memcpy(ret, secret_as_ntt, (size_t)3U * sizeof (int32_t [256U])); +} + +void +libcrux_kyber_serialize_deserialize_then_decompress_ring_element_u___10size_t( + Eurydice_slice serialized, + int32_t ret[256U] +) +{ + int32_t uu____0[256U]; + libcrux_kyber_serialize_deserialize_then_decompress_10(serialized, uu____0); + memcpy(ret, uu____0, (size_t)256U * sizeof (int32_t)); +} + +void libcrux_kyber_ntt_ntt_vector_u___10size_t(int32_t re[256U], int32_t ret[256U]) +{ + size_t zeta_i = (size_t)0U; + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)7U, re); + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)6U, re); + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)5U, re); + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)4U, re); + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)3U, re); + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)2U, re); + libcrux_kyber_ntt_ntt_at_layer_3328(&zeta_i, re, (size_t)1U, re); + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t i0 = i; + int32_t uu____0 = libcrux_kyber_arithmetic_barrett_reduce(re[i0]); + re[i0] = uu____0; + } + memcpy(ret, re, (size_t)256U * sizeof (int32_t)); +} + +static inline void +deserialize_then_decompress_u___3size_t_1088size_t_10size_t( + uint8_t *ciphertext, + int32_t ret[3U][256U] +) +{ + int32_t u_as_ntt[3U][256U]; + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + memcpy(u_as_ntt[i], + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + } + for + (size_t + i = (size_t)0U; + i + < + core_slice___Slice_T___len(Eurydice_array_to_slice((size_t)1088U, + ciphertext, + uint8_t, + Eurydice_slice), + uint8_t, + size_t) + / (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * (size_t)10U / (size_t)8U); + i++) + { + size_t i0 = i; + Eurydice_slice + u_bytes = + Eurydice_array_to_subslice((size_t)1088U, + ciphertext, + ( + (core_ops_range_Range__size_t){ + .start = i0 + * (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * (size_t)10U / (size_t)8U), + .end = i0 + * (LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * (size_t)10U / (size_t)8U) + + LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT * (size_t)10U / (size_t)8U + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice); + int32_t u[256U]; + libcrux_kyber_serialize_deserialize_then_decompress_ring_element_u___10size_t(u_bytes, u); + int32_t uu____0[256U]; + libcrux_kyber_ntt_ntt_vector_u___10size_t(u, uu____0); + memcpy(u_as_ntt[i0], uu____0, (size_t)256U * sizeof (int32_t)); 
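+ /* u_as_ntt[i0] now holds NTT(Decompress_10(c1 chunk i0)), ready to be multiplied with the secret. */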
+ } + memcpy(ret, u_as_ntt, (size_t)3U * sizeof (int32_t [256U])); +} + +void +libcrux_kyber_serialize_deserialize_then_decompress_ring_element_v___4size_t( + Eurydice_slice serialized, + int32_t ret[256U] +) +{ + int32_t uu____0[256U]; + libcrux_kyber_serialize_deserialize_then_decompress_4(serialized, uu____0); + memcpy(ret, uu____0, (size_t)256U * sizeof (int32_t)); +} + +static inline void +compute_message___3size_t( + int32_t (*v)[256U], + int32_t (*secret_as_ntt)[256U], + int32_t (*u_as_ntt)[256U], + int32_t ret[256U] +) +{ + int32_t result[256U]; + memcpy(result, + libcrux_kyber_arithmetic__libcrux_kyber__arithmetic__PolynomialRingElement__ZERO, + (size_t)256U * sizeof (int32_t)); + for (size_t i = (size_t)0U; i < (size_t)3U; i++) + { + size_t i0 = i; + int32_t product[256U]; + libcrux_kyber_ntt_ntt_multiply(&secret_as_ntt[i0], &u_as_ntt[i0], product); + add_to_ring_element___3size_t(result, &product, result); + } + invert_ntt_montgomery___3size_t(result, result); + for (size_t i = (size_t)0U; i < LIBCRUX_KYBER_CONSTANTS_COEFFICIENTS_IN_RING_ELEMENT; i++) + { + size_t i0 = i; + int32_t + coefficient_normal_form = + libcrux_kyber_arithmetic_montgomery_reduce(result[i0] * (int32_t)1441); + int32_t uu____0 = libcrux_kyber_arithmetic_barrett_reduce(v[0U][i0] - coefficient_normal_form); + result[i0] = uu____0; + } + memcpy(ret, result, (size_t)256U * sizeof (int32_t)); +} + +static void +decrypt_unpacked___3size_t_1088size_t_960size_t_10size_t_4size_t( + int32_t (*secret_as_ntt)[256U], + uint8_t *ciphertext, + uint8_t ret[32U] +) +{ + int32_t u_as_ntt[3U][256U]; + deserialize_then_decompress_u___3size_t_1088size_t_10size_t(ciphertext, u_as_ntt); + int32_t v[256U]; + libcrux_kyber_serialize_deserialize_then_decompress_ring_element_v___4size_t(Eurydice_array_to_subslice_from((size_t)1088U, + ciphertext, + (size_t)960U, + uint8_t, + size_t, + Eurydice_slice), + v); + int32_t message[256U]; + compute_message___3size_t(&v, secret_as_ntt, u_as_ntt, message); + uint8_t ret0[32U]; + libcrux_kyber_serialize_compress_then_serialize_message(message, ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +static void +decrypt___3size_t_1088size_t_960size_t_10size_t_4size_t( + Eurydice_slice secret_key, + uint8_t *ciphertext, + uint8_t ret[32U] +) +{ + int32_t secret_as_ntt[3U][256U]; + deserialize_secret_key___3size_t(secret_key, secret_as_ntt); + uint8_t ret0[32U]; + decrypt_unpacked___3size_t_1088size_t_960size_t_10size_t_4size_t(secret_as_ntt, + ciphertext, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +static inline void into_padded_array___1120size_t(Eurydice_slice slice, uint8_t ret[1120U]) +{ + uint8_t out[1120U] = { 0U }; + uint8_t *uu____0 = out; + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice((size_t)1120U, + uu____0, + ( + (core_ops_range_Range__size_t){ + .start = (size_t)0U, + .end = core_slice___Slice_T___len(slice, uint8_t, size_t) + } + ), + uint8_t, + core_ops_range_Range__size_t, + Eurydice_slice), + slice, + uint8_t, + void *); + memcpy(ret, out, (size_t)1120U * sizeof (uint8_t)); +} + +static Eurydice_slice as_ref___1088size_t(uint8_t (*self)[1088U]) +{ + return Eurydice_array_to_slice((size_t)1088U, self[0U], uint8_t, Eurydice_slice); +} + +void libcrux_kyber_hash_functions_PRF___32size_t(Eurydice_slice input, uint8_t ret[32U]) +{ + uint8_t ret0[32U]; + libcrux_digest_shake256((size_t)32U, input, ret0, void *); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +static uint8_t 
+compare_ciphertexts_in_constant_time___1088size_t(Eurydice_slice lhs, Eurydice_slice rhs) +{ + uint8_t r = 0U; + for (size_t i = (size_t)0U; i < (size_t)1088U; i++) + { + size_t i0 = i; + uint8_t uu____0 = Eurydice_slice_index(lhs, i0, uint8_t, uint8_t); + r = + (uint32_t)r + | ((uint32_t)uu____0 ^ (uint32_t)Eurydice_slice_index(rhs, i0, uint8_t, uint8_t)); + } + return libcrux_kyber_constant_time_ops_is_non_zero(r); +} + +static void +decapsulate___3size_t_2400size_t_1152size_t_1184size_t_1088size_t_1152size_t_960size_t_128size_t_10size_t_4size_t_320size_t_2size_t_128size_t_2size_t_128size_t_1120size_t( + uint8_t (*secret_key)[2400U], + uint8_t (*ciphertext)[1088U], + uint8_t ret[32U] +) +{ + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____0 = split_at___2400size_t(secret_key, (size_t)1152U); + Eurydice_slice ind_cpa_secret_key = uu____0.fst; + Eurydice_slice secret_key0 = uu____0.snd; + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____1 = + core_slice___Slice_T___split_at(secret_key0, + (size_t)1184U, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice ind_cpa_public_key = uu____1.fst; + Eurydice_slice secret_key1 = uu____1.snd; + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____2 = + core_slice___Slice_T___split_at(secret_key1, + LIBCRUX_KYBER_CONSTANTS_H_DIGEST_SIZE, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice ind_cpa_public_key_hash = uu____2.fst; + Eurydice_slice implicit_rejection_value = uu____2.snd; + uint8_t decrypted[32U]; + decrypt___3size_t_1088size_t_960size_t_10size_t_4size_t(ind_cpa_secret_key, + ciphertext[0U], + decrypted); + uint8_t to_hash0[64U]; + libcrux_kyber_ind_cpa_into_padded_array___64size_t(Eurydice_array_to_slice((size_t)32U, + decrypted, + uint8_t, + Eurydice_slice), + to_hash0); + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice_from((size_t)64U, + to_hash0, + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + size_t, + Eurydice_slice), + ind_cpa_public_key_hash, + uint8_t, + void *); + uint8_t hashed[64U]; + libcrux_kyber_hash_functions_G(Eurydice_array_to_slice((size_t)64U, + to_hash0, + uint8_t, + Eurydice_slice), + hashed); + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____3 = + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)64U, + hashed, + uint8_t, + Eurydice_slice), + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice shared_secret = uu____3.fst; + Eurydice_slice pseudorandomness = uu____3.snd; + uint8_t to_hash[1120U]; + into_padded_array___1120size_t(implicit_rejection_value, to_hash); + Eurydice_slice + uu____4 = + Eurydice_array_to_subslice_from((size_t)1120U, + to_hash, + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + size_t, + Eurydice_slice); + core_slice___Slice_T___copy_from_slice(uu____4, + as_ref___1088size_t(ciphertext), + uint8_t, + void *); + uint8_t implicit_rejection_shared_secret[32U]; + libcrux_kyber_hash_functions_PRF___32size_t(Eurydice_array_to_slice((size_t)1120U, + to_hash, + uint8_t, + Eurydice_slice), + implicit_rejection_shared_secret); + Eurydice_slice uu____5 = ind_cpa_public_key; + uint8_t uu____6[32U]; + memcpy(uu____6, decrypted, (size_t)32U * sizeof (uint8_t)); + uint8_t expected_ciphertext[1088U]; + encrypt___3size_t_1088size_t_1152size_t_960size_t_128size_t_10size_t_4size_t_320size_t_2size_t_128size_t_2size_t_128size_t(uu____5, + uu____6, + pseudorandomness, + expected_ciphertext); + 
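+ /* Fujisaki-Okamoto re-encryption check: the comparison below is constant-time, and on mismatch
+ the PRF-derived implicit-rejection secret is selected instead of the decrypted shared secret. */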
Eurydice_slice uu____7 = as_ref___1088size_t(ciphertext); + uint8_t + selector = + compare_ciphertexts_in_constant_time___1088size_t(uu____7, + Eurydice_array_to_slice((size_t)1088U, expected_ciphertext, uint8_t, Eurydice_slice)); + Eurydice_slice uu____8 = shared_secret; + uint8_t ret0[32U]; + libcrux_kyber_constant_time_ops_select_shared_secret_in_constant_time(uu____8, + Eurydice_array_to_slice((size_t)32U, implicit_rejection_shared_secret, uint8_t, Eurydice_slice), + selector, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +void +libcrux_kyber_kyber768_decapsulate( + uint8_t (*secret_key)[2400U], + uint8_t (*ciphertext)[1088U], + uint8_t ret[32U] +) +{ + uint8_t ret0[32U]; + decapsulate___3size_t_2400size_t_1152size_t_1184size_t_1088size_t_1152size_t_960size_t_128size_t_10size_t_4size_t_320size_t_2size_t_128size_t_2size_t_128size_t_1120size_t(secret_key, + ciphertext, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +static void +decapsulate_unpacked___3size_t_2400size_t_1152size_t_1184size_t_1088size_t_1152size_t_960size_t_128size_t_10size_t_4size_t_320size_t_2size_t_128size_t_2size_t_128size_t_1120size_t( + libcrux_kyber_MlKemState___3size_t *state, + uint8_t (*ciphertext)[1088U], + uint8_t ret[32U] +) +{ + int32_t (*secret_as_ntt)[256U] = state->secret_as_ntt; + int32_t (*t_as_ntt)[256U] = state->t_as_ntt; + int32_t (*a_transpose)[3U][256U] = state->a_transpose; + Eurydice_slice + implicit_rejection_value = + Eurydice_array_to_slice((size_t)32U, + state->rej, + uint8_t, + Eurydice_slice); + Eurydice_slice + ind_cpa_public_key_hash = + Eurydice_array_to_slice((size_t)32U, + state->ind_cpa_public_key_hash, + uint8_t, + Eurydice_slice); + uint8_t decrypted[32U]; + decrypt_unpacked___3size_t_1088size_t_960size_t_10size_t_4size_t(secret_as_ntt, + ciphertext[0U], + decrypted); + uint8_t to_hash0[64U]; + libcrux_kyber_ind_cpa_into_padded_array___64size_t(Eurydice_array_to_slice((size_t)32U, + decrypted, + uint8_t, + Eurydice_slice), + to_hash0); + core_slice___Slice_T___copy_from_slice(Eurydice_array_to_subslice_from((size_t)64U, + to_hash0, + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + size_t, + Eurydice_slice), + ind_cpa_public_key_hash, + uint8_t, + void *); + uint8_t hashed[64U]; + libcrux_kyber_hash_functions_G(Eurydice_array_to_slice((size_t)64U, + to_hash0, + uint8_t, + Eurydice_slice), + hashed); + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t + uu____0 = + core_slice___Slice_T___split_at(Eurydice_array_to_slice((size_t)64U, + hashed, + uint8_t, + Eurydice_slice), + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + K___Eurydice_slice_uint8_t_Eurydice_slice_uint8_t); + Eurydice_slice shared_secret = uu____0.fst; + Eurydice_slice pseudorandomness = uu____0.snd; + uint8_t to_hash[1120U]; + into_padded_array___1120size_t(implicit_rejection_value, to_hash); + Eurydice_slice + uu____1 = + Eurydice_array_to_subslice_from((size_t)1120U, + to_hash, + LIBCRUX_KYBER_CONSTANTS_SHARED_SECRET_SIZE, + uint8_t, + size_t, + Eurydice_slice); + core_slice___Slice_T___copy_from_slice(uu____1, + as_ref___1088size_t(ciphertext), + uint8_t, + void *); + uint8_t implicit_rejection_shared_secret[32U]; + libcrux_kyber_hash_functions_PRF___32size_t(Eurydice_array_to_slice((size_t)1120U, + to_hash, + uint8_t, + Eurydice_slice), + implicit_rejection_shared_secret); + int32_t (*uu____2)[256U] = t_as_ntt; + int32_t (*uu____3)[3U][256U] = a_transpose; + uint8_t uu____4[32U]; + memcpy(uu____4, decrypted, (size_t)32U * sizeof (uint8_t)); + uint8_t 
expected_ciphertext[1088U]; + encrypt_unpacked___3size_t_1088size_t_1152size_t_960size_t_128size_t_10size_t_4size_t_320size_t_2size_t_128size_t_2size_t_128size_t(uu____2, + uu____3, + uu____4, + pseudorandomness, + expected_ciphertext); + Eurydice_slice uu____5 = as_ref___1088size_t(ciphertext); + uint8_t + selector = + compare_ciphertexts_in_constant_time___1088size_t(uu____5, + Eurydice_array_to_slice((size_t)1088U, expected_ciphertext, uint8_t, Eurydice_slice)); + Eurydice_slice uu____6 = shared_secret; + uint8_t ret0[32U]; + libcrux_kyber_constant_time_ops_select_shared_secret_in_constant_time(uu____6, + Eurydice_array_to_slice((size_t)32U, implicit_rejection_shared_secret, uint8_t, Eurydice_slice), + selector, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + +void +libcrux_kyber_kyber768_decapsulate_unpacked( + libcrux_kyber_MlKemState___3size_t *state, + uint8_t (*ciphertext)[1088U], + uint8_t ret[32U] +) +{ + uint8_t ret0[32U]; + decapsulate_unpacked___3size_t_2400size_t_1152size_t_1184size_t_1088size_t_1152size_t_960size_t_128size_t_10size_t_4size_t_320size_t_2size_t_128size_t_2size_t_128size_t_1120size_t(state, + ciphertext, + ret0); + memcpy(ret, ret0, (size_t)32U * sizeof (uint8_t)); +} + diff --git a/libcrux/standalone-kyber.sh b/libcrux/standalone-kyber.sh new file mode 100755 index 000000000..1efb77faa --- /dev/null +++ b/libcrux/standalone-kyber.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +set -e +set -o pipefail + +mkdir -p hacl +# SHA3, hash interface +cp ../src/Hacl_Hash_SHA3.c hacl/ +cp ../include/Hacl_Hash_SHA3.h include/ +cp ../include/internal/Hacl_Hash_SHA3.h include/internal/ +# SHA3, AVX2 implementation +cp ../src/Hacl_Hash_SHA3_Simd256.c hacl/ +cp ../include/Hacl_Hash_SHA3_Simd256.h include/ +# SHA3, scalar implementation +cp ../src/Hacl_Hash_SHA3_Scalar.c hacl/ +cp ../include/Hacl_Hash_SHA3_Scalar.h include/ +cp ../include/internal/Hacl_Hash_SHA3_Scalar.h include/internal/ +# Auxiliary +cp ../include/Hacl_Streaming_Types.h include/ +cp ../include/libintvector.h include/ +touch include/LowStar_Ignore.h +# krmllib +cp -r ../karamel/include/* include/ +cp -r ../karamel/krmllib/dist/minimal/* include/ + +tar cjvf standalone-kyber-$(date '+%Y%m%d%H%M').tar.bz2 --exclude "src/Libcrux_Kem_Kyber_Kyber768.c" --exclude "mitch-and-sam.sh" --exclude '*.tar.bz2' --exclude 'a.out' * diff --git a/mach b/mach index 005c26ba5..870ab2fd3 100755 --- a/mach +++ b/mach @@ -172,6 +172,7 @@ def build(args): Supported sanitizers: - asan - ubsan + - msan Use an edition if you want a different build. Note that this build will use the MSVC version by default on Windows. 
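A minimal sketch of the kind of defect the new msan option is meant to surface (not part of the patch; the file name msan_demo.c and the direct clang invocation are illustrative assumptions, while -fsanitize=memory and -fsanitize-memory-track-origins are clang's standard MemorySanitizer flags):

/* msan_demo.c
 * Build and run: clang -fsanitize=memory -fsanitize-memory-track-origins msan_demo.c && ./a.out */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
  uint8_t seed[32]; /* deliberately left uninitialized */
  uint32_t acc = 0U;
  for (uint32_t i = 0U; i < 32U; i++)
  {
    acc |= (uint32_t)seed[i]; /* MSan tracks the uninitialized bytes into acc */
  }
  printf("%u\n", acc); /* ...and reports use-of-uninitialized-value here */
  return 0;
}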
@@ -197,7 +198,7 @@ def build(args): if not os.path.exists("build"): os.mkdir("build") - cmake_args = [] + cmake_args = ["-DBUILD_LIBCRUX=1"] # Verbosity verbose = False if args.verbose: @@ -475,6 +476,7 @@ def build(args): # test if requested if args.test: run_tests(config.tests, build_config, coverage=args.coverage) + run_tests(config.libcrux_tests, build_config, coverage=args.coverage) # benchmark if requested if args.benchmark: diff --git a/rust/hacl-sys/src/bindings/bindings.rs b/rust/hacl-sys/src/bindings/bindings.rs index fd024e15c..31b89732f 100644 --- a/rust/hacl-sys/src/bindings/bindings.rs +++ b/rust/hacl-sys/src/bindings/bindings.rs @@ -884,10 +884,8 @@ extern "C" { output: *mut u8, ); } -pub type __m128i = [::std::os::raw::c_longlong; 2usize]; -pub type Lib_IntVector_Intrinsics_vec128 = __m128i; -pub type __m256i = [::std::os::raw::c_longlong; 4usize]; -pub type Lib_IntVector_Intrinsics_vec256 = __m256i; +pub type uint32x4_t = [u32; 4usize]; +pub type Lib_IntVector_Intrinsics_vec128 = uint32x4_t; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct Hacl_Hash_Blake2s_Simd128_block_state_t_s { @@ -944,8 +942,8 @@ extern "C" { #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct Hacl_Hash_Blake2b_Simd256_block_state_t_s { - pub fst: *mut Lib_IntVector_Intrinsics_vec256, - pub snd: *mut Lib_IntVector_Intrinsics_vec256, + pub fst: *mut *mut ::std::os::raw::c_void, + pub snd: *mut *mut ::std::os::raw::c_void, } pub type Hacl_Hash_Blake2b_Simd256_block_state_t = Hacl_Hash_Blake2b_Simd256_block_state_t_s; #[repr(C)] diff --git a/src/Hacl_Hash_SHA3_Scalar.c b/src/Hacl_Hash_SHA3_Scalar.c new file mode 100644 index 000000000..6d6806a37 --- /dev/null +++ b/src/Hacl_Hash_SHA3_Scalar.c @@ -0,0 +1,2798 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + +#include "internal/Hacl_Hash_SHA3_Scalar.h" + +const +uint32_t +Hacl_Impl_SHA3_Vec_keccak_rotc[24U] = + { + 1U, 3U, 6U, 10U, 15U, 21U, 28U, 36U, 45U, 55U, 2U, 14U, 27U, 41U, 56U, 8U, 25U, 43U, 62U, 18U, + 39U, 61U, 20U, 44U + }; + +const +uint32_t +Hacl_Impl_SHA3_Vec_keccak_piln[24U] = + { + 10U, 7U, 11U, 17U, 18U, 3U, 5U, 16U, 8U, 21U, 24U, 4U, 15U, 23U, 19U, 13U, 12U, 2U, 20U, 14U, + 22U, 9U, 6U, 1U + }; + +const +uint64_t +Hacl_Impl_SHA3_Vec_keccak_rndc[24U] = + { + 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, 0x8000000080008000ULL, + 0x000000000000808bULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL, + 0x000000000000008aULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL, + 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL, + 0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800aULL, 0x800000008000000aULL, + 0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL + }; + +void +Hacl_Hash_SHA3_Scalar_shake128( + uint8_t *output, + uint32_t outputByteLen, + uint8_t *input, + uint32_t inputByteLen +) +{ + uint32_t rateInBytes = 168U; + uint64_t s[25U] = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = 
_C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x1FU; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 
24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t 
uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = outputByteLen % rateInBytes; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Scalar_shake256( + uint8_t *output, + uint32_t outputByteLen, + uint8_t *input, + uint32_t inputByteLen +) +{ + uint32_t rateInBytes = 136U; + uint64_t s[25U] = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = 
u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x1FU; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t 
b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, 
ws[i]); + } + memcpy(output + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = outputByteLen % rateInBytes; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_Scalar_sha3_224(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint32_t rateInBytes = 144U; + uint64_t s[25U] = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 
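
For reference, a minimal caller of the one-shot `Hacl_Hash_SHA3_Scalar_shake256` completed above. The function performs no length checks, so buffer sizes are the caller's responsibility; the message and names below are illustrative only:

```c
#include <stdint.h>
#include "Hacl_Hash_SHA3_Scalar.h"

void shake256_example(void)
{
  uint8_t msg[3] = { 'a', 'b', 'c' };
  uint8_t out[64] = { 0U };
  /* Squeeze 64 bytes of XOF output from a 3-byte message. */
  Hacl_Hash_SHA3_Scalar_shake256(out, 64U, msg, 3U);
}
```
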
208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t 
u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = 
Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < 28U / rateInBytes; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 28U % rateInBytes; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_Scalar_sha3_256(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint32_t rateInBytes = 136U; + uint64_t s[25U] = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 
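
Across the fixed-output variants only the rate and the digest length change (144/28, 136/32, 104/48, 72/64 bytes); in each case `rateInBytes = 200 - 2 * digestLen`, i.e. the sponge capacity is twice the digest size. An illustrative check, not part of the diff:

```c
#include <assert.h>

static void check_sha3_rates(void)
{
  assert(144 == 200 - 2 * 28); /* SHA3-224 */
  assert(136 == 200 - 2 * 32); /* SHA3-256 */
  assert(104 == 200 - 2 * 48); /* SHA3-384 */
  assert(72 == 200 - 2 * 64);  /* SHA3-512 */
}
```
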
168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = 
load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + 
uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < 32U / rateInBytes; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 32U % rateInBytes; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + 32U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_Scalar_sha3_384(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint32_t rateInBytes = 104U; + uint64_t s[25U] = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + 
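
A usage sketch for `Hacl_Hash_SHA3_Scalar_sha3_256` as defined above; the 32-byte digest size is fixed by the algorithm, and the function and buffer names here are illustrative:

```c
#include <stdint.h>
#include "Hacl_Hash_SHA3_Scalar.h"

void sha3_256_example(void)
{
  uint8_t msg[5] = { 'h', 'e', 'l', 'l', 'o' };
  uint8_t digest[32] = { 0U };
  Hacl_Hash_SHA3_Scalar_sha3_256(digest, msg, 5U);
}
```
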
ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 
144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current 
= temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < 48U / rateInBytes; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 48U % rateInBytes; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + 48U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_Scalar_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint32_t rateInBytes = 72U; + uint64_t s[25U] = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; 
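
Each variant pads the trailing partial block via two buffers that are both xored into the state before a single permutation: the domain byte (`0x06` for SHA-3, `0x1F` for SHAKE) lands at offset `rem`, and `0x80` at the last rate position, so the two combine to `0x86` (resp. `0x9F`) whenever `rem == rateInBytes - 1U`. A sketch of the equivalent in-place pad10*1 step; this helper is hypothetical, not from the diff:

```c
#include <stdint.h>

/* Pads one rate-sized block in place; rem is the number of message
 * bytes already present in the block. */
static void sha3_pad_block(uint8_t *block, uint32_t rateInBytes,
                           uint32_t rem, uint8_t domain)
{
  block[rem] ^= domain;             /* 0x06 (SHA-3) or 0x1F (SHAKE) */
  block[rateInBytes - 1U] ^= 0x80U; /* final 1 bit of pad10*1 */
}
```
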
+ uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = 
u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current 
= x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < 64U / rateInBytes; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 64U % rateInBytes; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + 64U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +uint64_t *Hacl_Hash_SHA3_Scalar_state_malloc(void) +{ + uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t)); + return buf; +} + +void Hacl_Hash_SHA3_Scalar_state_free(uint64_t *s) +{ + KRML_HOST_FREE(s); +} + +void +Hacl_Hash_SHA3_Scalar_shake128_absorb_nblocks( + uint64_t *state, + uint8_t *input, + uint32_t inputByteLen +) +{ + for (uint32_t i0 = 0U; i0 < inputByteLen / 168U; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * 168U, 168U * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 
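
The streaming SHAKE128 API introduced here and below splits the sponge into explicit phases: `state_malloc` allocates the 25-lane state, `absorb_nblocks` consumes full 168-byte blocks, `absorb_final` pads and absorbs the remainder, and `squeeze_nblocks` emits full blocks (permuting after each one). A hedged usage sketch, assuming the caller feeds whole blocks to `absorb_nblocks` and the tail to `absorb_final`:

```c
#include <stdint.h>
#include "Hacl_Hash_SHA3_Scalar.h"

void shake128_streamed(uint8_t *data, uint32_t len,
                       uint8_t *out, uint32_t outLen /* multiple of 168 */)
{
  uint64_t *st = Hacl_Hash_SHA3_Scalar_state_malloc();
  uint32_t full = len - len % 168U;
  Hacl_Hash_SHA3_Scalar_shake128_absorb_nblocks(st, data, full);
  Hacl_Hash_SHA3_Scalar_shake128_absorb_final(st, data + full, len - full);
  Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks(st, out, outLen);
  Hacl_Hash_SHA3_Scalar_state_free(st);
}
```

Since `squeeze_nblocks` only writes `outputByteLen / 168U` complete blocks, any trailing partial output length is silently ignored.
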
24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = state[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = + state[i + + 0U] + ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i2 + 5U * i] = state[i2 + 5U * i] ^ _D;);); + uint64_t x = state[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = state[_Y]; + uint64_t uu____1 = current; + state[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]); + uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]); + uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]); + uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]); + uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]); + state[0U + 5U * i] = v0; + state[1U + 5U * i] = v1; + state[2U + 5U * i] = v2; + state[3U + 5U * i] = v3; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + state[0U] = state[0U] ^ c; + } + } +} + +void +Hacl_Hash_SHA3_Scalar_shake128_absorb_final( + uint64_t *state, + uint8_t *input, + uint32_t inputByteLen +) +{ + uint32_t rem = inputByteLen % 168U; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % 168U; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x1FU; + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws[0U] = u0; + 
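
`KRML_MAYBE_FOR5` is a krmllib macro; depending on the configured unroll depth it expands to five inlined copies of its body or to a plain counted loop. An assumed expansion of the non-unrolled case, shown on the theta xor from the code above:

```c
#include <stdint.h>

/* Assumed expansion of
 * KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i2 + 5U * i] = state[i2 + 5U * i] ^ _D;)
 * when krmllib loop unrolling is disabled. */
static void theta_xor_column(uint64_t *state, uint32_t i2, uint64_t _D)
{
  for (uint32_t i = 0U; i < 5U; i += 1U)
  {
    state[i2 + 5U * i] = state[i2 + 5U * i] ^ _D;
  }
}
```
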
uint64_t u1 = load64_le(b + 8U); + ws[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = state[i] ^ ws[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[167U] = 0x80U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws0[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws0[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws0[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws0[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws0[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws0[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws0[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws0[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws0[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws0[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws0[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws0[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws0[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws0[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws0[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws0[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws0[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws0[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws0[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws0[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws0[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws0[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws0[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws0[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws0[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws0[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws0[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws0[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws0[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws0[29U] = u60; 
+ uint64_t u61 = load64_le(b1 + 240U); + ws0[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws0[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = state[i] ^ ws0[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = state[i + 0U] ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i1 + 5U * i] = state[i1 + 5U * i] ^ _D;);); + uint64_t x = state[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = state[_Y]; + uint64_t uu____1 = current; + state[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]); + uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]); + uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]); + uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]); + uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]); + state[0U + 5U * i] = v0; + state[1U + 5U * i] = v1; + state[2U + 5U * i] = v2; + state[3U + 5U * i] = v3; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + state[0U] = state[0U] ^ c; + } +} + +void +Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks( + uint64_t *state, + uint8_t *output, + uint32_t outputByteLen +) +{ + for (uint32_t i0 = 0U; i0 < outputByteLen / 168U; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, state, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + i0 * 168U, hbuf, 168U * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = + state[i + + 0U] + ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i2 + 5U * i] = state[i2 + 5U * i] ^ _D;);); + uint64_t x = state[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = state[_Y]; + uint64_t uu____1 = current; + state[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]); + uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]); + uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]); + uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]); + uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]); + state[0U + 5U * i] = v0; + state[1U + 5U * i] = v1; + state[2U + 5U * i] = v2; + state[3U + 5U * i] = v3; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + state[0U] = state[0U] ^ c; + } + } +} + diff --git
a/src/Hacl_Hash_SHA3_Simd256.c b/src/Hacl_Hash_SHA3_Simd256.c new file mode 100644 index 000000000..0a530d970 --- /dev/null +++ b/src/Hacl_Hash_SHA3_Simd256.c @@ -0,0 +1,11356 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + +#include "Hacl_Hash_SHA3_Simd256.h" + +#include "internal/Hacl_Hash_SHA3_Scalar.h" + +void +Hacl_Hash_SHA3_Simd256_shake128( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + uint32_t rateInBytes = 168U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl1, b11 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl2, b21 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl3, b31 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + 
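/* second half of the 4x4 64-bit-lane transpose for ws[16..19]: after it, each vector holds one lane position from all four input blocks */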
Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + 
Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[rem] = 0x1FU; + b12[rem] = 0x1FU; + b22[rem] = 0x1FU; + b32[rem] = 0x1FU; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes - 1U] = 0x80U; + b15[rateInBytes - 1U] = 0x80U; + b25[rateInBytes - 1U] = 0x80U; + b35[rateInBytes - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = 
v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, 
v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, 
+ Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + 
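/* the same 4x4 lane transpose in the squeeze direction, regrouping state lanes 16..19 per output stream before the bytes are stored */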
Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 
v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes, hbuf + 256U, rateInBytes * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes, hbuf + 512U, rateInBytes * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes, hbuf + 768U, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + 
Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = outputByteLen % rateInBytes; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + 
Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + 
Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + outputByteLen - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + outputByteLen - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b36 + outputByteLen - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_shake256( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { 
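The 4-way SHAKE256 entry point that follows has the same shape as the shake128 code above: it absorbs four independent, equal-length inputs in parallel (one input per 64-bit lane of each 256-bit vector) and squeezes outputByteLen bytes into each of the four output buffers, so each lane yields the same digest a scalar SHAKE256 call would produce for the corresponding input. A minimal caller sketch, assuming the generated Hacl_Hash_SHA3_Simd256.h header and an AVX2-capable build (header name and build setup are assumptions, not part of this diff):

    /* Usage sketch only; not part of the generated code. */
    #include <stdint.h>
    #include "Hacl_Hash_SHA3_Simd256.h"  /* assumed header name */

    void demo(void)
    {
      /* All four inputs must share one length; likewise the four outputs. */
      uint8_t m0[3] = { 'a', 'b', 'c' };
      uint8_t m1[3] = { 'd', 'e', 'f' };
      uint8_t m2[3] = { 'g', 'h', 'i' };
      uint8_t m3[3] = { 'j', 'k', 'l' };
      uint8_t d0[32U], d1[32U], d2[32U], d3[32U];
      /* Squeeze 32 bytes of SHAKE256 output for each of the four messages. */
      Hacl_Hash_SHA3_Simd256_shake256(d0, d1, d2, d3, 32U, m0, m1, m2, m3, 3U);
    }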
+
+void
+Hacl_Hash_SHA3_Simd256_shake256(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint32_t outputByteLen,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+)
+{
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } };
+  uint32_t rateInBytes = 136U;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U };
+  for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++)
+  {
+    uint8_t b00[256U] = { 0U };
+    uint8_t b10[256U] = { 0U };
+    uint8_t b20[256U] = { 0U };
+    uint8_t b30[256U] = { 0U };
+    K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+    b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+    uint8_t *b31 = ib.snd.snd.snd;
+    uint8_t *b21 = ib.snd.snd.fst;
+    uint8_t *b11 = ib.snd.fst;
+    uint8_t *b01 = ib.fst;
+    uint8_t *bl3 = b_.snd.snd.snd;
+    uint8_t *bl2 = b_.snd.snd.fst;
+    uint8_t *bl1 = b_.snd.fst;
+    uint8_t *bl0 = b_.fst;
+    memcpy(bl0, b01 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl1, b11 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl2, b21 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl3, b31 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
+    uint8_t *b3 = b_.snd.snd.snd;
+    uint8_t *b2 = b_.snd.snd.fst;
+    uint8_t *b1 = b_.snd.fst;
+    uint8_t *b0 = b_.fst;
+    ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0);
+    ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1);
+    ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2);
+    ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3);
+    ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U);
+    ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U);
+    ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U);
+    ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U);
+    ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U);
+    ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U);
+    ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U);
+    ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U);
+    ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U);
+    ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U);
+    ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U);
+    ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U);
+    ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U);
+    ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U);
+    ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U);
+    ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U);
+    ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U);
+    ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U);
+    ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U);
+    ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U);
+    ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U);
+    ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U);
+    ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U);
+    ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U);
+    ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U);
+    ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U);
+    ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U);
+    ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U);
+    Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
+    Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
+    Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
+    Lib_IntVector_Intrinsics_vec256 v30 = ws[3U];
+    Lib_IntVector_Intrinsics_vec256
+    v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10);
+    Lib_IntVector_Intrinsics_vec256
+    v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10);
+    Lib_IntVector_Intrinsics_vec256
+    v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30);
+    Lib_IntVector_Intrinsics_vec256
+    v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30);
+    Lib_IntVector_Intrinsics_vec256
+    v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_);
+    Lib_IntVector_Intrinsics_vec256
+    v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_);
+    Lib_IntVector_Intrinsics_vec256
+    v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_);
+    Lib_IntVector_Intrinsics_vec256
+    v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_);
+    Lib_IntVector_Intrinsics_vec256 ws0 = v0__;
+    Lib_IntVector_Intrinsics_vec256 ws1 = v2__;
+    Lib_IntVector_Intrinsics_vec256 ws2 = v1__;
+    Lib_IntVector_Intrinsics_vec256 ws3 = v3__;
+    Lib_IntVector_Intrinsics_vec256 v01 = ws[4U];
+    Lib_IntVector_Intrinsics_vec256 v11 = ws[5U];
+    Lib_IntVector_Intrinsics_vec256 v21 = ws[6U];
+    Lib_IntVector_Intrinsics_vec256 v31 = ws[7U];
+    Lib_IntVector_Intrinsics_vec256
+    v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11);
+    Lib_IntVector_Intrinsics_vec256
+    v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11);
+    Lib_IntVector_Intrinsics_vec256
+    v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31);
+    Lib_IntVector_Intrinsics_vec256
+    v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31);
+    Lib_IntVector_Intrinsics_vec256
+    v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0);
+    Lib_IntVector_Intrinsics_vec256
+    v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0);
+    Lib_IntVector_Intrinsics_vec256
+    v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0);
+    Lib_IntVector_Intrinsics_vec256
+    v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0);
+    Lib_IntVector_Intrinsics_vec256 ws4 = v0__0;
+    Lib_IntVector_Intrinsics_vec256 ws5 = v2__0;
+    Lib_IntVector_Intrinsics_vec256 ws6 = v1__0;
+    Lib_IntVector_Intrinsics_vec256 ws7 = v3__0;
+    Lib_IntVector_Intrinsics_vec256 v02 = ws[8U];
+    Lib_IntVector_Intrinsics_vec256 v12 = ws[9U];
+    Lib_IntVector_Intrinsics_vec256 v22 = ws[10U];
+    Lib_IntVector_Intrinsics_vec256 v32 = ws[11U];
+    Lib_IntVector_Intrinsics_vec256
+    v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12);
+    Lib_IntVector_Intrinsics_vec256
+    v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12);
+    Lib_IntVector_Intrinsics_vec256
+    v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32);
+    Lib_IntVector_Intrinsics_vec256
+    v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32);
+    Lib_IntVector_Intrinsics_vec256
+    v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1);
+    Lib_IntVector_Intrinsics_vec256
+    v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1);
+    Lib_IntVector_Intrinsics_vec256
+    v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1);
+    Lib_IntVector_Intrinsics_vec256
+    v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1);
+    Lib_IntVector_Intrinsics_vec256 ws8 = v0__1;
+    Lib_IntVector_Intrinsics_vec256 ws9 = v2__1;
+    Lib_IntVector_Intrinsics_vec256 ws10 = v1__1;
+    Lib_IntVector_Intrinsics_vec256 ws11 = v3__1;
+    Lib_IntVector_Intrinsics_vec256 v03 = ws[12U];
+    Lib_IntVector_Intrinsics_vec256 v13 = ws[13U];
+    Lib_IntVector_Intrinsics_vec256 v23 = ws[14U];
+    Lib_IntVector_Intrinsics_vec256 v33 = ws[15U];
+    Lib_IntVector_Intrinsics_vec256
+    v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13);
+    Lib_IntVector_Intrinsics_vec256
+    v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13);
+    Lib_IntVector_Intrinsics_vec256
+    v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33);
+    Lib_IntVector_Intrinsics_vec256
+    v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33);
+    Lib_IntVector_Intrinsics_vec256
+    v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2);
+    Lib_IntVector_Intrinsics_vec256
+    v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2);
+    Lib_IntVector_Intrinsics_vec256
+    v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2);
+    Lib_IntVector_Intrinsics_vec256
+    v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2);
+    Lib_IntVector_Intrinsics_vec256 ws12 = v0__2;
+    Lib_IntVector_Intrinsics_vec256 ws13 = v2__2;
+    Lib_IntVector_Intrinsics_vec256 ws14 = v1__2;
+    Lib_IntVector_Intrinsics_vec256 ws15 = v3__2;
+    Lib_IntVector_Intrinsics_vec256 v04 = ws[16U];
+    Lib_IntVector_Intrinsics_vec256 v14 = ws[17U];
+    Lib_IntVector_Intrinsics_vec256 v24 = ws[18U];
+    Lib_IntVector_Intrinsics_vec256 v34 = ws[19U];
+    Lib_IntVector_Intrinsics_vec256
+    v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14);
+    Lib_IntVector_Intrinsics_vec256
+    v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14);
+    Lib_IntVector_Intrinsics_vec256
+    v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34);
+    Lib_IntVector_Intrinsics_vec256
+    v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34);
+    Lib_IntVector_Intrinsics_vec256
+    v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3);
+    Lib_IntVector_Intrinsics_vec256
+    v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3);
+    Lib_IntVector_Intrinsics_vec256
+    v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3);
+    Lib_IntVector_Intrinsics_vec256
+    v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3);
+    Lib_IntVector_Intrinsics_vec256 ws16 = v0__3;
+    Lib_IntVector_Intrinsics_vec256 ws17 = v2__3;
+    Lib_IntVector_Intrinsics_vec256 ws18 = v1__3;
+    Lib_IntVector_Intrinsics_vec256 ws19 = v3__3;
+    Lib_IntVector_Intrinsics_vec256 v05 = ws[20U];
+    Lib_IntVector_Intrinsics_vec256 v15 = ws[21U];
+    Lib_IntVector_Intrinsics_vec256 v25 = ws[22U];
+    Lib_IntVector_Intrinsics_vec256 v35 = ws[23U];
+    Lib_IntVector_Intrinsics_vec256
+    v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15);
+    Lib_IntVector_Intrinsics_vec256
+    v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15);
+    Lib_IntVector_Intrinsics_vec256
+    v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35);
+    Lib_IntVector_Intrinsics_vec256
+    v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35);
+    Lib_IntVector_Intrinsics_vec256
+    v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4);
+    Lib_IntVector_Intrinsics_vec256
+    v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4);
+    Lib_IntVector_Intrinsics_vec256
+    v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4);
+    Lib_IntVector_Intrinsics_vec256
+    v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4);
+    Lib_IntVector_Intrinsics_vec256 ws20 = v0__4;
+    Lib_IntVector_Intrinsics_vec256 ws21 = v2__4;
+    Lib_IntVector_Intrinsics_vec256 ws22 = v1__4;
+    Lib_IntVector_Intrinsics_vec256 ws23 = v3__4;
+    Lib_IntVector_Intrinsics_vec256 v06 = ws[24U];
+    Lib_IntVector_Intrinsics_vec256 v16 = ws[25U];
+    Lib_IntVector_Intrinsics_vec256 v26 = ws[26U];
+    Lib_IntVector_Intrinsics_vec256 v36 = ws[27U];
+    Lib_IntVector_Intrinsics_vec256
+    v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16);
+    Lib_IntVector_Intrinsics_vec256
+    v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16);
+    Lib_IntVector_Intrinsics_vec256
+    v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36);
+    Lib_IntVector_Intrinsics_vec256
+    v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36);
+    Lib_IntVector_Intrinsics_vec256
+    v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5);
+    Lib_IntVector_Intrinsics_vec256
+    v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5);
+    Lib_IntVector_Intrinsics_vec256
+    v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5);
+    Lib_IntVector_Intrinsics_vec256
+    v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5);
+    Lib_IntVector_Intrinsics_vec256 ws24 = v0__5;
+    Lib_IntVector_Intrinsics_vec256 ws25 = v2__5;
+    Lib_IntVector_Intrinsics_vec256 ws26 = v1__5;
+    Lib_IntVector_Intrinsics_vec256 ws27 = v3__5;
+    Lib_IntVector_Intrinsics_vec256 v0 = ws[28U];
+    Lib_IntVector_Intrinsics_vec256 v1 = ws[29U];
+    Lib_IntVector_Intrinsics_vec256 v2 = ws[30U];
+    Lib_IntVector_Intrinsics_vec256 v3 = ws[31U];
+    Lib_IntVector_Intrinsics_vec256
+    v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
+    Lib_IntVector_Intrinsics_vec256
+    v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
+    Lib_IntVector_Intrinsics_vec256
+    v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
+    Lib_IntVector_Intrinsics_vec256
+    v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
+    Lib_IntVector_Intrinsics_vec256
+    v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6);
+    Lib_IntVector_Intrinsics_vec256
+    v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6);
+    Lib_IntVector_Intrinsics_vec256
+    v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6);
+    Lib_IntVector_Intrinsics_vec256
+    v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6);
+    Lib_IntVector_Intrinsics_vec256 ws28 = v0__6;
+    Lib_IntVector_Intrinsics_vec256 ws29 = v2__6;
+    Lib_IntVector_Intrinsics_vec256 ws30 = v1__6;
+    Lib_IntVector_Intrinsics_vec256 ws31 = v3__6;
+    ws[0U] = ws0;
+    ws[1U] = ws1;
+    ws[2U] = ws2;
+    ws[3U] = ws3;
+    ws[4U] = ws4;
+    ws[5U] = ws5;
+    ws[6U] = ws6;
+    ws[7U] = ws7;
+    ws[8U] = ws8;
+    ws[9U] = ws9;
+    ws[10U] = ws10;
+    ws[11U] = ws11;
+    ws[12U] = ws12;
+    ws[13U] = ws13;
+    ws[14U] = ws14;
+    ws[15U] = ws15;
+    ws[16U] = ws16;
+    ws[17U] = ws17;
+    ws[18U] = ws18;
+    ws[19U] = ws19;
+    ws[20U] = ws20;
+    ws[21U] = ws21;
+    ws[22U] = ws22;
+    ws[23U] = ws23;
+    ws[24U] = ws24;
+    ws[25U] = ws25;
+    ws[26U] = ws26;
+    ws[27U] = ws27;
+    ws[28U] = ws28;
+    ws[29U] = ws29;
+    ws[30U] = ws30;
+    ws[31U] = ws31;
+    for (uint32_t i = 0U; i < 25U; i++)
+    {
+      s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]);
+    }
+    for (uint32_t i1 = 0U; i1 < 24U; i1++)
+    {
+      KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U };
+      KRML_MAYBE_FOR5(i,
+        0U,
+        5U,
+        1U,
+        Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U];
+        Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U];
+        Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U];
+        _C[i] =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____0,
+            Lib_IntVector_Intrinsics_vec256_xor(uu____1,
+              Lib_IntVector_Intrinsics_vec256_xor(uu____2,
+                Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); 
+      KRML_MAYBE_FOR5(i2,
+        0U,
+        5U,
+        1U,
+        Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U];
+        Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U];
+        Lib_IntVector_Intrinsics_vec256
+        _D =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____3,
+            Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4,
+              1U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U)));
+        KRML_MAYBE_FOR5(i,
+          0U,
+          5U,
+          1U,
+          s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D);););
+      Lib_IntVector_Intrinsics_vec256 x = s[1U];
+      Lib_IntVector_Intrinsics_vec256 current = x;
+      for (uint32_t i = 0U; i < 24U; i++)
+      {
+        uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i];
+        uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i];
+        Lib_IntVector_Intrinsics_vec256 temp = s[_Y];
+        Lib_IntVector_Intrinsics_vec256 uu____5 = current;
+        s[_Y] =
+          Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5,
+            r),
+            Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r));
+        current = temp;
+      }
+      KRML_MAYBE_FOR5(i,
+        0U,
+        5U,
+        1U,
+        Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256
+        uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256
+        v07 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____6,
+            Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256
+        uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256
+        v17 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____8,
+            Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256
+        uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256
+        v27 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____10,
+            Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256
+        uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256
+        v37 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____12,
+            Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256
+        uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256
+        v4 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____14,
+            Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i]));
+        s[0U + 5U * i] = v07;
+        s[1U + 5U * i] = v17;
+        s[2U + 5U * i] = v27;
+        s[3U + 5U * i] = v37;
+        s[4U + 5U * i] = v4;);
+      uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1];
+      Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U];
+      s[0U] =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____16,
+          Lib_IntVector_Intrinsics_vec256_load64(c));
+    }
+  }
+  uint32_t rem = inputByteLen % rateInBytes;
+  uint8_t b00[256U] = { 0U };
+  uint8_t b10[256U] = { 0U };
+  uint8_t b20[256U] = { 0U };
+  uint8_t b30[256U] = { 0U };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+  uint32_t rem1 = inputByteLen % rateInBytes;
+  uint8_t *b31 = ib.snd.snd.snd;
+  uint8_t *b21 = ib.snd.snd.fst;
+  uint8_t *b11 = ib.snd.fst;
+  uint8_t *b01 = ib.fst;
+  uint8_t *bl3 = b_.snd.snd.snd;
+  uint8_t *bl2 = b_.snd.snd.fst;
+  uint8_t *bl1 = b_.snd.fst;
+  uint8_t *bl0 = b_.fst;
+  memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
+  memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
+  memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
+  memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
+  uint8_t *b32 = b_.snd.snd.snd;
+  uint8_t *b22 = b_.snd.snd.fst;
+  uint8_t *b12 = b_.snd.fst;
+  uint8_t *b02 = b_.fst;
+  b02[rem] = 0x1FU;
+  b12[rem] = 0x1FU;
+  b22[rem] = 0x1FU;
+  b32[rem] = 0x1FU;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U };
+  uint8_t *b33 = b_.snd.snd.snd;
+  uint8_t *b23 = b_.snd.snd.fst;
+  uint8_t *b13 = b_.snd.fst;
+  uint8_t *b03 = b_.fst;
+  ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03);
+  ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13);
+  ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23);
+  ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33);
+  ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U);
+  ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U);
+  ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U);
+  ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U);
+  ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U);
+  ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U);
+  ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U);
+  ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U);
+  ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U);
+  ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U);
+  ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U);
+  ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U);
+  ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U);
+  ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U);
+  ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U);
+  ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U);
+  ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U);
+  ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U);
+  ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U);
+  ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U);
+  ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U);
+  ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U);
+  ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U);
+  ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U);
+  ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U);
+  ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U);
+  ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U);
+  ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U);
+  Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U];
+  Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U];
+  Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U];
+  Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10);
+  Lib_IntVector_Intrinsics_vec256
+  v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10);
+  Lib_IntVector_Intrinsics_vec256
+  v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30);
+  Lib_IntVector_Intrinsics_vec256
+  v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30);
+  Lib_IntVector_Intrinsics_vec256
+  v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_);
+  Lib_IntVector_Intrinsics_vec256
+  v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_);
+  Lib_IntVector_Intrinsics_vec256
+  v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_);
+  Lib_IntVector_Intrinsics_vec256
+  v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_);
+  Lib_IntVector_Intrinsics_vec256 ws00 = v0__;
+  Lib_IntVector_Intrinsics_vec256 ws110 = v2__;
+  Lib_IntVector_Intrinsics_vec256 ws210 = v1__;
+  Lib_IntVector_Intrinsics_vec256 ws33 = v3__;
+  Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U];
+  Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U];
+  Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U];
+  Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11);
+  Lib_IntVector_Intrinsics_vec256
+  v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11);
+  Lib_IntVector_Intrinsics_vec256
+  v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31);
+  Lib_IntVector_Intrinsics_vec256
+  v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31);
+  Lib_IntVector_Intrinsics_vec256
+  v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0);
+  Lib_IntVector_Intrinsics_vec256
+  v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0);
+  Lib_IntVector_Intrinsics_vec256
+  v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0);
+  Lib_IntVector_Intrinsics_vec256
+  v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0);
+  Lib_IntVector_Intrinsics_vec256 ws40 = v0__0;
+  Lib_IntVector_Intrinsics_vec256 ws50 = v2__0;
+  Lib_IntVector_Intrinsics_vec256 ws60 = v1__0;
+  Lib_IntVector_Intrinsics_vec256 ws70 = v3__0;
+  Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U];
+  Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U];
+  Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U];
+  Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12);
+  Lib_IntVector_Intrinsics_vec256
+  v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12);
+  Lib_IntVector_Intrinsics_vec256
+  v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32);
+  Lib_IntVector_Intrinsics_vec256
+  v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32);
+  Lib_IntVector_Intrinsics_vec256
+  v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1);
+  Lib_IntVector_Intrinsics_vec256
+  v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1);
+  Lib_IntVector_Intrinsics_vec256
+  v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1);
+  Lib_IntVector_Intrinsics_vec256
+  v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1);
+  Lib_IntVector_Intrinsics_vec256 ws80 = v0__1;
+  Lib_IntVector_Intrinsics_vec256 ws90 = v2__1;
+  Lib_IntVector_Intrinsics_vec256 ws100 = v1__1;
+  Lib_IntVector_Intrinsics_vec256 ws111 = v3__1;
+  Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U];
+  Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U];
+  Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U];
+  Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13);
+  Lib_IntVector_Intrinsics_vec256
+  v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13);
+  Lib_IntVector_Intrinsics_vec256
+  v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33);
+  Lib_IntVector_Intrinsics_vec256
+  v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33);
+  Lib_IntVector_Intrinsics_vec256
+  v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2);
+  Lib_IntVector_Intrinsics_vec256
+  v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2);
+  Lib_IntVector_Intrinsics_vec256
+  v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2);
+  Lib_IntVector_Intrinsics_vec256
+  v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2);
+  Lib_IntVector_Intrinsics_vec256 ws120 = v0__2;
+  Lib_IntVector_Intrinsics_vec256 ws130 = v2__2;
+  Lib_IntVector_Intrinsics_vec256 ws140 = v1__2;
+  Lib_IntVector_Intrinsics_vec256 ws150 = v3__2;
+  Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U];
+  Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U];
+  Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U];
+  Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14);
+  Lib_IntVector_Intrinsics_vec256
+  v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14);
+  Lib_IntVector_Intrinsics_vec256
+  v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34);
+  Lib_IntVector_Intrinsics_vec256
+  v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34);
+  Lib_IntVector_Intrinsics_vec256
+  v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3);
+  Lib_IntVector_Intrinsics_vec256
+  v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3);
+  Lib_IntVector_Intrinsics_vec256
+  v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3);
+  Lib_IntVector_Intrinsics_vec256
+  v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3);
+  Lib_IntVector_Intrinsics_vec256 ws160 = v0__3;
+  Lib_IntVector_Intrinsics_vec256 ws170 = v2__3;
+  Lib_IntVector_Intrinsics_vec256 ws180 = v1__3;
+  Lib_IntVector_Intrinsics_vec256 ws190 = v3__3;
+  Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U];
+  Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U];
+  Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U];
+  Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15);
+  Lib_IntVector_Intrinsics_vec256
+  v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15);
+  Lib_IntVector_Intrinsics_vec256
+  v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35);
+  Lib_IntVector_Intrinsics_vec256
+  v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35);
+  Lib_IntVector_Intrinsics_vec256
+  v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4);
+  Lib_IntVector_Intrinsics_vec256
+  v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4);
+  Lib_IntVector_Intrinsics_vec256
+  v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4);
+  Lib_IntVector_Intrinsics_vec256
+  v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4);
+  Lib_IntVector_Intrinsics_vec256 ws200 = v0__4;
+  Lib_IntVector_Intrinsics_vec256 ws211 = v2__4;
+  Lib_IntVector_Intrinsics_vec256 ws220 = v1__4;
+  Lib_IntVector_Intrinsics_vec256 ws230 = v3__4;
+  Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U];
+  Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U];
+  Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U];
+  Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16);
+  Lib_IntVector_Intrinsics_vec256
+  v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16);
+  Lib_IntVector_Intrinsics_vec256
+  v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36);
+  Lib_IntVector_Intrinsics_vec256
+  v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36);
+  Lib_IntVector_Intrinsics_vec256
+  v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5);
+  Lib_IntVector_Intrinsics_vec256
+  v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5);
+  Lib_IntVector_Intrinsics_vec256
+  v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5);
+  Lib_IntVector_Intrinsics_vec256
+  v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5);
+  Lib_IntVector_Intrinsics_vec256 ws240 = v0__5;
+  Lib_IntVector_Intrinsics_vec256 ws250 = v2__5;
+  Lib_IntVector_Intrinsics_vec256 ws260 = v1__5;
+  Lib_IntVector_Intrinsics_vec256 ws270 = v3__5;
+  Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U];
+  Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U];
+  Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U];
+  Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17);
+  Lib_IntVector_Intrinsics_vec256
+  v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17);
+  Lib_IntVector_Intrinsics_vec256
+  v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37);
+  Lib_IntVector_Intrinsics_vec256
+  v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37);
+  Lib_IntVector_Intrinsics_vec256
+  v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6);
+  Lib_IntVector_Intrinsics_vec256
+  v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6);
+  Lib_IntVector_Intrinsics_vec256
+  v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6);
+  Lib_IntVector_Intrinsics_vec256
+  v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6);
+  Lib_IntVector_Intrinsics_vec256 ws280 = v0__6;
+  Lib_IntVector_Intrinsics_vec256 ws290 = v2__6;
+  Lib_IntVector_Intrinsics_vec256 ws300 = v1__6;
+  Lib_IntVector_Intrinsics_vec256 ws310 = v3__6;
+  ws32[0U] = ws00;
+  ws32[1U] = ws110;
+  ws32[2U] = ws210;
+  ws32[3U] = ws33;
+  ws32[4U] = ws40;
+  ws32[5U] = ws50;
+  ws32[6U] = ws60;
+  ws32[7U] = ws70;
+  ws32[8U] = ws80;
+  ws32[9U] = ws90;
+  ws32[10U] = ws100;
+  ws32[11U] = ws111;
+  ws32[12U] = ws120;
+  ws32[13U] = ws130;
+  ws32[14U] = ws140;
+  ws32[15U] = ws150;
+  ws32[16U] = ws160;
+  ws32[17U] = ws170;
+  ws32[18U] = ws180;
+  ws32[19U] = ws190;
+  ws32[20U] = ws200;
+  ws32[21U] = ws211;
+  ws32[22U] = ws220;
+  ws32[23U] = ws230;
+  ws32[24U] = ws240;
+  ws32[25U] = ws250;
+  ws32[26U] = ws260;
+  ws32[27U] = ws270;
+  ws32[28U] = ws280;
+  ws32[29U] = ws290;
+  ws32[30U] = ws300;
+  ws32[31U] = ws310;
+  for (uint32_t i = 0U; i < 25U; i++)
+  {
+    s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]);
+  }
+  uint8_t b04[256U] = { 0U };
+  uint8_t b14[256U] = { 0U };
+  uint8_t b24[256U] = { 0U };
+  uint8_t b34[256U] = { 0U };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } };
+  uint8_t *b35 = b.snd.snd.snd;
+  uint8_t *b25 = b.snd.snd.fst;
+  uint8_t *b15 = b.snd.fst;
+  uint8_t *b05 = b.fst;
+  b05[rateInBytes - 1U] = 0x80U;
+  b15[rateInBytes - 1U] = 0x80U;
+  b25[rateInBytes - 1U] = 0x80U;
+  b35[rateInBytes - 1U] = 0x80U;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U };
+  uint8_t *b3 = b.snd.snd.snd;
+  uint8_t *b26 = b.snd.snd.fst;
+  uint8_t *b16 = b.snd.fst;
+  uint8_t *b06 = b.fst;
+  ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06);
+  ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16);
+  ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26);
+  ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3);
+  ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U);
+  ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U);
+  ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U);
+  ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U);
+  ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U);
+  ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U);
+  ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U);
+  ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U);
+  ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U);
+  ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U);
+  ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U);
+  ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U);
+  ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U);
+  ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U);
+  ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U);
+  ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U);
+  ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U);
+  ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U);
+  ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U);
+  ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U);
+  ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U);
+  ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U);
+  ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U);
+  ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U);
+  ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U);
+  ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U);
+  ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U);
+  ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U);
+  Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U];
+  Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U];
+  Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U];
+  Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18);
+  Lib_IntVector_Intrinsics_vec256
+  v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18);
+  Lib_IntVector_Intrinsics_vec256
+  v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38);
+  Lib_IntVector_Intrinsics_vec256
+  v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38);
+  Lib_IntVector_Intrinsics_vec256
+  v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7);
+  Lib_IntVector_Intrinsics_vec256
+  v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7);
+  Lib_IntVector_Intrinsics_vec256
+  v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7);
+  Lib_IntVector_Intrinsics_vec256
+  v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7);
+  Lib_IntVector_Intrinsics_vec256 ws01 = v0__7;
+  Lib_IntVector_Intrinsics_vec256 ws112 = v2__7;
+  Lib_IntVector_Intrinsics_vec256 ws212 = v1__7;
+  Lib_IntVector_Intrinsics_vec256 ws35 = v3__7;
+  Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U];
+  Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U];
+  Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U];
+  Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19);
+  Lib_IntVector_Intrinsics_vec256
+  v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19);
+  Lib_IntVector_Intrinsics_vec256
+  v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39);
+  Lib_IntVector_Intrinsics_vec256
+  v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39);
+  Lib_IntVector_Intrinsics_vec256
+  v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8);
+  Lib_IntVector_Intrinsics_vec256
+  v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8);
+  Lib_IntVector_Intrinsics_vec256
+  v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8);
+  Lib_IntVector_Intrinsics_vec256
+  v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8);
+  Lib_IntVector_Intrinsics_vec256 ws41 = v0__8;
+  Lib_IntVector_Intrinsics_vec256 ws51 = v2__8;
+  Lib_IntVector_Intrinsics_vec256 ws61 = v1__8;
+  Lib_IntVector_Intrinsics_vec256 ws71 = v3__8;
+  Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U];
+  Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U];
+  Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U];
+  Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110);
+  Lib_IntVector_Intrinsics_vec256
+  v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110);
+  Lib_IntVector_Intrinsics_vec256
+  v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310);
+  Lib_IntVector_Intrinsics_vec256
+  v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310);
+  Lib_IntVector_Intrinsics_vec256
+  v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9);
+  Lib_IntVector_Intrinsics_vec256
+  v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9);
+  Lib_IntVector_Intrinsics_vec256
+  v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9);
+  Lib_IntVector_Intrinsics_vec256
+  v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9);
+  Lib_IntVector_Intrinsics_vec256 ws81 = v0__9;
+  Lib_IntVector_Intrinsics_vec256 ws91 = v2__9;
+  Lib_IntVector_Intrinsics_vec256 ws101 = v1__9;
+  Lib_IntVector_Intrinsics_vec256 ws113 = v3__9;
+  Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U];
+  Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U];
+  Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U];
+  Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111);
+  Lib_IntVector_Intrinsics_vec256
+  v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111);
+  Lib_IntVector_Intrinsics_vec256
+  v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311);
+  Lib_IntVector_Intrinsics_vec256
+  v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311);
+  Lib_IntVector_Intrinsics_vec256
+  v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10);
+  Lib_IntVector_Intrinsics_vec256
+  v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10);
+  Lib_IntVector_Intrinsics_vec256
+  v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10);
+  Lib_IntVector_Intrinsics_vec256
+  v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10);
+  Lib_IntVector_Intrinsics_vec256 ws121 = v0__10;
+  Lib_IntVector_Intrinsics_vec256 ws131 = v2__10;
+  Lib_IntVector_Intrinsics_vec256 ws141 = v1__10;
+  Lib_IntVector_Intrinsics_vec256 ws151 = v3__10;
+  Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U];
+  Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U];
+  Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U];
+  Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112);
+  Lib_IntVector_Intrinsics_vec256
+  v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112);
+  Lib_IntVector_Intrinsics_vec256
+  v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312);
+  Lib_IntVector_Intrinsics_vec256
+  v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312);
+  Lib_IntVector_Intrinsics_vec256
+  v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11);
+  Lib_IntVector_Intrinsics_vec256
+  v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11);
+  Lib_IntVector_Intrinsics_vec256
+  v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11);
+  Lib_IntVector_Intrinsics_vec256
+  v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11);
+  Lib_IntVector_Intrinsics_vec256 ws161 = v0__11;
+  Lib_IntVector_Intrinsics_vec256 ws171 = v2__11;
+  Lib_IntVector_Intrinsics_vec256 ws181 = v1__11;
+  Lib_IntVector_Intrinsics_vec256 ws191 = v3__11;
+  Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U];
+  Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U];
+  Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U];
+  Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113);
+  Lib_IntVector_Intrinsics_vec256
+  v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113);
+  Lib_IntVector_Intrinsics_vec256
+  v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313);
+  Lib_IntVector_Intrinsics_vec256
+  v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313);
+  Lib_IntVector_Intrinsics_vec256
+  v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12);
+  Lib_IntVector_Intrinsics_vec256
+  v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12);
+  Lib_IntVector_Intrinsics_vec256
+  v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12);
+  Lib_IntVector_Intrinsics_vec256
+  v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12);
+  Lib_IntVector_Intrinsics_vec256 ws201 = v0__12;
+  Lib_IntVector_Intrinsics_vec256 ws213 = v2__12;
+  Lib_IntVector_Intrinsics_vec256 ws221 = v1__12;
+  Lib_IntVector_Intrinsics_vec256 ws231 = v3__12;
+  Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U];
+  Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U];
+  Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U];
+  Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114);
+  Lib_IntVector_Intrinsics_vec256
+  v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114);
+  Lib_IntVector_Intrinsics_vec256
+  v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314);
+  Lib_IntVector_Intrinsics_vec256
+  v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214,
v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + 
Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + 
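The 24-iteration loop that closes just above is one vectorized Keccak-f[1600] permutation: the _C/_D computation is the theta column parity, the keccak_piln/keccak_rotc pass is rho+pi, the lognot/and/xor block is chi, and the final xor with keccak_rndc[i0] is iota, each lifted pointwise to vec256 so four permutations advance in lock-step. The shift_left64/shift_right64/or pairing stands in for a 64-bit rotate, which AVX2 lacks; a sketch of that idiom, under the assumption that the intrinsics map to plain AVX2 shifts (helper name hypothetical):

    #include <immintrin.h>

    /* Rotate each 64-bit lane left by r, valid for 1 <= r <= 63;
       AVX2 has no native 64-bit rotate (that arrives with AVX-512). */
    static inline __m256i rotl64x4(__m256i x, int r)
    {
      return _mm256_or_si256(_mm256_slli_epi64(x, r),
                             _mm256_srli_epi64(x, 64 - r));
    }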
Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, 
v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + 
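Each squeeze iteration inverse-transposes the state back into byte order: the 32 store64_le calls below fill hbuf so that instance k's rate block sits contiguously at hbuf + 256U * k (eight vectors of 32 bytes per instance), and the four memcpy calls then scatter rateInBytes from each region into the corresponding output. A compressed sketch of that scatter, with the four output pointers gathered into a hypothetical array:

    /* Equivalent of the four memcpy calls below; out[] is illustrative,
       standing for the b0/b1/b2/b36 pointers taken from rb. */
    uint8_t *out[4] = { b0, b1, b2, b36 };
    for (uint32_t k = 0U; k < 4U; k++)
      memcpy(out[k] + i0 * rateInBytes, hbuf + 256U * k,
             rateInBytes * sizeof (uint8_t));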
Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes, hbuf + 256U, rateInBytes * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes, hbuf + 512U, rateInBytes * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes, hbuf + 768U, rateInBytes * sizeof (uint8_t)); + for 
(uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + 
Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = outputByteLen % rateInBytes; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + 
Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + 
  ws[12U] = ws17;
+  ws[13U] = ws21;
+  ws[14U] = ws25;
+  ws[15U] = ws29;
+  ws[16U] = ws2;
+  ws[17U] = ws6;
+  ws[18U] = ws10;
+  ws[19U] = ws14;
+  ws[20U] = ws18;
+  ws[21U] = ws22;
+  ws[22U] = ws26;
+  ws[23U] = ws30;
+  ws[24U] = ws3;
+  ws[25U] = ws7;
+  ws[26U] = ws11;
+  ws[27U] = ws15;
+  ws[28U] = ws19;
+  ws[29U] = ws23;
+  ws[30U] = ws27;
+  ws[31U] = ws31;
+  for (uint32_t i = 0U; i < 32U; i++)
+  {
+    Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]);
+  }
+  uint8_t *b36 = rb.snd.snd.snd;
+  uint8_t *b2 = rb.snd.snd.fst;
+  uint8_t *b1 = rb.snd.fst;
+  uint8_t *b0 = rb.fst;
+  memcpy(b0 + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t));
+  memcpy(b1 + outputByteLen - remOut, hbuf + 256U, remOut * sizeof (uint8_t));
+  memcpy(b2 + outputByteLen - remOut, hbuf + 512U, remOut * sizeof (uint8_t));
+  memcpy(b36 + outputByteLen - remOut, hbuf + 768U, remOut * sizeof (uint8_t));
+}
+
+void
+Hacl_Hash_SHA3_Simd256_sha3_224(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+)
+{
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } };
+  uint32_t rateInBytes = 144U;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U };
+  for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++)
+  {
+    uint8_t b00[256U] = { 0U };
+    uint8_t b10[256U] = { 0U };
+    uint8_t b20[256U] = { 0U };
+    uint8_t b30[256U] = { 0U };
+    K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+    b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+    uint8_t *b31 = ib.snd.snd.snd;
+    uint8_t *b21 = ib.snd.snd.fst;
+    uint8_t *b11 = ib.snd.fst;
+    uint8_t *b01 = ib.fst;
+    uint8_t *bl3 = b_.snd.snd.snd;
+    uint8_t *bl2 = b_.snd.snd.fst;
+    uint8_t *bl1 = b_.snd.fst;
+    uint8_t *bl0 = b_.fst;
+    memcpy(bl0, b01 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl1, b11 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl2, b21 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl3, b31 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
+    uint8_t *b3 = b_.snd.snd.snd;
+    uint8_t *b2 = b_.snd.snd.fst;
+    uint8_t *b1 = b_.snd.fst;
+    uint8_t *b0 = b_.fst;
+    ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0);
+    ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1);
+    ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2);
+    ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3);
+    ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U);
+    ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U);
+    ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U);
+    ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U);
+    ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U);
+    ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U);
+    ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U);
+    ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U);
+    ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U);
+    ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U);
+    ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U);
+    ws[15U] =
Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + 
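Hacl_Hash_SHA3_Simd256_sha3_224, whose definition begins above, hashes four equal-length inputs in a single pass; rateInBytes is 144 because SHA3-224 reserves a capacity of 2 * 224 bits, leaving (1600 - 448) / 8 = 144 rate bytes per absorbed block. A hypothetical call site (message buffers m0..m3 and their common length mlen assumed to be supplied by the caller):

    /* Four 28-byte SHA3-224 digests computed in parallel. */
    uint8_t d0[28U], d1[28U], d2[28U], d3[28U];
    Hacl_Hash_SHA3_Simd256_sha3_224(d0, d1, d2, d3, m0, m1, m2, m3, mlen);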
Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + 
Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + 
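Throughout these functions the Keccak state s is lane-sliced: s[i] packs word i of all four parallel instances, so every vector xor, and, or rotate advances all four hashes at once. Relative to four scalar 25-word states, the layout corresponds to the following illustration (function name and the scalar array layout are hypothetical):

    #include <immintrin.h>
    #include <stdint.h>

    /* s[i] holds { st0[i], st1[i], st2[i], st3[i] };
       _mm256_set_epi64x takes the highest lane first. */
    static __m256i pack_word(const uint64_t st[4][25U], uint32_t i)
    {
      return _mm256_set_epi64x((int64_t)st[3][i], (int64_t)st[2][i],
                               (int64_t)st[1][i], (int64_t)st[0][i]);
    }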
Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + 
Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[rem] = 0x06U; + b12[rem] = 0x06U; + b22[rem] = 0x06U; + b32[rem] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = 
ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + 
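/* Each group of four vec256 loads is transposed via interleave_{low,high}64 followed by interleave_{low,high}128: a 4x4 transpose of 64-bit lanes that gathers the same Keccak state word from all four input streams into a single register. */ +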
Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes - 1U] = 0x80U; + b15[rateInBytes - 1U] = 0x80U; + b25[rateInBytes - 1U] = 0x80U; + b35[rateInBytes - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + 
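/* ws34 holds the trailing block carrying the 0x80 pad bit at rateInBytes - 1; the block with the 0x06 domain-separation byte was XORed into the state just above. */ +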
Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; 
+ ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + 
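/* chi step of Keccak-f[1600], row by row: v_x = s_x ^ (~s_{x+1} & s_{x+2}), evaluated on all four lanes of each vec256 at once */ +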
Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < 28U / rateInBytes; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + 
Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + 
Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, 
v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes, hbuf + 256U, rateInBytes * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes, hbuf + 512U, rateInBytes * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes, hbuf + 768U, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = 
Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 28U % rateInBytes; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 
= Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + 
Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + 
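/* last interleave group of the output transpose; ws is then stored little-endian into hbuf and the final remOut bytes of each of the four digests are copied out */ +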
Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 28U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 28U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b36 + 28U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_256( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + uint32_t rateInBytes = 136U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl1, b11 + i0 * 
rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl2, b21 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl3, b31 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + 
Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 
+ v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); 
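+ /* Each interleave_low64/high64 pair followed by an interleave_low128/high128 pair transposes a 4x4 block of 64-bit lanes: after the write-back below, ws[i] packs lane i of all four input blocks into a single 256-bit vector. */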
+ Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = 
Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[rem] = 0x06U; + b12[rem] = 0x06U; + b22[rem] = 0x06U; + b32[rem] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = 
b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + 
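+ /* The block being transposed here carries the last inputByteLen % rateInBytes input bytes of each instance plus the 0x06 byte set above: SHA3's two domain-separation bits together with the first bit of the pad10*1 padding. */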
Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 
ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes - 1U] = 0x80U; + b15[rateInBytes - 1U] = 0x80U; + b25[rateInBytes - 1U] = 0x80U; + b35[rateInBytes - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + 
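+ /* ws34 is loaded from an all-zero block whose only non-zero byte is 0x80 at rateInBytes - 1, the closing bit of pad10*1. Both padding blocks are XORed into the state before a single permutation, so when rem == rateInBytes - 1 the two bytes combine to 0x86, exactly as the padding rule requires. */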
ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + 
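+ /* The same fully unrolled transpose is repeated for this block; each intermediate (v*_, v*__, ws*) is bound exactly once, a pattern characteristic of C extracted by KaRaMeL from straight-line F* code. */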
Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 
v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + 
Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < 32U / rateInBytes; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + 
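+ /* Squeeze: a copy of the lane-interleaved state is transposed back so that the store64_le loop below writes contiguous per-instance byte streams into hbuf. */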
Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + 
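+ /* Together with the reordered write-back below (ws[1] = ws4, ws[2] = ws8, ...), this inverse transpose lays hbuf out instance-major: bytes 0..255 belong to output 0, bytes 256..511 to output 1, and so on. */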
Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 
+ v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes, hbuf + 256U, rateInBytes * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes, hbuf + 512U, rateInBytes * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes, hbuf + 768U, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + 
Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 32U % rateInBytes; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + 
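+ /* Final partial squeeze: remOut = 32 % rateInBytes. Assuming the SHA3-256 rate of 136 bytes (this function produces a 32-byte digest), the full-block loop above ran zero times and this last transpose emits the entire output. */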
Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, 
v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + 
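/* descriptive comment (added): reorder the remaining state lanes back to per-digest byte order before the final store */ +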
Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 32U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 32U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 32U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b36 + 32U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_384( 
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+)
+{
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } };
+  uint32_t rateInBytes = 104U;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U };
+  for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++)
+  {
+    uint8_t b00[256U] = { 0U };
+    uint8_t b10[256U] = { 0U };
+    uint8_t b20[256U] = { 0U };
+    uint8_t b30[256U] = { 0U };
+    K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+    b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+    uint8_t *b31 = ib.snd.snd.snd;
+    uint8_t *b21 = ib.snd.snd.fst;
+    uint8_t *b11 = ib.snd.fst;
+    uint8_t *b01 = ib.fst;
+    uint8_t *bl3 = b_.snd.snd.snd;
+    uint8_t *bl2 = b_.snd.snd.fst;
+    uint8_t *bl1 = b_.snd.fst;
+    uint8_t *bl0 = b_.fst;
+    memcpy(bl0, b01 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl1, b11 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl2, b21 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl3, b31 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
+    uint8_t *b3 = b_.snd.snd.snd;
+    uint8_t *b2 = b_.snd.snd.fst;
+    uint8_t *b1 = b_.snd.fst;
+    uint8_t *b0 = b_.fst;
+    ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0);
+    ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1);
+    ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2);
+    ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3);
+    ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U);
+    ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U);
+    ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U);
+    ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U);
+    ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U);
+    ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U);
+    ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U);
+    ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U);
+    ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U);
+    ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U);
+    ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U);
+    ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U);
+    ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U);
+    ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U);
+    ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U);
+    ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U);
+    ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U);
+    ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U);
+    ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U);
+    ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U);
+    ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U);
+    ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U);
+    ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U);
+    ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U);
+    ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U);
+    ws[29U] =
Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + 
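/* descriptive comment (added): the 128-bit interleave stage completes the 4x4 transpose of 64-bit lanes begun by the 64-bit interleaves */ +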
Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = 
Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + 
Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[rem] = 0x06U; + b12[rem] = 0x06U; + b22[rem] = 0x06U; + b32[rem] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + 
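/* descriptive comment (added): transpose ws32 so that each vector holds one 64-bit lane from all four padded blocks */ +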
Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + 
Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + 
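/* descriptive comment (added): absorb the 0x06-padded last blocks by XOR into the state; lanes past the message data are zero */ +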
s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes - 1U] = 0x80U; + b15[rateInBytes - 1U] = 0x80U; + b25[rateInBytes - 1U] = 0x80U; + b35[rateInBytes - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + 
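/* descriptive comment (added): ws34 carries the 0x80 end-of-padding marker set at offset rateInBytes - 1U of each block */ +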
Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + 
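/* descriptive comment (added): same two-stage interleave network as for the data blocks */ +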
Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = 
ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + 
Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < 48U / rateInBytes; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + 
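/* descriptive comment (added): full-rate squeeze blocks; for SHA3-384 this loop body never executes, since 48U / 104U == 0U */ +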
Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + 
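/* descriptive comment (added): state rows 8-11 back in per-digest order */ +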
Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for 
(uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes, hbuf + 256U, rateInBytes * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes, hbuf + 512U, rateInBytes * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes, hbuf + 768U, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U 
* i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 48U % rateInBytes; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + 
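+ /* Final partial squeeze: the identical transpose runs once more so that the
+    last remOut = 48 % rateInBytes bytes of each 48-byte digest can be copied
+    out of hbuf. */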
Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + 
Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22);
+  Lib_IntVector_Intrinsics_vec256 ws28 = v0__22;
+  Lib_IntVector_Intrinsics_vec256 ws29 = v2__22;
+  Lib_IntVector_Intrinsics_vec256 ws30 = v1__22;
+  Lib_IntVector_Intrinsics_vec256 ws31 = v3__22;
+  ws[0U] = ws0;
+  ws[1U] = ws4;
+  ws[2U] = ws8;
+  ws[3U] = ws12;
+  ws[4U] = ws16;
+  ws[5U] = ws20;
+  ws[6U] = ws24;
+  ws[7U] = ws28;
+  ws[8U] = ws1;
+  ws[9U] = ws5;
+  ws[10U] = ws9;
+  ws[11U] = ws13;
+  ws[12U] = ws17;
+  ws[13U] = ws21;
+  ws[14U] = ws25;
+  ws[15U] = ws29;
+  ws[16U] = ws2;
+  ws[17U] = ws6;
+  ws[18U] = ws10;
+  ws[19U] = ws14;
+  ws[20U] = ws18;
+  ws[21U] = ws22;
+  ws[22U] = ws26;
+  ws[23U] = ws30;
+  ws[24U] = ws3;
+  ws[25U] = ws7;
+  ws[26U] = ws11;
+  ws[27U] = ws15;
+  ws[28U] = ws19;
+  ws[29U] = ws23;
+  ws[30U] = ws27;
+  ws[31U] = ws31;
+  for (uint32_t i = 0U; i < 32U; i++)
+  {
+    Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]);
+  }
+  uint8_t *b36 = rb.snd.snd.snd;
+  uint8_t *b2 = rb.snd.snd.fst;
+  uint8_t *b1 = rb.snd.fst;
+  uint8_t *b0 = rb.fst;
+  memcpy(b0 + 48U - remOut, hbuf, remOut * sizeof (uint8_t));
+  memcpy(b1 + 48U - remOut, hbuf + 256U, remOut * sizeof (uint8_t));
+  memcpy(b2 + 48U - remOut, hbuf + 512U, remOut * sizeof (uint8_t));
+  memcpy(b36 + 48U - remOut, hbuf + 768U, remOut * sizeof (uint8_t));
+}
+
+void
+Hacl_Hash_SHA3_Simd256_sha3_512(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+)
+{
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } };
+  uint32_t rateInBytes = 72U;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U };
+  for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++)
+  {
+    uint8_t b00[256U] = { 0U };
+    uint8_t b10[256U] = { 0U };
+    uint8_t b20[256U] = { 0U };
+    uint8_t b30[256U] = { 0U };
+    K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+    b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+    uint8_t *b31 = ib.snd.snd.snd;
+    uint8_t *b21 = ib.snd.snd.fst;
+    uint8_t *b11 = ib.snd.fst;
+    uint8_t *b01 = ib.fst;
+    uint8_t *bl3 = b_.snd.snd.snd;
+    uint8_t *bl2 = b_.snd.snd.fst;
+    uint8_t *bl1 = b_.snd.fst;
+    uint8_t *bl0 = b_.fst;
+    memcpy(bl0, b01 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl1, b11 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl2, b21 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl3, b31 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
+    uint8_t *b3 = b_.snd.snd.snd;
+    uint8_t *b2 = b_.snd.snd.fst;
+    uint8_t *b1 = b_.snd.fst;
+    uint8_t *b0 = b_.fst;
+    ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0);
+    ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1);
+    ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2);
+    ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3);
+    ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U);
+    ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U);
+    ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U);
+    ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U);
+    ws[8U] =
Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + 
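+ /* Absorb (rate 72 bytes = SHA3-512): the four input blocks were loaded
+    lane-by-lane into ws; this transpose regroups them so that ws[i] packs the
+    i-th 64-bit state word of all four lanes before the XOR into s. */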
Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = 
Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[rem] = 0x06U; + b12[rem] = 0x06U; + b22[rem] = 0x06U; + b32[rem] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + 
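+ /* The trailing rem input bytes were copied into zeroed rate-sized buffers and
+    the SHA3 domain-separation byte 0x06 appended at offset rem; these loads
+    pull the padded final blocks into ws32. */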
ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + 
Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes - 1U] = 0x80U; + b15[rateInBytes - 1U] = 0x80U; + b25[rateInBytes - 1U] = 0x80U; + b35[rateInBytes - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + 
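+ /* Second padding block: 0x80 sets the final bit of the pad10*1 padding at the
+    last byte of the rate; ws34 is loaded from these buffers and absorbed like
+    a regular block. */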
ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + 
Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + 
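/* Transpose group six of eight: vectors 20-23 of the suffix block. */ +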
Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + 
Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < 64U / rateInBytes; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + 
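/* Store-side transpose: the reverse permutation regroups state words per output lane before the 64-bit little-endian stores into hbuf and the per-lane memcpy into rb. */ +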
Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, 
v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes, hbuf + 256U, rateInBytes * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes, hbuf + 512U, rateInBytes * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes, hbuf + 768U, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + 
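/* End of the combined rho/pi rotation walk; the KRML_MAYBE_FOR5 block below is the chi step (a ^ (~b & c) along each row), and iota then xors the round constant Hacl_Impl_SHA3_Vec_keccak_rndc[i1] into s[0]. */ +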
KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 64U % rateInBytes; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = 
ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + 
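/* Same store-side transpose as the full-block loop above, run once more for the remOut tail of the 64-byte output. */ +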
Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 
+ v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 64U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 64U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 64U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b36 + 64U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_SHA3_Simd256_state_malloc(void) +{ + Lib_IntVector_Intrinsics_vec256 *buf = (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + for (int i = 0; i < 25; i++){ + buf[i] = Lib_IntVector_Intrinsics_vec256_zero; + } + return buf; +} + +void Hacl_Hash_SHA3_Simd256_state_free(Lib_IntVector_Intrinsics_vec256 *s) +{ + KRML_ALIGNED_FREE(s); +} + +void +Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + for (uint32_t i0 = 0U; i0 < inputByteLen / 168U; i0++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + 
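/* The zeroed 256-byte staging buffers above receive one 168-byte rate block per input below, so the eight 32-byte vector loads per lane never read past the caller's data. */ +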
K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b01 = input0; + uint8_t *b11 = input1; + uint8_t *b21 = input2; + uint8_t *b31 = input3; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + i0 * 168U, 168U * sizeof (uint8_t)); + memcpy(bl1, b11 + i0 * 168U, 168U * sizeof (uint8_t)); + memcpy(bl2, b21 + i0 * 168U, 168U * sizeof (uint8_t)); + memcpy(bl3, b31 + i0 * 168U, 168U * sizeof (uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); 
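+ /* As in the other absorb paths, four interleave stages transpose each 4x4 group of 64-bit words so that ws[i] ends up holding word i of all four inputs. */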
+ Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, 
v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = Lib_IntVector_Intrinsics_vec256_xor(state[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = state[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = state[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = state[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(state[i + 15U], state[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + 
Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + state[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(state[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = state[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = state[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + state[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = state[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(state[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, state[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = state[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(state[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, state[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = state[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(state[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, state[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = state[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(state[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, state[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = state[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(state[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, state[1U + 5U * i])); + state[0U + 5U * i] = v07; + state[1U + 5U * i] = v17; + state[2U + 5U * i] = v27; + state[3U + 5U * i] = v37; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = state[0U]; + state[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } +} + +void +Hacl_Hash_SHA3_Simd256_shake128_absorb_final( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + uint32_t rem = inputByteLen % 168U; + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem1 = inputByteLen % 168U; + uint8_t *b01 = input0; + uint8_t *b11 = input1; + uint8_t *b21 = input2; + uint8_t *b31 = input3; + uint8_t *bl3 = 
b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[rem] = 0x1FU; + b12[rem] = 0x1FU; + b22[rem] = 0x1FU; + b32[rem] = 0x1FU; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws32 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + 
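+ /* The interleave_{low,high}64 / interleave_{low,high}128 pairs implement a 4x4 transpose of 64-bit words for each group of four vectors: afterwards ws[k] holds state word k of all four input blocks, one block per 64-bit lane, so a single vec256 XOR absorbs that word into all four Keccak states at once. */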
Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws[0U] = ws00; + ws[1U] = ws110; + ws[2U] = ws210; + ws[3U] = ws32; + ws[4U] = ws40; + ws[5U] = ws50; + ws[6U] = ws60; + ws[7U] = ws70; + ws[8U] = ws80; + ws[9U] = ws90; + ws[10U] = ws100; + ws[11U] = ws111; + ws[12U] = ws120; + ws[13U] = ws130; + ws[14U] = ws140; + ws[15U] = ws150; + ws[16U] = ws160; + ws[17U] = ws170; + ws[18U] = ws180; + ws[19U] = ws190; + ws[20U] = ws200; + ws[21U] = ws211; + ws[22U] = ws220; + ws[23U] = ws230; + ws[24U] = ws240; + ws[25U] = ws250; + ws[26U] = ws260; + ws[27U] = ws270; + ws[28U] = ws280; + ws[29U] = ws290; + ws[30U] = ws300; + ws[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = Lib_IntVector_Intrinsics_vec256_xor(state[i], ws[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[167U] = 0x80U; + b15[167U] = 0x80U; + b25[167U] = 0x80U; + b35[167U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws33[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b2 = b.snd.snd.fst; + uint8_t *b1 = b.snd.fst; + uint8_t *b0 = b.fst; + ws33[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + 
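+ /* b0..b3 point at the second padding block: all zeroes except byte 167, the last byte of the 168-byte SHAKE128 rate, which carries the closing 0x80 pad bit. It is loaded and transposed exactly like a data block before being XORed into the state. */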
ws33[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws33[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws33[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws33[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws33[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws33[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws33[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws33[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws33[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws33[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws33[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws33[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws33[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws33[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws33[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws33[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws33[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws33[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws33[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws33[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws33[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws33[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws33[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws33[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws33[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws33[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws33[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws33[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws33[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws33[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws33[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws33[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws33[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws33[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws33[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws33[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws33[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws33[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws33[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws33[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws33[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws33[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws33[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws33[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws33[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws33[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws33[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = 
ws33[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws33[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws33[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws33[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws33[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws33[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws33[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws33[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws33[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws33[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws33[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws33[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws33[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws33[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws33[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws33[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws33[0U] = ws0; + ws33[1U] = ws1; + ws33[2U] = ws2; + ws33[3U] = ws3; + ws33[4U] = ws4; + ws33[5U] = ws5; + ws33[6U] = ws6; + ws33[7U] = ws7; + ws33[8U] = ws8; + ws33[9U] = ws9; + ws33[10U] = ws10; + ws33[11U] = ws11; + ws33[12U] = ws12; + ws33[13U] = ws13; + ws33[14U] = ws14; + ws33[15U] = ws15; + ws33[16U] = ws16; + ws33[17U] = ws17; + ws33[18U] = ws18; + ws33[19U] = ws19; + ws33[20U] = ws20; + ws33[21U] = ws21; + ws33[22U] = ws22; + ws33[23U] = ws23; + ws33[24U] = ws24; + ws33[25U] = ws25; + ws33[26U] = ws26; + ws33[27U] = ws27; + ws33[28U] = ws28; + ws33[29U] = ws29; + ws33[30U] = ws30; + ws33[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = Lib_IntVector_Intrinsics_vec256_xor(state[i], ws33[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = state[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = state[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = state[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(state[i + 15U], state[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + state[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(state[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = state[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = 
Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = state[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + state[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = state[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(state[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v015 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, state[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = state[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(state[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v115 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, state[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = state[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(state[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v215 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, state[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = state[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(state[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v315 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, state[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = state[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(state[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, state[1U + 5U * i])); + state[0U + 5U * i] = v015; + state[1U + 5U * i] = v115; + state[2U + 5U * i] = v215; + state[3U + 5U * i] = v315; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____16 = state[0U]; + state[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } +} + +void +Hacl_Hash_SHA3_Simd256_shake128_squeeze_nblocks( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen +) +{ + for (uint32_t i0 = 0U; i0 < outputByteLen / 168U; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, state, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + 
v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + 
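+ /* Squeeze direction: the copy of the state in ws is run through the same transpose network so that, after the store64_le loop further down, hbuf holds each lane's output contiguously (lane k at byte offset k * 256), from which the first 168 rate bytes are copied to the corresponding output pointer. */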
Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b0 = output0; + uint8_t *b1 = output1; + uint8_t *b2 = output2; + uint8_t *b3 = output3; + memcpy(b0 + i0 * 168U, hbuf, 168U * sizeof (uint8_t)); + memcpy(b1 + i0 * 168U, hbuf + 256U, 168U * sizeof (uint8_t)); + memcpy(b2 + i0 * 168U, hbuf + 512U, 168U * sizeof (uint8_t)); + memcpy(b3 + i0 * 168U, hbuf + 768U, 168U * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = state[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = state[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = state[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + 
Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(state[i + 15U], state[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + state[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(state[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = state[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = state[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + state[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = state[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(state[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, state[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = state[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(state[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, state[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = state[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(state[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, state[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = state[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(state[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, state[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = state[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(state[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, state[1U + 5U * i])); + state[0U + 5U * i] = v07; + state[1U + 5U * i] = v17; + state[2U + 5U * i] = v27; + state[3U + 5U * i] = v37; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = state[0U]; + state[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } +} + diff --git a/src/msvc/Hacl_Hash_SHA3_Scalar.c b/src/msvc/Hacl_Hash_SHA3_Scalar.c new file mode 100644 index 000000000..6d6806a37 --- /dev/null +++ b/src/msvc/Hacl_Hash_SHA3_Scalar.c @@ -0,0 +1,2798 @@ +/* MIT License + * + * Copyright 
(c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + +#include "internal/Hacl_Hash_SHA3_Scalar.h" + +const +uint32_t +Hacl_Impl_SHA3_Vec_keccak_rotc[24U] = + { + 1U, 3U, 6U, 10U, 15U, 21U, 28U, 36U, 45U, 55U, 2U, 14U, 27U, 41U, 56U, 8U, 25U, 43U, 62U, 18U, + 39U, 61U, 20U, 44U + }; + +const +uint32_t +Hacl_Impl_SHA3_Vec_keccak_piln[24U] = + { + 10U, 7U, 11U, 17U, 18U, 3U, 5U, 16U, 8U, 21U, 24U, 4U, 15U, 23U, 19U, 13U, 12U, 2U, 20U, 14U, + 22U, 9U, 6U, 1U + }; + +const +uint64_t +Hacl_Impl_SHA3_Vec_keccak_rndc[24U] = + { + 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, 0x8000000080008000ULL, + 0x000000000000808bULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL, + 0x000000000000008aULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL, + 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL, + 0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800aULL, 0x800000008000000aULL, + 0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL + }; + +void +Hacl_Hash_SHA3_Scalar_shake128( + uint8_t *output, + uint32_t outputByteLen, + uint8_t *input, + uint32_t inputByteLen +) +{ + uint32_t rateInBytes = 168U; + uint64_t s[25U] = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 
= load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x1FU; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + 
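+ /* Final partial block: the rem trailing input bytes sit at the start of a zeroed 256-byte buffer, with 0x1F (SHAKE's 1111 domain suffix plus the first pad bit) at offset rem. All 32 words are loaded, but only s[0..24] are XORed below, and the buffer is zero beyond the 168-byte rate, so the oversized load is harmless. */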
uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | 
uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = outputByteLen % rateInBytes; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Scalar_shake256( + uint8_t *output, + uint32_t outputByteLen, + uint8_t *input, + uint32_t inputByteLen +) +{ + uint32_t rateInBytes = 136U; + uint64_t s[25U] = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + 
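+ /* SHAKE256 uses rateInBytes = 136 (capacity 512 bits), i.e. 17 message words per block; words 17..24 of the zero-initialized 256-byte staging buffer stay zero, so XORing all 25 state words in the loop below leaves the capacity part of the state untouched. */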
uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x1FU; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + 
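+ /* 0x1F encodes the SHAKE domain-separation suffix 1111 followed by the first bit of the pad10*1 padding; the fixed-output SHA3-224/256/384/512 functions use 0x06 instead (FIPS 202). */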
uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + 
KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = outputByteLen % rateInBytes; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_Scalar_sha3_224(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint32_t rateInBytes = 144U; + uint64_t s[25U] = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + 
uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = 
load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + 
uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < 28U / rateInBytes; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 28U % rateInBytes; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_Scalar_sha3_256(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint32_t rateInBytes = 136U; + uint64_t s[25U] = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes, rateInBytes * 
sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t 
u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 
+ 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < 32U / rateInBytes; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 32U % rateInBytes; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + 32U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_Scalar_sha3_384(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint32_t 
rateInBytes = 104U; + uint64_t s[25U] = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % rateInBytes; 
+ uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = 
u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < 48U / rateInBytes; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 48U % rateInBytes; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); 
+ for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + 48U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +void Hacl_Hash_SHA3_Scalar_sha3_512(uint8_t *output, uint8_t *input, uint32_t inputByteLen) +{ + uint32_t rateInBytes = 72U; + uint64_t s[25U] = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____1 = current; + s[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 
5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x06U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws0[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws0[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws0[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws0[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws0[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws0[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws0[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws0[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws0[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws0[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws0[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws0[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws0[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws0[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws0[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws0[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws0[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws0[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws0[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws0[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws0[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws0[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws0[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws0[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws0[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws0[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws0[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws0[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws0[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws0[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws0[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws0[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws0[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[rateInBytes - 1U] = 0x80U; + uint64_t ws1[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws1[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws1[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws1[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws1[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws1[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws1[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws1[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws1[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + ws1[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws1[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws1[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws1[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws1[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws1[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws1[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws1[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws1[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws1[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws1[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws1[19U] = u50; + uint64_t u51 = 
load64_le(b1 + 160U); + ws1[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws1[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws1[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws1[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws1[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws1[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws1[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws1[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws1[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws1[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws1[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws1[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = s[i] ^ ws1[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____2 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____2 << 1U | uu____2 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____3 = current; + s[_Y] = uu____3 << r | uu____3 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + s[0U] = s[0U] ^ c; + } + for (uint32_t i0 = 0U; i0 < 64U / rateInBytes; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____4 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____4 << 1U | uu____4 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i2 + 5U * i] = s[i2 + 5U * i] ^ _D;);); + uint64_t x = s[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = s[_Y]; + uint64_t uu____5 = current; + s[_Y] = uu____5 << r | uu____5 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]); + uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]); + uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]); + uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]); + uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + 
s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + s[0U] = s[0U] ^ c; + } + } + uint32_t remOut = 64U % rateInBytes; + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, s, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + 64U - remOut, hbuf, remOut * sizeof (uint8_t)); +} + +uint64_t *Hacl_Hash_SHA3_Scalar_state_malloc(void) +{ + uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t)); + return buf; +} + +void Hacl_Hash_SHA3_Scalar_state_free(uint64_t *s) +{ + KRML_HOST_FREE(s); +} + +void +Hacl_Hash_SHA3_Scalar_shake128_absorb_nblocks( + uint64_t *state, + uint8_t *input, + uint32_t inputByteLen +) +{ + for (uint32_t i0 = 0U; i0 < inputByteLen / 168U; i0++) + { + uint8_t b1[256U] = { 0U }; + uint8_t *b_ = b1; + uint8_t *b0 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b0 + i0 * 168U, 168U * sizeof (uint8_t)); + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u = load64_le(b); + ws[0U] = u; + uint64_t u0 = load64_le(b + 8U); + ws[1U] = u0; + uint64_t u1 = load64_le(b + 16U); + ws[2U] = u1; + uint64_t u2 = load64_le(b + 24U); + ws[3U] = u2; + uint64_t u3 = load64_le(b + 32U); + ws[4U] = u3; + uint64_t u4 = load64_le(b + 40U); + ws[5U] = u4; + uint64_t u5 = load64_le(b + 48U); + ws[6U] = u5; + uint64_t u6 = load64_le(b + 56U); + ws[7U] = u6; + uint64_t u7 = load64_le(b + 64U); + ws[8U] = u7; + uint64_t u8 = load64_le(b + 72U); + ws[9U] = u8; + uint64_t u9 = load64_le(b + 80U); + ws[10U] = u9; + uint64_t u10 = load64_le(b + 88U); + ws[11U] = u10; + uint64_t u11 = load64_le(b + 96U); + ws[12U] = u11; + uint64_t u12 = load64_le(b + 104U); + ws[13U] = u12; + uint64_t u13 = load64_le(b + 112U); + ws[14U] = u13; + uint64_t u14 = load64_le(b + 120U); + ws[15U] = u14; + uint64_t u15 = load64_le(b + 128U); + ws[16U] = u15; + uint64_t u16 = load64_le(b + 136U); + ws[17U] = u16; + uint64_t u17 = load64_le(b + 144U); + ws[18U] = u17; + uint64_t u18 = load64_le(b + 152U); + ws[19U] = u18; + uint64_t u19 = load64_le(b + 160U); + ws[20U] = u19; + uint64_t u20 = load64_le(b + 168U); + ws[21U] = u20; + uint64_t u21 = load64_le(b + 176U); + ws[22U] = u21; + uint64_t u22 = load64_le(b + 184U); + ws[23U] = u22; + uint64_t u23 = load64_le(b + 192U); + ws[24U] = u23; + uint64_t u24 = load64_le(b + 200U); + ws[25U] = u24; + uint64_t u25 = load64_le(b + 208U); + ws[26U] = u25; + uint64_t u26 = load64_le(b + 216U); + ws[27U] = u26; + uint64_t u27 = load64_le(b + 224U); + ws[28U] = u27; + uint64_t u28 = load64_le(b + 232U); + ws[29U] = u28; + uint64_t u29 = load64_le(b + 240U); + ws[30U] = u29; + uint64_t u30 = load64_le(b + 248U); + ws[31U] = u30; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = state[i] ^ ws[i]; + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = + state[i + + 0U] + ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i2 + 5U * i] = state[i2 + 5U * i] ^ _D;);); + uint64_t x = state[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = state[_Y]; + uint64_t uu____1 = current; + state[_Y] = uu____1 << r | uu____1 >> 
(64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]); + uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]); + uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]); + uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]); + uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]); + state[0U + 5U * i] = v0; + state[1U + 5U * i] = v1; + state[2U + 5U * i] = v2; + state[3U + 5U * i] = v3; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + state[0U] = state[0U] ^ c; + } + } +} + +void +Hacl_Hash_SHA3_Scalar_shake128_absorb_final( + uint64_t *state, + uint8_t *input, + uint32_t inputByteLen +) +{ + uint32_t rem = inputByteLen % 168U; + uint8_t b2[256U] = { 0U }; + uint8_t *b_ = b2; + uint32_t rem1 = inputByteLen % 168U; + uint8_t *b00 = input; + uint8_t *bl0 = b_; + memcpy(bl0, b00 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b01 = b_; + b01[rem] = 0x1FU; + uint64_t ws[32U] = { 0U }; + uint8_t *b = b_; + uint64_t u0 = load64_le(b); + ws[0U] = u0; + uint64_t u1 = load64_le(b + 8U); + ws[1U] = u1; + uint64_t u2 = load64_le(b + 16U); + ws[2U] = u2; + uint64_t u3 = load64_le(b + 24U); + ws[3U] = u3; + uint64_t u4 = load64_le(b + 32U); + ws[4U] = u4; + uint64_t u5 = load64_le(b + 40U); + ws[5U] = u5; + uint64_t u6 = load64_le(b + 48U); + ws[6U] = u6; + uint64_t u7 = load64_le(b + 56U); + ws[7U] = u7; + uint64_t u8 = load64_le(b + 64U); + ws[8U] = u8; + uint64_t u9 = load64_le(b + 72U); + ws[9U] = u9; + uint64_t u10 = load64_le(b + 80U); + ws[10U] = u10; + uint64_t u11 = load64_le(b + 88U); + ws[11U] = u11; + uint64_t u12 = load64_le(b + 96U); + ws[12U] = u12; + uint64_t u13 = load64_le(b + 104U); + ws[13U] = u13; + uint64_t u14 = load64_le(b + 112U); + ws[14U] = u14; + uint64_t u15 = load64_le(b + 120U); + ws[15U] = u15; + uint64_t u16 = load64_le(b + 128U); + ws[16U] = u16; + uint64_t u17 = load64_le(b + 136U); + ws[17U] = u17; + uint64_t u18 = load64_le(b + 144U); + ws[18U] = u18; + uint64_t u19 = load64_le(b + 152U); + ws[19U] = u19; + uint64_t u20 = load64_le(b + 160U); + ws[20U] = u20; + uint64_t u21 = load64_le(b + 168U); + ws[21U] = u21; + uint64_t u22 = load64_le(b + 176U); + ws[22U] = u22; + uint64_t u23 = load64_le(b + 184U); + ws[23U] = u23; + uint64_t u24 = load64_le(b + 192U); + ws[24U] = u24; + uint64_t u25 = load64_le(b + 200U); + ws[25U] = u25; + uint64_t u26 = load64_le(b + 208U); + ws[26U] = u26; + uint64_t u27 = load64_le(b + 216U); + ws[27U] = u27; + uint64_t u28 = load64_le(b + 224U); + ws[28U] = u28; + uint64_t u29 = load64_le(b + 232U); + ws[29U] = u29; + uint64_t u30 = load64_le(b + 240U); + ws[30U] = u30; + uint64_t u31 = load64_le(b + 248U); + ws[31U] = u31; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = state[i] ^ ws[i]; + } + uint8_t b3[256U] = { 0U }; + uint8_t *b4 = b3; + uint8_t *b0 = b4; + b0[167U] = 0x80U; + uint64_t ws0[32U] = { 0U }; + uint8_t *b1 = b4; + uint64_t u = load64_le(b1); + ws0[0U] = u; + uint64_t u32 = load64_le(b1 + 8U); + ws0[1U] = u32; + uint64_t u33 = load64_le(b1 + 16U); + ws0[2U] = u33; + uint64_t u34 = load64_le(b1 + 24U); + ws0[3U] = u34; + uint64_t u35 = load64_le(b1 + 32U); + ws0[4U] = u35; + uint64_t u36 = load64_le(b1 + 40U); + ws0[5U] = u36; + uint64_t u37 = load64_le(b1 + 48U); + ws0[6U] = u37; + uint64_t u38 = load64_le(b1 + 56U); + ws0[7U] = u38; + uint64_t u39 = load64_le(b1 + 64U); + 
ws0[8U] = u39; + uint64_t u40 = load64_le(b1 + 72U); + ws0[9U] = u40; + uint64_t u41 = load64_le(b1 + 80U); + ws0[10U] = u41; + uint64_t u42 = load64_le(b1 + 88U); + ws0[11U] = u42; + uint64_t u43 = load64_le(b1 + 96U); + ws0[12U] = u43; + uint64_t u44 = load64_le(b1 + 104U); + ws0[13U] = u44; + uint64_t u45 = load64_le(b1 + 112U); + ws0[14U] = u45; + uint64_t u46 = load64_le(b1 + 120U); + ws0[15U] = u46; + uint64_t u47 = load64_le(b1 + 128U); + ws0[16U] = u47; + uint64_t u48 = load64_le(b1 + 136U); + ws0[17U] = u48; + uint64_t u49 = load64_le(b1 + 144U); + ws0[18U] = u49; + uint64_t u50 = load64_le(b1 + 152U); + ws0[19U] = u50; + uint64_t u51 = load64_le(b1 + 160U); + ws0[20U] = u51; + uint64_t u52 = load64_le(b1 + 168U); + ws0[21U] = u52; + uint64_t u53 = load64_le(b1 + 176U); + ws0[22U] = u53; + uint64_t u54 = load64_le(b1 + 184U); + ws0[23U] = u54; + uint64_t u55 = load64_le(b1 + 192U); + ws0[24U] = u55; + uint64_t u56 = load64_le(b1 + 200U); + ws0[25U] = u56; + uint64_t u57 = load64_le(b1 + 208U); + ws0[26U] = u57; + uint64_t u58 = load64_le(b1 + 216U); + ws0[27U] = u58; + uint64_t u59 = load64_le(b1 + 224U); + ws0[28U] = u59; + uint64_t u60 = load64_le(b1 + 232U); + ws0[29U] = u60; + uint64_t u61 = load64_le(b1 + 240U); + ws0[30U] = u61; + uint64_t u62 = load64_le(b1 + 248U); + ws0[31U] = u62; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = state[i] ^ ws0[i]; + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = state[i + 0U] ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U])));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + uint64_t uu____0 = _C[(i1 + 1U) % 5U]; + uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i1 + 5U * i] = state[i1 + 5U * i] ^ _D;);); + uint64_t x = state[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = state[_Y]; + uint64_t uu____1 = current; + state[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]); + uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]); + uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]); + uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]); + uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]); + state[0U + 5U * i] = v0; + state[1U + 5U * i] = v1; + state[2U + 5U * i] = v2; + state[3U + 5U * i] = v3; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + state[0U] = state[0U] ^ c; + } +} + +void +Hacl_Hash_SHA3_Scalar_shake128_squeeze_nblocks( + uint64_t *state, + uint8_t *output, + uint32_t outputByteLen +) +{ + for (uint32_t i0 = 0U; i0 < outputByteLen / 168U; i0++) + { + uint8_t hbuf[256U] = { 0U }; + uint64_t ws[32U] = { 0U }; + memcpy(ws, state, 25U * sizeof (uint64_t)); + for (uint32_t i = 0U; i < 32U; i++) + { + store64_le(hbuf + i * 8U, ws[i]); + } + memcpy(output + i0 * 168U, hbuf, 168U * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + uint64_t _C[5U] = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + _C[i] = + state[i + + 0U] + ^ (state[i + 5U] ^ (state[i + 10U] ^ (state[i + 15U] ^ state[i + 20U])));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + uint64_t uu____0 = 
_C[(i2 + 1U) % 5U]; + uint64_t _D = _C[(i2 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U); + KRML_MAYBE_FOR5(i, 0U, 5U, 1U, state[i2 + 5U * i] = state[i2 + 5U * i] ^ _D;);); + uint64_t x = state[1U]; + uint64_t current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + uint64_t temp = state[_Y]; + uint64_t uu____1 = current; + state[_Y] = uu____1 << r | uu____1 >> (64U - r); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + uint64_t v0 = state[0U + 5U * i] ^ (~state[1U + 5U * i] & state[2U + 5U * i]); + uint64_t v1 = state[1U + 5U * i] ^ (~state[2U + 5U * i] & state[3U + 5U * i]); + uint64_t v2 = state[2U + 5U * i] ^ (~state[3U + 5U * i] & state[4U + 5U * i]); + uint64_t v3 = state[3U + 5U * i] ^ (~state[4U + 5U * i] & state[0U + 5U * i]); + uint64_t v4 = state[4U + 5U * i] ^ (~state[0U + 5U * i] & state[1U + 5U * i]); + state[0U + 5U * i] = v0; + state[1U + 5U * i] = v1; + state[2U + 5U * i] = v2; + state[3U + 5U * i] = v3; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + state[0U] = state[0U] ^ c; + } + } +} + diff --git a/src/msvc/Hacl_Hash_SHA3_Simd256.c b/src/msvc/Hacl_Hash_SHA3_Simd256.c new file mode 100644 index 000000000..9046f3dbe --- /dev/null +++ b/src/msvc/Hacl_Hash_SHA3_Simd256.c @@ -0,0 +1,11353 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + +#include "Hacl_Hash_SHA3_Simd256.h" + +#include "internal/Hacl_Hash_SHA3_Scalar.h" + +void +Hacl_Hash_SHA3_Simd256_shake128( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + uint32_t rateInBytes = 168U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl1, b11 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl2, b21 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl3, b31 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + 
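+ /* Each Lib_IntVector_Intrinsics_vec256_load64_le reads 32 bytes (four 64-bit words) from one input's 256-byte block buffer; ws[0..31] then holds all four zero-padded rate blocks before the lanes are transposed. */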
ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + 
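+ /* Each group of four vectors is transposed with interleave_{low,high}64 followed by interleave_{low,high}128, so every resulting vector holds the same 64-bit Keccak lane from all four inputs. */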
Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = 
ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * 
i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[rem] = 0x1FU; + b12[rem] = 0x1FU; + b22[rem] = 0x1FU; + b32[rem] = 0x1FU; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + 
v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; 
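+ /* The transposed lanes are written back into ws32; the loop just below XORs this 0x1F-padded final partial block into the 4-way state s. */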
+ ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes - 1U] = 0x80U; + b15[rateInBytes - 1U] = 0x80U; + b25[rateInBytes - 1U] = 0x80U; + b35[rateInBytes - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + 
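+ /* ws34 carries the second padding block: 0x80 set at position rateInBytes - 1 in each lane, absorbed on top of the 0x1F domain byte to complete the SHAKE128 pad10*1 padding. */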
Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + 
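+ /* Once this block has been absorbed, the state runs the 24 Keccak-f[1600] rounds (theta, rho/pi, chi, iota) one final time before squeezing begins. */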
Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = 
ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 
5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof 
(Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + 
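+ /* Squeeze path: the same transposition in reverse; the writeback order below (ws0, ws4, ws8, ...) regroups the lanes so that hbuf splits into four contiguous 256-byte slices, one per output stream. */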
Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + 
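The repeated interleave_low64/high64 followed by interleave_low128/high128 groups above implement a 4x4 transpose of 64-bit lanes: four vectors that each hold four consecutive state words of one Keccak instance are regrouped into four vectors that each hold the same word of all four instances (and back again before the stores). A minimal sketch of one such group, assuming, as in HACL*'s libintvector.h, that interleave_low64/high64 lower to _mm256_unpacklo/unpackhi_epi64 and interleave_low128/high128 to _mm256_permute2x128_si256 with selectors 0x20/0x31:

    #include <immintrin.h>

    /* 4x4 transpose of 64-bit lanes: on entry row[k] holds words w0..w3 of
       instance k (k = a,b,c,d); on exit row[j] holds word j of a,b,c,d. */
    static inline void transpose4x4_u64(__m256i row[4])
    {
      __m256i t0 = _mm256_unpacklo_epi64(row[0], row[1]); /* a0 b0 a2 b2 */
      __m256i t1 = _mm256_unpackhi_epi64(row[0], row[1]); /* a1 b1 a3 b3 */
      __m256i t2 = _mm256_unpacklo_epi64(row[2], row[3]); /* c0 d0 c2 d2 */
      __m256i t3 = _mm256_unpackhi_epi64(row[2], row[3]); /* c1 d1 c3 d3 */
      row[0] = _mm256_permute2x128_si256(t0, t2, 0x20);   /* a0 b0 c0 d0 */
      row[1] = _mm256_permute2x128_si256(t1, t3, 0x20);   /* a1 b1 c1 d1 */
      row[2] = _mm256_permute2x128_si256(t0, t2, 0x31);   /* a2 b2 c2 d2 */
      row[3] = _mm256_permute2x128_si256(t1, t3, 0x31);   /* a3 b3 c3 d3 */
    }

The extracted code spells this shuffle out once per 4-vector group; its ws0 = v0__, ws1 = v2__, ws2 = v1__, ws3 = v3__ assignments are exactly the row permutation in the sketch.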
ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes, hbuf + 256U, rateInBytes * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes, hbuf + 512U, rateInBytes * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes, hbuf + 768U, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + 
Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = outputByteLen % rateInBytes; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; 
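The 24-iteration loop above is one application of the Keccak-f[1600] permutation, run lane-wise over all four instances at once: theta (the _C/_D column parities), rho+pi (rotation by keccak_rotc[i] into position keccak_piln[i]), chi (the x ^ (~y & z) row step), and iota (XOR of keccak_rndc[i1] into s[0]). For orientation, a scalar single-instance round over uint64_t, assuming tables with the same contents as Hacl_Impl_SHA3_Vec_keccak_piln/rotc (a reference sketch, not the extracted code):

    #include <stdint.h>

    static inline uint64_t rotl64(uint64_t x, uint32_t n) /* 1 <= n <= 63 */
    {
      return (x << n) | (x >> (64U - n));
    }

    /* One Keccak-f[1600] round on a 5x5 state indexed as s[x + 5*y]. */
    static void keccak_round(uint64_t s[25], uint64_t rndc,
                             const uint32_t piln[24], const uint32_t rotc[24])
    {
      uint64_t C[5];
      for (uint32_t x = 0U; x < 5U; x++)                       /* theta */
        C[x] = s[x] ^ s[x + 5U] ^ s[x + 10U] ^ s[x + 15U] ^ s[x + 20U];
      for (uint32_t x = 0U; x < 5U; x++) {
        uint64_t D = C[(x + 4U) % 5U] ^ rotl64(C[(x + 1U) % 5U], 1U);
        for (uint32_t y = 0U; y < 5U; y++)
          s[x + 5U * y] ^= D;
      }
      uint64_t current = s[1U];
      for (uint32_t i = 0U; i < 24U; i++) {                    /* rho + pi */
        uint64_t temp = s[piln[i]];
        s[piln[i]] = rotl64(current, rotc[i]);
        current = temp;
      }
      for (uint32_t y = 0U; y < 5U; y++) {                     /* chi */
        uint64_t row[5];
        for (uint32_t x = 0U; x < 5U; x++) row[x] = s[x + 5U * y];
        for (uint32_t x = 0U; x < 5U; x++)
          s[x + 5U * y] = row[x] ^ (~row[(x + 1U) % 5U] & row[(x + 2U) % 5U]);
      }
      s[0U] ^= rndc;                                           /* iota */
    }

Each vec256 operation in the diff is the corresponding uint64_t operation applied to four independent states in parallel; only the rotations need the shift_left64/shift_right64/or expansion, since AVX2 has no 64-bit rotate instruction.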
+ Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + 
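As in the full-block squeeze earlier, each squeeze step stores the 32 transposed vectors little-endian into the 1024-byte hbuf (32 vectors x 32 bytes), which lays the four instances out as contiguous 256-byte lanes at offsets 0, 256, 512 and 768; a rateInBytes-sized prefix of each lane is then copied to the corresponding output stream. A sketch of the copy for block i (out0..out3 and rate are placeholder names for the generated b0/b1/b2/b36 and rateInBytes):

    /* Copy block i of the 4-way squeeze to the four output streams. */
    memcpy(out0 + i * rate, hbuf + 0U,   rate);
    memcpy(out1 + i * rate, hbuf + 256U, rate);
    memcpy(out2 + i * rate, hbuf + 512U, rate);
    memcpy(out3 + i * rate, hbuf + 768U, rate);

The final partial block uses the same layout, copying remOut = outputByteLen % rateInBytes bytes to offset outputByteLen - remOut instead.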
Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, 
v3_22);
+  Lib_IntVector_Intrinsics_vec256
+  v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22);
+  Lib_IntVector_Intrinsics_vec256 ws28 = v0__22;
+  Lib_IntVector_Intrinsics_vec256 ws29 = v2__22;
+  Lib_IntVector_Intrinsics_vec256 ws30 = v1__22;
+  Lib_IntVector_Intrinsics_vec256 ws31 = v3__22;
+  ws[0U] = ws0;
+  ws[1U] = ws4;
+  ws[2U] = ws8;
+  ws[3U] = ws12;
+  ws[4U] = ws16;
+  ws[5U] = ws20;
+  ws[6U] = ws24;
+  ws[7U] = ws28;
+  ws[8U] = ws1;
+  ws[9U] = ws5;
+  ws[10U] = ws9;
+  ws[11U] = ws13;
+  ws[12U] = ws17;
+  ws[13U] = ws21;
+  ws[14U] = ws25;
+  ws[15U] = ws29;
+  ws[16U] = ws2;
+  ws[17U] = ws6;
+  ws[18U] = ws10;
+  ws[19U] = ws14;
+  ws[20U] = ws18;
+  ws[21U] = ws22;
+  ws[22U] = ws26;
+  ws[23U] = ws30;
+  ws[24U] = ws3;
+  ws[25U] = ws7;
+  ws[26U] = ws11;
+  ws[27U] = ws15;
+  ws[28U] = ws19;
+  ws[29U] = ws23;
+  ws[30U] = ws27;
+  ws[31U] = ws31;
+  for (uint32_t i = 0U; i < 32U; i++)
+  {
+    Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]);
+  }
+  uint8_t *b36 = rb.snd.snd.snd;
+  uint8_t *b2 = rb.snd.snd.fst;
+  uint8_t *b1 = rb.snd.fst;
+  uint8_t *b0 = rb.fst;
+  memcpy(b0 + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t));
+  memcpy(b1 + outputByteLen - remOut, hbuf + 256U, remOut * sizeof (uint8_t));
+  memcpy(b2 + outputByteLen - remOut, hbuf + 512U, remOut * sizeof (uint8_t));
+  memcpy(b36 + outputByteLen - remOut, hbuf + 768U, remOut * sizeof (uint8_t));
+}
+
+void
+Hacl_Hash_SHA3_Simd256_shake256(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint32_t outputByteLen,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+)
+{
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } };
+  uint32_t rateInBytes = 136U;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U };
+  for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++)
+  {
+    uint8_t b00[256U] = { 0U };
+    uint8_t b10[256U] = { 0U };
+    uint8_t b20[256U] = { 0U };
+    uint8_t b30[256U] = { 0U };
+    K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+    b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+    uint8_t *b31 = ib.snd.snd.snd;
+    uint8_t *b21 = ib.snd.snd.fst;
+    uint8_t *b11 = ib.snd.fst;
+    uint8_t *b01 = ib.fst;
+    uint8_t *bl3 = b_.snd.snd.snd;
+    uint8_t *bl2 = b_.snd.snd.fst;
+    uint8_t *bl1 = b_.snd.fst;
+    uint8_t *bl0 = b_.fst;
+    memcpy(bl0, b01 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl1, b11 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl2, b21 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl3, b31 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
+    uint8_t *b3 = b_.snd.snd.snd;
+    uint8_t *b2 = b_.snd.snd.fst;
+    uint8_t *b1 = b_.snd.fst;
+    uint8_t *b0 = b_.fst;
+    ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0);
+    ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1);
+    ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2);
+    ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3);
+    ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U);
+    ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U);
+    ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 +
32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, 
v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + 
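Hacl_Hash_SHA3_Simd256_shake256, whose body this is, runs four independent SHAKE256 computations of equal input and output length in lock-step, one per 64-bit lane of each AVX2 vector. A minimal usage sketch (buffer sizes arbitrary; the header name is assumed from the file naming convention):

    #include <stdint.h>
    #include "Hacl_Hash_SHA3_Simd256.h"  /* assumed header for this unit */

    int main(void)
    {
      uint8_t in0[32] = { 0U }, in1[32] = { 1U }, in2[32] = { 2U }, in3[32] = { 3U };
      uint8_t out0[64], out1[64], out2[64], out3[64];
      /* Four SHAKE256 instances, absorbed and squeezed in parallel. */
      Hacl_Hash_SHA3_Simd256_shake256(out0, out1, out2, out3, 64U,
                                      in0, in1, in2, in3, 32U);
      return 0;
    }

Note that all four inputs must share inputByteLen and all four outputs must share outputByteLen; callers with unequal lengths need separate calls or the scalar API.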
Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + 
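The remainder handling that follows implements SHAKE's pad10*1 with the 0x1F domain separator, split across two buffers that are both XORed into the state before a single permutation: the first carries the leftover message bytes plus 0x1F at offset rem, the second carries 0x80 in its last byte. Because absorption is XOR, this is equivalent to padding one block, and the two bytes merge to 0x9F in the edge case rem == rateInBytes - 1. A scalar sketch of the combined step (input stands for any one of input0..input3):

    /* SHAKE256 pad10*1 over one rate-sized block (rate = 136 bytes). */
    uint8_t block[136U] = { 0U };
    uint32_t rem = inputByteLen % 136U;             /* leftover bytes */
    memcpy(block, input + inputByteLen - rem, rem);
    block[rem] ^= 0x1FU;        /* 4-bit suffix 1111 plus first pad bit */
    block[135U] ^= 0x80U;       /* final pad bit; 0x9F when rem == 135 */
    /* XOR block into the state little-endian, then run Keccak-f[1600]. */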
Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[rem] = 0x1FU; + b12[rem] = 0x1FU; + b22[rem] = 0x1FU; + b32[rem] = 0x1FU; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes - 1U] = 0x80U; + b15[rateInBytes - 1U] = 0x80U; + b25[rateInBytes - 1U] = 0x80U; + b35[rateInBytes - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = 
ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = 
v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + 
uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < outputByteLen / rateInBytes; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + 
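+ /* Squeeze path: each interleave_{low,high}64 / interleave_{low,high}128 quartet
+    un-transposes a 4x4 block of 64-bit state words, so that every output lane's
+    bytes end up contiguous once the vectors are stored into hbuf below. */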
Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, 
v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes, hbuf + 256U, rateInBytes * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes, hbuf + 512U, rateInBytes * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes, hbuf + 768U, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + 
Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = outputByteLen % rateInBytes; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 
= v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + 
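+ /* After these eight 4x4 transposes, ws[] is rewritten in lane-major order
+    (ws0, ws4, ws8, ...) and stored little-endian into hbuf, from which the
+    remOut tail bytes of each of the four outputs are copied. */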
Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + outputByteLen - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + outputByteLen - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + outputByteLen - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b36 + outputByteLen - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_224( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + uint32_t rateInBytes = 144U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / 
rateInBytes; i0++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl1, b11 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl2, b21 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl3, b31 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + 
v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = 
ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + 
Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; 
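+ /* Final-block padding for SHA3-224 (rateInBytes = (1600 - 2*224)/8 = 144):
+    the rem1 = inputByteLen % rateInBytes trailing bytes of each lane are copied
+    into zeroed scratch blocks, the domain-separation byte 0x06 goes at offset
+    rem, and the closing 0x80 is set at rateInBytes - 1 in a second block below. */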
+ uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[rem] = 0x06U; + b12[rem] = 0x06U; + b22[rem] = 0x06U; + b32[rem] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); 
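+ /* This interleave network transposes the loaded blocks so that ws32[i] packs
+    64-bit word i from all four lanes into one vec256; the result is XORed into
+    the Keccak state s[0..24] once the transpose completes. */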
+ Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + 
Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes - 1U] = 0x80U; + b15[rateInBytes - 1U] = 0x80U; + b25[rateInBytes - 1U] = 0x80U; + b35[rateInBytes - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = 
b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + 
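+ /* ws34 holds the all-zero block whose last byte was set to 0x80 above (the closing
+    Keccak pad bit); the same 4x4 lane transpose is applied to it before it is XORed
+    into the state s. */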
Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = 
v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + 
s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < 28U / rateInBytes; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + 
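+ /* Squeeze: the state was copied into ws and the inverse lane transpose now restores
+    per-output word order; the 32 store64_le calls then fill the four 256-byte regions
+    of hbuf (offsets 0, 256, 512, 768) from which each output's rateInBytes are copied. */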
Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, 
v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes, hbuf + 256U, rateInBytes * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes, hbuf + 512U, rateInBytes * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes, hbuf + 768U, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + 
Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 28U % rateInBytes; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) 
Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + 
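+ /* Final partial squeeze: the same transpose-and-store path emits the last
+    remOut = 28 % rateInBytes bytes of each of the four 28-byte digests. */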
Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; 
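+ /* A minimal usage sketch for the 4-way API defined just below, assuming four
+    equal-length messages m0..m3 of length len and 32-byte digest buffers d0..d3
+    (all names hypothetical):
+      uint8_t d0[32U], d1[32U], d2[32U], d3[32U];
+      Hacl_Hash_SHA3_Simd256_sha3_256(d0, d1, d2, d3, m0, m1, m2, m3, len);
+    One call hashes all four inputs in a single pass over the shared vec256 state. */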
+ ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 28U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 28U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 28U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b36 + 28U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_256( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + uint32_t rateInBytes = 136U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl1, b11 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl2, b21 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl3, b31 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + 
ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12);
+    Lib_IntVector_Intrinsics_vec256 v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12);
+    Lib_IntVector_Intrinsics_vec256 v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32);
+    Lib_IntVector_Intrinsics_vec256 v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32);
+    Lib_IntVector_Intrinsics_vec256 v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1);
+    Lib_IntVector_Intrinsics_vec256 v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1);
+    Lib_IntVector_Intrinsics_vec256 v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1);
+    Lib_IntVector_Intrinsics_vec256 v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1);
+    Lib_IntVector_Intrinsics_vec256 ws8 = v0__1;
+    Lib_IntVector_Intrinsics_vec256 ws9 = v2__1;
+    Lib_IntVector_Intrinsics_vec256 ws10 = v1__1;
+    Lib_IntVector_Intrinsics_vec256 ws11 = v3__1;
+    Lib_IntVector_Intrinsics_vec256 v03 = ws[12U];
+    Lib_IntVector_Intrinsics_vec256 v13 = ws[13U];
+    Lib_IntVector_Intrinsics_vec256 v23 = ws[14U];
+    Lib_IntVector_Intrinsics_vec256 v33 = ws[15U];
+    Lib_IntVector_Intrinsics_vec256 v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13);
+    Lib_IntVector_Intrinsics_vec256 v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13);
+    Lib_IntVector_Intrinsics_vec256 v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33);
+    Lib_IntVector_Intrinsics_vec256 v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33);
+    Lib_IntVector_Intrinsics_vec256 v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2);
+    Lib_IntVector_Intrinsics_vec256 v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2);
+    Lib_IntVector_Intrinsics_vec256 v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2);
+    Lib_IntVector_Intrinsics_vec256 v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2);
+    Lib_IntVector_Intrinsics_vec256 ws12 = v0__2;
+    Lib_IntVector_Intrinsics_vec256 ws13 = v2__2;
+    Lib_IntVector_Intrinsics_vec256 ws14 = v1__2;
+    Lib_IntVector_Intrinsics_vec256 ws15 = v3__2;
+    Lib_IntVector_Intrinsics_vec256 v04 = ws[16U];
+    Lib_IntVector_Intrinsics_vec256 v14 = ws[17U];
+    Lib_IntVector_Intrinsics_vec256 v24 = ws[18U];
+    Lib_IntVector_Intrinsics_vec256 v34 = ws[19U];
+    Lib_IntVector_Intrinsics_vec256 v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14);
+    Lib_IntVector_Intrinsics_vec256 v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14);
+    Lib_IntVector_Intrinsics_vec256 v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34);
+    Lib_IntVector_Intrinsics_vec256 v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34);
+    Lib_IntVector_Intrinsics_vec256 v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3);
+    Lib_IntVector_Intrinsics_vec256 v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3);
+    Lib_IntVector_Intrinsics_vec256 v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3);
+    Lib_IntVector_Intrinsics_vec256 v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3);
+    Lib_IntVector_Intrinsics_vec256 ws16 = v0__3;
+    Lib_IntVector_Intrinsics_vec256 ws17 = v2__3;
+    Lib_IntVector_Intrinsics_vec256 ws18 = v1__3;
+    Lib_IntVector_Intrinsics_vec256 ws19 = v3__3;
+    Lib_IntVector_Intrinsics_vec256 v05 = ws[20U];
+    Lib_IntVector_Intrinsics_vec256 v15 = ws[21U];
+    Lib_IntVector_Intrinsics_vec256 v25 = ws[22U];
+    Lib_IntVector_Intrinsics_vec256 v35 = ws[23U];
+    Lib_IntVector_Intrinsics_vec256 v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15);
+    Lib_IntVector_Intrinsics_vec256 v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15);
+    Lib_IntVector_Intrinsics_vec256 v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35);
+    Lib_IntVector_Intrinsics_vec256 v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35);
+    Lib_IntVector_Intrinsics_vec256 v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4);
+    Lib_IntVector_Intrinsics_vec256 v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4);
+    Lib_IntVector_Intrinsics_vec256 v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4);
+    Lib_IntVector_Intrinsics_vec256 v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4);
+    Lib_IntVector_Intrinsics_vec256 ws20 = v0__4;
+    Lib_IntVector_Intrinsics_vec256 ws21 = v2__4;
+    Lib_IntVector_Intrinsics_vec256 ws22 = v1__4;
+    Lib_IntVector_Intrinsics_vec256 ws23 = v3__4;
+    Lib_IntVector_Intrinsics_vec256 v06 = ws[24U];
+    Lib_IntVector_Intrinsics_vec256 v16 = ws[25U];
+    Lib_IntVector_Intrinsics_vec256 v26 = ws[26U];
+    Lib_IntVector_Intrinsics_vec256 v36 = ws[27U];
+    Lib_IntVector_Intrinsics_vec256 v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16);
+    Lib_IntVector_Intrinsics_vec256 v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16);
+    Lib_IntVector_Intrinsics_vec256 v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36);
+    Lib_IntVector_Intrinsics_vec256 v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36);
+    Lib_IntVector_Intrinsics_vec256 v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5);
+    Lib_IntVector_Intrinsics_vec256 v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5);
+    Lib_IntVector_Intrinsics_vec256 v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5);
+    Lib_IntVector_Intrinsics_vec256 v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5);
+    Lib_IntVector_Intrinsics_vec256 ws24 = v0__5;
+    Lib_IntVector_Intrinsics_vec256 ws25 = v2__5;
+    Lib_IntVector_Intrinsics_vec256 ws26 = v1__5;
+    Lib_IntVector_Intrinsics_vec256 ws27 = v3__5;
+    Lib_IntVector_Intrinsics_vec256 v0 = ws[28U];
+    Lib_IntVector_Intrinsics_vec256 v1 = ws[29U];
+    Lib_IntVector_Intrinsics_vec256 v2 = ws[30U];
+    Lib_IntVector_Intrinsics_vec256 v3 = ws[31U];
+    Lib_IntVector_Intrinsics_vec256 v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
+    Lib_IntVector_Intrinsics_vec256 v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
+    Lib_IntVector_Intrinsics_vec256 v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
+    Lib_IntVector_Intrinsics_vec256 v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
+    Lib_IntVector_Intrinsics_vec256 v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6);
+    Lib_IntVector_Intrinsics_vec256 v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6);
+    Lib_IntVector_Intrinsics_vec256 v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6);
+    Lib_IntVector_Intrinsics_vec256 v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6);
+    Lib_IntVector_Intrinsics_vec256 ws28 = v0__6;
+    Lib_IntVector_Intrinsics_vec256 ws29 = v2__6;
+    Lib_IntVector_Intrinsics_vec256 ws30 = v1__6;
+    Lib_IntVector_Intrinsics_vec256 ws31 = v3__6;
+    ws[0U] = ws0;
+    ws[1U] = ws1;
+    ws[2U] = ws2;
+    ws[3U] = ws3;
+    ws[4U] = ws4;
+    ws[5U] = ws5;
+    ws[6U] = ws6;
+    ws[7U] = ws7;
+    ws[8U] = ws8;
+    ws[9U] = ws9;
+    ws[10U] = ws10;
+    ws[11U] = ws11;
+    ws[12U] = ws12;
+    ws[13U] = ws13;
+    ws[14U] = ws14;
+    ws[15U] = ws15;
+    ws[16U] = ws16;
+    ws[17U] = ws17;
+    ws[18U] = ws18;
+    ws[19U] = ws19;
+    ws[20U] = ws20;
+    ws[21U] = ws21;
+    ws[22U] = ws22;
+    ws[23U] = ws23;
+    ws[24U] = ws24;
+    ws[25U] = ws25;
+    ws[26U] = ws26;
+    ws[27U] = ws27;
+    ws[28U] = ws28;
+    ws[29U] = ws29;
+    ws[30U] = ws30;
+    ws[31U] = ws31;
+    for (uint32_t i = 0U; i < 25U; i++)
+    {
+      s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]);
+    }
+    for (uint32_t i1 = 0U; i1 < 24U; i1++)
+    {
+      KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U };
+      KRML_MAYBE_FOR5(i,
+        0U,
+        5U,
+        1U,
+        Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U];
+        Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U];
+        Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U];
+        _C[i] =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____0,
+            Lib_IntVector_Intrinsics_vec256_xor(uu____1,
+              Lib_IntVector_Intrinsics_vec256_xor(uu____2,
+                Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); 
+      KRML_MAYBE_FOR5(i2,
+        0U,
+        5U,
+        1U,
+        Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U];
+        Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U];
+        Lib_IntVector_Intrinsics_vec256 _D =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____3,
+            Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, 1U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U)));
+        KRML_MAYBE_FOR5(i,
+          0U,
+          5U,
+          1U,
+          s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D);););
+      Lib_IntVector_Intrinsics_vec256 x = s[1U];
+      Lib_IntVector_Intrinsics_vec256 current = x;
+      for (uint32_t i = 0U; i < 24U; i++)
+      {
+        uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i];
+        uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i];
+        Lib_IntVector_Intrinsics_vec256 temp = s[_Y];
+        Lib_IntVector_Intrinsics_vec256 uu____5 = current;
+        s[_Y] =
+          Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, r),
+            Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r));
+        current = temp;
+      }
+      KRML_MAYBE_FOR5(i,
+        0U,
+        5U,
+        1U,
+        Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256 uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256 v07 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____6,
+            Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256 uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256 v17 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____8,
+            Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256 uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256 v27 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____10,
+            Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256 uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256 v37 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____12,
+            Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256 uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256 v4 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____14,
+            Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i]));
+        s[0U + 5U * i] = v07;
+        s[1U + 5U * i] = v17;
+        s[2U + 5U * i] = v27;
+        s[3U + 5U * i] = v37;
+        s[4U + 5U * i] = v4;);
+      uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1];
+      Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U];
+      s[0U] = Lib_IntVector_Intrinsics_vec256_xor(uu____16, Lib_IntVector_Intrinsics_vec256_load64(c));
+    }
+  }
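+  /* Absorb the last, partial blocks: the rem leftover bytes of each of the
+     four inputs are copied into zero-padded 256-byte blocks, and the SHA-3
+     domain-separation/padding byte 0x06 is appended to every lane. */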
+  uint32_t rem = inputByteLen % rateInBytes;
+  uint8_t b00[256U] = { 0U };
+  uint8_t b10[256U] = { 0U };
+  uint8_t b20[256U] = { 0U };
+  uint8_t b30[256U] = { 0U };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+  uint32_t rem1 = inputByteLen % rateInBytes;
+  uint8_t *b31 = ib.snd.snd.snd;
+  uint8_t *b21 = ib.snd.snd.fst;
+  uint8_t *b11 = ib.snd.fst;
+  uint8_t *b01 = ib.fst;
+  uint8_t *bl3 = b_.snd.snd.snd;
+  uint8_t *bl2 = b_.snd.snd.fst;
+  uint8_t *bl1 = b_.snd.fst;
+  uint8_t *bl0 = b_.fst;
+  memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
+  memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
+  memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
+  memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
+  uint8_t *b32 = b_.snd.snd.snd;
+  uint8_t *b22 = b_.snd.snd.fst;
+  uint8_t *b12 = b_.snd.fst;
+  uint8_t *b02 = b_.fst;
+  b02[rem] = 0x06U;
+  b12[rem] = 0x06U;
+  b22[rem] = 0x06U;
+  b32[rem] = 0x06U;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U };
+  uint8_t *b33 = b_.snd.snd.snd;
+  uint8_t *b23 = b_.snd.snd.fst;
+  uint8_t *b13 = b_.snd.fst;
+  uint8_t *b03 = b_.fst;
+  ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03);
+  ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13);
+  ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23);
+  ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33);
+  ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U);
+  ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U);
+  ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U);
+  ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U);
+  ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U);
+  ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U);
+  ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U);
+  ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U);
+  ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U);
+  ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U);
+  ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U);
+  ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U);
+  ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U);
+  ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U);
+  ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U);
+  ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U);
+  ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U);
+  ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U);
+  ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U);
+  ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U);
+  ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U);
+  ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U);
+  ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U);
+  ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U);
+  ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U);
+  ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U);
+  ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U);
+  ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U);
+  Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U];
+  Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U];
+  Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U];
+  Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U];
+  Lib_IntVector_Intrinsics_vec256 v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10);
+  Lib_IntVector_Intrinsics_vec256 v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10);
+  Lib_IntVector_Intrinsics_vec256 v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30);
+  Lib_IntVector_Intrinsics_vec256 v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30);
+  Lib_IntVector_Intrinsics_vec256 v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_);
+  Lib_IntVector_Intrinsics_vec256 v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_);
+  Lib_IntVector_Intrinsics_vec256 v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_);
+  Lib_IntVector_Intrinsics_vec256 v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_);
+  Lib_IntVector_Intrinsics_vec256 ws00 = v0__;
+  Lib_IntVector_Intrinsics_vec256 ws110 = v2__;
+  Lib_IntVector_Intrinsics_vec256 ws210 = v1__;
+  Lib_IntVector_Intrinsics_vec256 ws33 = v3__;
+  Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U];
+  Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U];
+  Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U];
+  Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U];
+  Lib_IntVector_Intrinsics_vec256 v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11);
+  Lib_IntVector_Intrinsics_vec256 v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11);
+  Lib_IntVector_Intrinsics_vec256 v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31);
+  Lib_IntVector_Intrinsics_vec256 v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31);
+  Lib_IntVector_Intrinsics_vec256 v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0);
+  Lib_IntVector_Intrinsics_vec256 v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0);
+  Lib_IntVector_Intrinsics_vec256 v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0);
+  Lib_IntVector_Intrinsics_vec256 v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0);
+  Lib_IntVector_Intrinsics_vec256 ws40 = v0__0;
+  Lib_IntVector_Intrinsics_vec256 ws50 = v2__0;
+  Lib_IntVector_Intrinsics_vec256 ws60 = v1__0;
+  Lib_IntVector_Intrinsics_vec256 ws70 = v3__0;
+  Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U];
+  Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U];
+  Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U];
+  Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U];
+  Lib_IntVector_Intrinsics_vec256 v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12);
+  Lib_IntVector_Intrinsics_vec256 v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12);
+  Lib_IntVector_Intrinsics_vec256 v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32);
+  Lib_IntVector_Intrinsics_vec256 v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32);
+  Lib_IntVector_Intrinsics_vec256 v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1);
+  Lib_IntVector_Intrinsics_vec256 v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1);
+  Lib_IntVector_Intrinsics_vec256 v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1);
+  Lib_IntVector_Intrinsics_vec256 v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1);
+  Lib_IntVector_Intrinsics_vec256 ws80 = v0__1;
+  Lib_IntVector_Intrinsics_vec256 ws90 = v2__1;
+  Lib_IntVector_Intrinsics_vec256 ws100 = v1__1;
+  Lib_IntVector_Intrinsics_vec256 ws111 = v3__1;
+  Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U];
+  Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U];
+  Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U];
+  Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U];
+  Lib_IntVector_Intrinsics_vec256 v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13);
+  Lib_IntVector_Intrinsics_vec256 v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13);
+  Lib_IntVector_Intrinsics_vec256 v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33);
+  Lib_IntVector_Intrinsics_vec256 v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33);
+  Lib_IntVector_Intrinsics_vec256 v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2);
+  Lib_IntVector_Intrinsics_vec256 v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2);
+  Lib_IntVector_Intrinsics_vec256 v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2);
+  Lib_IntVector_Intrinsics_vec256 v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2);
+  Lib_IntVector_Intrinsics_vec256 ws120 = v0__2;
+  Lib_IntVector_Intrinsics_vec256 ws130 = v2__2;
+  Lib_IntVector_Intrinsics_vec256 ws140 = v1__2;
+  Lib_IntVector_Intrinsics_vec256 ws150 = v3__2;
+  Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U];
+  Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U];
+  Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U];
+  Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U];
+  Lib_IntVector_Intrinsics_vec256 v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14);
+  Lib_IntVector_Intrinsics_vec256 v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14);
+  Lib_IntVector_Intrinsics_vec256 v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34);
+  Lib_IntVector_Intrinsics_vec256 v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34);
+  Lib_IntVector_Intrinsics_vec256 v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3);
+  Lib_IntVector_Intrinsics_vec256 v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3);
+  Lib_IntVector_Intrinsics_vec256 v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3);
+  Lib_IntVector_Intrinsics_vec256 v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3);
+  Lib_IntVector_Intrinsics_vec256 ws160 = v0__3;
+  Lib_IntVector_Intrinsics_vec256 ws170 = v2__3;
+  Lib_IntVector_Intrinsics_vec256 ws180 = v1__3;
+  Lib_IntVector_Intrinsics_vec256 ws190 = v3__3;
+  Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U];
+  Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U];
+  Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U];
+  Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U];
+  Lib_IntVector_Intrinsics_vec256 v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15);
+  Lib_IntVector_Intrinsics_vec256 v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15);
+  Lib_IntVector_Intrinsics_vec256 v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35);
+  Lib_IntVector_Intrinsics_vec256 v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35);
+  Lib_IntVector_Intrinsics_vec256 v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4);
+  Lib_IntVector_Intrinsics_vec256 v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4);
+  Lib_IntVector_Intrinsics_vec256 v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4);
+  Lib_IntVector_Intrinsics_vec256 v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4);
+  Lib_IntVector_Intrinsics_vec256 ws200 = v0__4;
+  Lib_IntVector_Intrinsics_vec256 ws211 = v2__4;
+  Lib_IntVector_Intrinsics_vec256 ws220 = v1__4;
+  Lib_IntVector_Intrinsics_vec256 ws230 = v3__4;
+  Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U];
+  Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U];
+  Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U];
+  Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U];
+  Lib_IntVector_Intrinsics_vec256 v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16);
+  Lib_IntVector_Intrinsics_vec256 v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16);
+  Lib_IntVector_Intrinsics_vec256 v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36);
+  Lib_IntVector_Intrinsics_vec256 v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36);
+  Lib_IntVector_Intrinsics_vec256 v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5);
+  Lib_IntVector_Intrinsics_vec256 v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5);
+  Lib_IntVector_Intrinsics_vec256 v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5);
+  Lib_IntVector_Intrinsics_vec256 v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5);
+  Lib_IntVector_Intrinsics_vec256 ws240 = v0__5;
+  Lib_IntVector_Intrinsics_vec256 ws250 = v2__5;
+  Lib_IntVector_Intrinsics_vec256 ws260 = v1__5;
+  Lib_IntVector_Intrinsics_vec256 ws270 = v3__5;
+  Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U];
+  Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U];
+  Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U];
+  Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U];
+  Lib_IntVector_Intrinsics_vec256 v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17);
+  Lib_IntVector_Intrinsics_vec256 v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17);
+  Lib_IntVector_Intrinsics_vec256 v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37);
+  Lib_IntVector_Intrinsics_vec256 v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37);
+  Lib_IntVector_Intrinsics_vec256 v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6);
+  Lib_IntVector_Intrinsics_vec256 v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6);
+  Lib_IntVector_Intrinsics_vec256 v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6);
+  Lib_IntVector_Intrinsics_vec256 v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6);
+  Lib_IntVector_Intrinsics_vec256 ws280 = v0__6;
+  Lib_IntVector_Intrinsics_vec256 ws290 = v2__6;
+  Lib_IntVector_Intrinsics_vec256 ws300 = v1__6;
+  Lib_IntVector_Intrinsics_vec256 ws310 = v3__6;
+  ws32[0U] = ws00;
+  ws32[1U] = ws110;
+  ws32[2U] = ws210;
+  ws32[3U] = ws33;
+  ws32[4U] = ws40;
+  ws32[5U] = ws50;
+  ws32[6U] = ws60;
+  ws32[7U] = ws70;
+  ws32[8U] = ws80;
+  ws32[9U] = ws90;
+  ws32[10U] = ws100;
+  ws32[11U] = ws111;
+  ws32[12U] = ws120;
+  ws32[13U] = ws130;
+  ws32[14U] = ws140;
+  ws32[15U] = ws150;
+  ws32[16U] = ws160;
+  ws32[17U] = ws170;
+  ws32[18U] = ws180;
+  ws32[19U] = ws190;
+  ws32[20U] = ws200;
+  ws32[21U] = ws211;
+  ws32[22U] = ws220;
+  ws32[23U] = ws230;
+  ws32[24U] = ws240;
+  ws32[25U] = ws250;
+  ws32[26U] = ws260;
+  ws32[27U] = ws270;
+  ws32[28U] = ws280;
+  ws32[29U] = ws290;
+  ws32[30U] = ws300;
+  ws32[31U] = ws310;
+  for (uint32_t i = 0U; i < 25U; i++)
+  {
+    s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]);
+  }
+  uint8_t b04[256U] = { 0U };
+  uint8_t b14[256U] = { 0U };
+  uint8_t b24[256U] = { 0U };
+  uint8_t b34[256U] = { 0U };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } };
+  uint8_t *b35 = b.snd.snd.snd;
+  uint8_t *b25 = b.snd.snd.fst;
+  uint8_t *b15 = b.snd.fst;
+  uint8_t *b05 = b.fst;
+  b05[rateInBytes - 1U] = 0x80U;
+  b15[rateInBytes - 1U] = 0x80U;
+  b25[rateInBytes - 1U] = 0x80U;
+  b35[rateInBytes - 1U] = 0x80U;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U };
+  uint8_t *b3 = b.snd.snd.snd;
+  uint8_t *b26 = b.snd.snd.fst;
+  uint8_t *b16 = b.snd.fst;
+  uint8_t *b06 = b.fst;
+  ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06);
+  ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16);
+  ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26);
+  ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3);
+  ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U);
+  ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U);
+  ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U);
+  ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U);
+  ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U);
+  ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U);
+  ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U);
+  ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U);
+  ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U);
+  ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U);
+  ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U);
+  ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U);
+  ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U);
+  ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U);
+  ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U);
+  ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U);
+  ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U);
+  ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U);
+  ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U);
+  ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U);
+  ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U);
+  ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U);
+  ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U);
+  ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U);
+  ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U);
+  ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U);
+  ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U);
+  ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U);
+  Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U];
+  Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U];
+  Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U];
+  Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U];
+  Lib_IntVector_Intrinsics_vec256 v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18);
+  Lib_IntVector_Intrinsics_vec256 v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18);
+  Lib_IntVector_Intrinsics_vec256 v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38);
+  Lib_IntVector_Intrinsics_vec256 v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38);
+  Lib_IntVector_Intrinsics_vec256 v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7);
+  Lib_IntVector_Intrinsics_vec256 v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7);
+  Lib_IntVector_Intrinsics_vec256 v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7);
+  Lib_IntVector_Intrinsics_vec256 v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7);
+  Lib_IntVector_Intrinsics_vec256 ws01 = v0__7;
+  Lib_IntVector_Intrinsics_vec256 ws112 = v2__7;
+  Lib_IntVector_Intrinsics_vec256 ws212 = v1__7;
+  Lib_IntVector_Intrinsics_vec256 ws35 = v3__7;
+  Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U];
+  Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U];
+  Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U];
+  Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U];
+  Lib_IntVector_Intrinsics_vec256 v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19);
+  Lib_IntVector_Intrinsics_vec256 v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19);
+  Lib_IntVector_Intrinsics_vec256 v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39);
+  Lib_IntVector_Intrinsics_vec256 v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39);
+  Lib_IntVector_Intrinsics_vec256 v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8);
+  Lib_IntVector_Intrinsics_vec256 v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8);
+  Lib_IntVector_Intrinsics_vec256 v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8);
+  Lib_IntVector_Intrinsics_vec256 v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8);
+  Lib_IntVector_Intrinsics_vec256 ws41 = v0__8;
+  Lib_IntVector_Intrinsics_vec256 ws51 = v2__8;
+  Lib_IntVector_Intrinsics_vec256 ws61 = v1__8;
+  Lib_IntVector_Intrinsics_vec256 ws71 = v3__8;
+  Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U];
+  Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U];
+  Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U];
+  Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U];
+  Lib_IntVector_Intrinsics_vec256 v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110);
+  Lib_IntVector_Intrinsics_vec256 v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110);
+  Lib_IntVector_Intrinsics_vec256 v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310);
+  Lib_IntVector_Intrinsics_vec256 v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310);
+  Lib_IntVector_Intrinsics_vec256 v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9);
+  Lib_IntVector_Intrinsics_vec256 v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9);
+  Lib_IntVector_Intrinsics_vec256 v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9);
+  Lib_IntVector_Intrinsics_vec256 v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9);
+  Lib_IntVector_Intrinsics_vec256 ws81 = v0__9;
+  Lib_IntVector_Intrinsics_vec256 ws91 = v2__9;
+  Lib_IntVector_Intrinsics_vec256 ws101 = v1__9;
+  Lib_IntVector_Intrinsics_vec256 ws113 = v3__9;
+  Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U];
+  Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U];
+  Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U];
+  Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U];
+  Lib_IntVector_Intrinsics_vec256 v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111);
+  Lib_IntVector_Intrinsics_vec256 v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111);
+  Lib_IntVector_Intrinsics_vec256 v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311);
+  Lib_IntVector_Intrinsics_vec256 v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311);
+  Lib_IntVector_Intrinsics_vec256 v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10);
+  Lib_IntVector_Intrinsics_vec256 v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10);
+  Lib_IntVector_Intrinsics_vec256 v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10);
+  Lib_IntVector_Intrinsics_vec256 v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10);
+  Lib_IntVector_Intrinsics_vec256 ws121 = v0__10;
+  Lib_IntVector_Intrinsics_vec256 ws131 = v2__10;
+  Lib_IntVector_Intrinsics_vec256 ws141 = v1__10;
+  Lib_IntVector_Intrinsics_vec256 ws151 = v3__10;
+  Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U];
+  Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U];
+  Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U];
+  Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U];
+  Lib_IntVector_Intrinsics_vec256 v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112);
+  Lib_IntVector_Intrinsics_vec256 v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112);
+  Lib_IntVector_Intrinsics_vec256 v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312);
+  Lib_IntVector_Intrinsics_vec256 v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312);
+  Lib_IntVector_Intrinsics_vec256 v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11);
+  Lib_IntVector_Intrinsics_vec256 v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11);
+  Lib_IntVector_Intrinsics_vec256 v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11);
+  Lib_IntVector_Intrinsics_vec256 v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11);
+  Lib_IntVector_Intrinsics_vec256 ws161 = v0__11;
+  Lib_IntVector_Intrinsics_vec256 ws171 = v2__11;
+  Lib_IntVector_Intrinsics_vec256 ws181 = v1__11;
+  Lib_IntVector_Intrinsics_vec256 ws191 = v3__11;
+  Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U];
+  Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U];
+  Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U];
+  Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U];
+  Lib_IntVector_Intrinsics_vec256 v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113);
+  Lib_IntVector_Intrinsics_vec256 v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113);
+  Lib_IntVector_Intrinsics_vec256 v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313);
+  Lib_IntVector_Intrinsics_vec256 v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313);
+  Lib_IntVector_Intrinsics_vec256 v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12);
+  Lib_IntVector_Intrinsics_vec256 v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12);
+  Lib_IntVector_Intrinsics_vec256 v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12);
+  Lib_IntVector_Intrinsics_vec256 v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12);
+  Lib_IntVector_Intrinsics_vec256 ws201 = v0__12;
+  Lib_IntVector_Intrinsics_vec256 ws213 = v2__12;
+  Lib_IntVector_Intrinsics_vec256 ws221 = v1__12;
+  Lib_IntVector_Intrinsics_vec256 ws231 = v3__12;
+  Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U];
+  Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U];
+  Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U];
+  Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U];
+  Lib_IntVector_Intrinsics_vec256 v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114);
+  Lib_IntVector_Intrinsics_vec256 v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114);
+  Lib_IntVector_Intrinsics_vec256 v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314);
+  Lib_IntVector_Intrinsics_vec256 v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314);
+  Lib_IntVector_Intrinsics_vec256 v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13);
+  Lib_IntVector_Intrinsics_vec256 v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13);
+  Lib_IntVector_Intrinsics_vec256 v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13);
+  Lib_IntVector_Intrinsics_vec256 v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13);
+  Lib_IntVector_Intrinsics_vec256 ws241 = v0__13;
+  Lib_IntVector_Intrinsics_vec256 ws251 = v2__13;
+  Lib_IntVector_Intrinsics_vec256 ws261 = v1__13;
+  Lib_IntVector_Intrinsics_vec256 ws271 = v3__13;
+  Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U];
+  Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U];
+  Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U];
+  Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U];
+  Lib_IntVector_Intrinsics_vec256 v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115);
+  Lib_IntVector_Intrinsics_vec256 v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115);
+  Lib_IntVector_Intrinsics_vec256 v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315);
+  Lib_IntVector_Intrinsics_vec256 v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315);
+  Lib_IntVector_Intrinsics_vec256 v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14);
+  Lib_IntVector_Intrinsics_vec256 v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14);
+  Lib_IntVector_Intrinsics_vec256 v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14);
+  Lib_IntVector_Intrinsics_vec256 v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14);
+  Lib_IntVector_Intrinsics_vec256 ws281 = v0__14;
+  Lib_IntVector_Intrinsics_vec256 ws291 = v2__14;
+  Lib_IntVector_Intrinsics_vec256 ws301 = v1__14;
+  Lib_IntVector_Intrinsics_vec256 ws311 = v3__14;
+  ws34[0U] = ws01;
+  ws34[1U] = ws112;
+  ws34[2U] = ws212;
+  ws34[3U] = ws35;
+  ws34[4U] = ws41;
+  ws34[5U] = ws51;
+  ws34[6U] = ws61;
+  ws34[7U] = ws71;
+  ws34[8U] = ws81;
+  ws34[9U] = ws91;
+  ws34[10U] = ws101;
+  ws34[11U] = ws113;
+  ws34[12U] = ws121;
+  ws34[13U] = ws131;
+  ws34[14U] = ws141;
+  ws34[15U] = ws151;
+  ws34[16U] = ws161;
+  ws34[17U] = ws171;
+  ws34[18U] = ws181;
+  ws34[19U] = ws191;
+  ws34[20U] = ws201;
+  ws34[21U] = ws213;
+  ws34[22U] = ws221;
+  ws34[23U] = ws231;
+  ws34[24U] = ws241;
+  ws34[25U] = ws251;
+  ws34[26U] = ws261;
+  ws34[27U] = ws271;
+  ws34[28U] = ws281;
+  ws34[29U] = ws291;
+  ws34[30U] = ws301;
+  ws34[31U] = ws311;
+  for (uint32_t i = 0U; i < 25U; i++)
+  {
+    s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]);
+  }
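+  /* The padded final blocks are now XORed into the state; apply the 24-round
+     Keccak-f[1600] permutation (theta, rho/pi, chi, iota), computed for all
+     four states at once, one 64-bit lane per 256-bit vector. */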
+  for (uint32_t i0 = 0U; i0 < 24U; i0++)
+  {
+    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U };
+    KRML_MAYBE_FOR5(i,
+      0U,
+      5U,
+      1U,
+      Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U];
+      Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U];
+      Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U];
+      _C[i] =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____17,
+          Lib_IntVector_Intrinsics_vec256_xor(uu____18,
+            Lib_IntVector_Intrinsics_vec256_xor(uu____19,
+              Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); 
+    KRML_MAYBE_FOR5(i1,
+      0U,
+      5U,
+      1U,
+      Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U];
+      Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U];
+      Lib_IntVector_Intrinsics_vec256 _D =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____20,
+          Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, 1U),
+            Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U)));
+      KRML_MAYBE_FOR5(i,
+        0U,
+        5U,
+        1U,
+        s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D);););
+    Lib_IntVector_Intrinsics_vec256 x = s[1U];
+    Lib_IntVector_Intrinsics_vec256 current = x;
+    for (uint32_t i = 0U; i < 24U; i++)
+    {
+      uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i];
+      uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i];
+      Lib_IntVector_Intrinsics_vec256 temp = s[_Y];
+      Lib_IntVector_Intrinsics_vec256 uu____22 = current;
+      s[_Y] =
+        Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r),
+          Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r));
+      current = temp;
+    }
+    KRML_MAYBE_FOR5(i,
+      0U,
+      5U,
+      1U,
+      Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256 uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256 v0 =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____23,
+          Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i]));
+      Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256 uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256 v1 =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____25,
+          Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i]));
+      Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256 uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256 v2 =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____27,
+          Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i]));
+      Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256 uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256 v3 =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____29,
+          Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i]));
+      Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i];
+      Lib_IntVector_Intrinsics_vec256 uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]);
+      Lib_IntVector_Intrinsics_vec256 v4 =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____31,
+          Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i]));
+      s[0U + 5U * i] = v0;
+      s[1U + 5U * i] = v1;
+      s[2U + 5U * i] = v2;
+      s[3U + 5U * i] = v3;
+      s[4U + 5U * i] = v4;);
+    uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0];
+    Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U];
+    s[0U] = Lib_IntVector_Intrinsics_vec256_xor(uu____33, Lib_IntVector_Intrinsics_vec256_load64(c));
+  }
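+  /* Squeeze every full rate-sized block of the 32-byte digests: transpose the
+     state back into per-lane byte order, store it to hbuf, and copy one
+     rateInBytes chunk to each of the four outputs per iteration. */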
+  for (uint32_t i0 = 0U; i0 < 32U / rateInBytes; i0++)
+  {
+    uint8_t hbuf[1024U] = { 0U };
+    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
+    memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+    Lib_IntVector_Intrinsics_vec256 v016 = ws[0U];
+    Lib_IntVector_Intrinsics_vec256 v116 = ws[1U];
+    Lib_IntVector_Intrinsics_vec256 v216 = ws[2U];
+    Lib_IntVector_Intrinsics_vec256 v316 = ws[3U];
+    Lib_IntVector_Intrinsics_vec256 v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116);
+    Lib_IntVector_Intrinsics_vec256 v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116);
+    Lib_IntVector_Intrinsics_vec256 v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316);
+    Lib_IntVector_Intrinsics_vec256 v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316);
+    Lib_IntVector_Intrinsics_vec256 v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15);
+    Lib_IntVector_Intrinsics_vec256 v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15);
+    Lib_IntVector_Intrinsics_vec256 v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15);
+    Lib_IntVector_Intrinsics_vec256 v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15);
+    Lib_IntVector_Intrinsics_vec256 ws0 = v0__15;
+    Lib_IntVector_Intrinsics_vec256 ws1 = v2__15;
+    Lib_IntVector_Intrinsics_vec256 ws2 = v1__15;
+    Lib_IntVector_Intrinsics_vec256 ws3 = v3__15;
+    Lib_IntVector_Intrinsics_vec256 v017 = ws[4U];
+    Lib_IntVector_Intrinsics_vec256 v117 = ws[5U];
+    Lib_IntVector_Intrinsics_vec256 v217 = ws[6U];
+    Lib_IntVector_Intrinsics_vec256 v317 = ws[7U];
+    Lib_IntVector_Intrinsics_vec256 v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117);
+    Lib_IntVector_Intrinsics_vec256 v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117);
+    Lib_IntVector_Intrinsics_vec256 v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317);
+    Lib_IntVector_Intrinsics_vec256 v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317);
+    Lib_IntVector_Intrinsics_vec256 v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16);
+    Lib_IntVector_Intrinsics_vec256 v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16);
+    Lib_IntVector_Intrinsics_vec256 v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16);
+    Lib_IntVector_Intrinsics_vec256 v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16);
+    Lib_IntVector_Intrinsics_vec256 ws4 = v0__16;
+    Lib_IntVector_Intrinsics_vec256 ws5 = v2__16;
+    Lib_IntVector_Intrinsics_vec256 ws6 = v1__16;
+    Lib_IntVector_Intrinsics_vec256 ws7 = v3__16;
+    Lib_IntVector_Intrinsics_vec256 v018 = ws[8U];
+    Lib_IntVector_Intrinsics_vec256 v118 = ws[9U];
+    Lib_IntVector_Intrinsics_vec256 v218 = ws[10U];
+    Lib_IntVector_Intrinsics_vec256 v318 = ws[11U];
+    Lib_IntVector_Intrinsics_vec256 v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118);
+    Lib_IntVector_Intrinsics_vec256 v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118);
+    Lib_IntVector_Intrinsics_vec256 v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318);
+    Lib_IntVector_Intrinsics_vec256 v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318);
+    Lib_IntVector_Intrinsics_vec256 v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17);
+    Lib_IntVector_Intrinsics_vec256 v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17);
+    Lib_IntVector_Intrinsics_vec256 v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17);
+    Lib_IntVector_Intrinsics_vec256 v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17);
+    Lib_IntVector_Intrinsics_vec256 ws8 = v0__17;
+    Lib_IntVector_Intrinsics_vec256 ws9 = v2__17;
+    Lib_IntVector_Intrinsics_vec256 ws10 = v1__17;
+    Lib_IntVector_Intrinsics_vec256 ws11 = v3__17;
+    Lib_IntVector_Intrinsics_vec256 v019 = ws[12U];
+    Lib_IntVector_Intrinsics_vec256 v119 = ws[13U];
+    Lib_IntVector_Intrinsics_vec256 v219 = ws[14U];
+    Lib_IntVector_Intrinsics_vec256 v319 = ws[15U];
+    Lib_IntVector_Intrinsics_vec256 v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119);
+    Lib_IntVector_Intrinsics_vec256 v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119);
+    Lib_IntVector_Intrinsics_vec256 v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319);
+    Lib_IntVector_Intrinsics_vec256 v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319);
+    Lib_IntVector_Intrinsics_vec256 v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18);
+    Lib_IntVector_Intrinsics_vec256 v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18);
+    Lib_IntVector_Intrinsics_vec256 v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18);
+    Lib_IntVector_Intrinsics_vec256 v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18);
+    Lib_IntVector_Intrinsics_vec256 ws12 = v0__18;
+    Lib_IntVector_Intrinsics_vec256 ws13 = v2__18;
+    Lib_IntVector_Intrinsics_vec256 ws14 = v1__18;
+    Lib_IntVector_Intrinsics_vec256 ws15 = v3__18;
+    Lib_IntVector_Intrinsics_vec256 v020 = ws[16U];
+    Lib_IntVector_Intrinsics_vec256 v120 = ws[17U];
+    Lib_IntVector_Intrinsics_vec256 v220 = ws[18U];
+    Lib_IntVector_Intrinsics_vec256 v320 = ws[19U];
+    Lib_IntVector_Intrinsics_vec256 v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120);
+    Lib_IntVector_Intrinsics_vec256 v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120);
+    Lib_IntVector_Intrinsics_vec256 v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320);
+    Lib_IntVector_Intrinsics_vec256 v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320);
+    Lib_IntVector_Intrinsics_vec256 v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19);
+    Lib_IntVector_Intrinsics_vec256 v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19);
+    Lib_IntVector_Intrinsics_vec256 v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19);
+    Lib_IntVector_Intrinsics_vec256 v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19);
+    Lib_IntVector_Intrinsics_vec256 ws16 = v0__19;
+    Lib_IntVector_Intrinsics_vec256 ws17 = v2__19;
+    Lib_IntVector_Intrinsics_vec256 ws18 = v1__19;
+    Lib_IntVector_Intrinsics_vec256 ws19 = v3__19;
+    Lib_IntVector_Intrinsics_vec256 v021 = ws[20U];
+    Lib_IntVector_Intrinsics_vec256 v121 = ws[21U];
+    Lib_IntVector_Intrinsics_vec256 v221 = ws[22U];
+    Lib_IntVector_Intrinsics_vec256 v321 = ws[23U];
+    Lib_IntVector_Intrinsics_vec256 v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121);
+    Lib_IntVector_Intrinsics_vec256 v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121);
+    Lib_IntVector_Intrinsics_vec256 v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321);
+    Lib_IntVector_Intrinsics_vec256 v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321);
+    Lib_IntVector_Intrinsics_vec256 v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20);
+    Lib_IntVector_Intrinsics_vec256 v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20);
+    Lib_IntVector_Intrinsics_vec256 v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20);
+    Lib_IntVector_Intrinsics_vec256 v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20);
+    Lib_IntVector_Intrinsics_vec256 ws20 = v0__20;
+    Lib_IntVector_Intrinsics_vec256 ws21 = v2__20;
+    Lib_IntVector_Intrinsics_vec256 ws22 = v1__20;
+    Lib_IntVector_Intrinsics_vec256 ws23 = v3__20;
+    Lib_IntVector_Intrinsics_vec256 v022 = ws[24U];
+    Lib_IntVector_Intrinsics_vec256 v122 = ws[25U];
+    Lib_IntVector_Intrinsics_vec256 v222 = ws[26U];
+    Lib_IntVector_Intrinsics_vec256 v322 = ws[27U];
+    Lib_IntVector_Intrinsics_vec256 v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122);
+    Lib_IntVector_Intrinsics_vec256 v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122);
+    Lib_IntVector_Intrinsics_vec256 v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322);
+    Lib_IntVector_Intrinsics_vec256 v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322);
+    Lib_IntVector_Intrinsics_vec256 v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21);
+    Lib_IntVector_Intrinsics_vec256 v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21);
+    Lib_IntVector_Intrinsics_vec256 v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21);
+    Lib_IntVector_Intrinsics_vec256 v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21);
+    Lib_IntVector_Intrinsics_vec256 ws24 = v0__21;
+    Lib_IntVector_Intrinsics_vec256 ws25 = v2__21;
+    Lib_IntVector_Intrinsics_vec256 ws26 = v1__21;
+    Lib_IntVector_Intrinsics_vec256 ws27 = v3__21;
+    Lib_IntVector_Intrinsics_vec256 v0 = ws[28U];
+    Lib_IntVector_Intrinsics_vec256 v1 = ws[29U];
+    Lib_IntVector_Intrinsics_vec256 v2 = ws[30U];
+    Lib_IntVector_Intrinsics_vec256 v3 = ws[31U];
+    Lib_IntVector_Intrinsics_vec256 v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
+    Lib_IntVector_Intrinsics_vec256 v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
+    Lib_IntVector_Intrinsics_vec256 v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
+    Lib_IntVector_Intrinsics_vec256 v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
+    Lib_IntVector_Intrinsics_vec256 v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22);
+    Lib_IntVector_Intrinsics_vec256 v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22);
+    Lib_IntVector_Intrinsics_vec256 v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22);
+    Lib_IntVector_Intrinsics_vec256 v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22);
+    Lib_IntVector_Intrinsics_vec256 ws28 = v0__22;
+    Lib_IntVector_Intrinsics_vec256 ws29 = v2__22;
+    Lib_IntVector_Intrinsics_vec256 ws30 = v1__22;
+    Lib_IntVector_Intrinsics_vec256 ws31 = v3__22;
+    ws[0U] = ws0;
+    ws[1U] = ws4;
+    ws[2U] = ws8;
+    ws[3U] = ws12;
+    ws[4U] = ws16;
+    ws[5U] = ws20;
+    ws[6U] = ws24;
+    ws[7U] = ws28;
+    ws[8U] = ws1;
+    ws[9U] = ws5;
+    ws[10U] = ws9;
+    ws[11U] = ws13;
+    ws[12U] = ws17;
+    ws[13U] = ws21;
+    ws[14U] = ws25;
+    ws[15U] = ws29;
+    ws[16U] = ws2;
+    ws[17U] = ws6;
+    ws[18U] = ws10;
+    ws[19U] = ws14;
+    ws[20U] = ws18;
+    ws[21U] = ws22;
+    ws[22U] = ws26;
+    ws[23U] = ws30;
+    ws[24U] = ws3;
+    ws[25U] = ws7;
+    ws[26U] = ws11;
+    ws[27U] = ws15;
+    ws[28U] = ws19;
+    ws[29U] = ws23;
+    ws[30U] = ws27;
+    ws[31U] = ws31;
+    for (uint32_t i = 0U; i < 32U; i++)
+    {
+      Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]);
+    }
+    uint8_t *b36 = rb.snd.snd.snd;
+    uint8_t *b2 = rb.snd.snd.fst;
+    uint8_t *b1 = rb.snd.fst;
+    uint8_t *b0 = rb.fst;
+    memcpy(b0 + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t));
+    memcpy(b1 + i0 * rateInBytes, hbuf + 256U, rateInBytes * sizeof (uint8_t));
+    memcpy(b2 + i0 * rateInBytes, hbuf + 512U, rateInBytes * sizeof (uint8_t));
+    memcpy(b36 + i0 * rateInBytes, hbuf + 768U, rateInBytes * sizeof (uint8_t));
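+    /* Permute the four states again before squeezing the next block. */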
Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 32U % rateInBytes; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + 
v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + 
Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
+  Lib_IntVector_Intrinsics_vec256
+  v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
+  Lib_IntVector_Intrinsics_vec256
+  v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
+  Lib_IntVector_Intrinsics_vec256
+  v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22);
+  Lib_IntVector_Intrinsics_vec256
+  v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22);
+  Lib_IntVector_Intrinsics_vec256
+  v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22);
+  Lib_IntVector_Intrinsics_vec256
+  v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22);
+  Lib_IntVector_Intrinsics_vec256 ws28 = v0__22;
+  Lib_IntVector_Intrinsics_vec256 ws29 = v2__22;
+  Lib_IntVector_Intrinsics_vec256 ws30 = v1__22;
+  Lib_IntVector_Intrinsics_vec256 ws31 = v3__22;
+  ws[0U] = ws0;
+  ws[1U] = ws4;
+  ws[2U] = ws8;
+  ws[3U] = ws12;
+  ws[4U] = ws16;
+  ws[5U] = ws20;
+  ws[6U] = ws24;
+  ws[7U] = ws28;
+  ws[8U] = ws1;
+  ws[9U] = ws5;
+  ws[10U] = ws9;
+  ws[11U] = ws13;
+  ws[12U] = ws17;
+  ws[13U] = ws21;
+  ws[14U] = ws25;
+  ws[15U] = ws29;
+  ws[16U] = ws2;
+  ws[17U] = ws6;
+  ws[18U] = ws10;
+  ws[19U] = ws14;
+  ws[20U] = ws18;
+  ws[21U] = ws22;
+  ws[22U] = ws26;
+  ws[23U] = ws30;
+  ws[24U] = ws3;
+  ws[25U] = ws7;
+  ws[26U] = ws11;
+  ws[27U] = ws15;
+  ws[28U] = ws19;
+  ws[29U] = ws23;
+  ws[30U] = ws27;
+  ws[31U] = ws31;
+  for (uint32_t i = 0U; i < 32U; i++)
+  {
+    Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]);
+  }
+  uint8_t *b36 = rb.snd.snd.snd;
+  uint8_t *b2 = rb.snd.snd.fst;
+  uint8_t *b1 = rb.snd.fst;
+  uint8_t *b0 = rb.fst;
+  memcpy(b0 + 32U - remOut, hbuf, remOut * sizeof (uint8_t));
+  memcpy(b1 + 32U - remOut, hbuf + 256U, remOut * sizeof (uint8_t));
+  memcpy(b2 + 32U - remOut, hbuf + 512U, remOut * sizeof (uint8_t));
+  memcpy(b36 + 32U - remOut, hbuf + 768U, remOut * sizeof (uint8_t));
+}
+
+void
+Hacl_Hash_SHA3_Simd256_sha3_384(
+  uint8_t *output0,
+  uint8_t *output1,
+  uint8_t *output2,
+  uint8_t *output3,
+  uint8_t *input0,
+  uint8_t *input1,
+  uint8_t *input2,
+  uint8_t *input3,
+  uint32_t inputByteLen
+)
+{
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } };
+  uint32_t rateInBytes = 104U;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U };
+  for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++)
+  {
+    uint8_t b00[256U] = { 0U };
+    uint8_t b10[256U] = { 0U };
+    uint8_t b20[256U] = { 0U };
+    uint8_t b30[256U] = { 0U };
+    K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+    b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+    uint8_t *b31 = ib.snd.snd.snd;
+    uint8_t *b21 = ib.snd.snd.fst;
+    uint8_t *b11 = ib.snd.fst;
+    uint8_t *b01 = ib.fst;
+    uint8_t *bl3 = b_.snd.snd.snd;
+    uint8_t *bl2 = b_.snd.snd.fst;
+    uint8_t *bl1 = b_.snd.fst;
+    uint8_t *bl0 = b_.fst;
+    memcpy(bl0, b01 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl1, b11 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl2, b21 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    memcpy(bl3, b31 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t));
+    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U };
+    uint8_t
*b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 
= ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + 
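/* Reading note (annotation, not part of the generated file): this function
   hashes four equal-length inputs in parallel. The SHA3-384 rate is
   (1600 - 2*384)/8 = 104 bytes and each digest is 48 bytes. A minimal,
   hypothetical call sketch (d0..d3 and m0..m3 are caller-supplied buffers,
   not names from this patch):

     uint8_t d0[48U], d1[48U], d2[48U], d3[48U];
     Hacl_Hash_SHA3_Simd256_sha3_384(d0, d1, d2, d3, m0, m1, m2, m3, len);

   where m0..m3 each point to `len` bytes of input. */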
Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + 
Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + 
Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r));
+        current = temp;
+      }
+      KRML_MAYBE_FOR5(i,
+        0U,
+        5U,
+        1U,
+        Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256
+        uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256
+        v07 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____6,
+            Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256
+        uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256
+        v17 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____8,
+            Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256
+        uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256
+        v27 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____10,
+            Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256
+        uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256
+        v37 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____12,
+            Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i]));
+        Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i];
+        Lib_IntVector_Intrinsics_vec256
+        uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]);
+        Lib_IntVector_Intrinsics_vec256
+        v4 =
+          Lib_IntVector_Intrinsics_vec256_xor(uu____14,
+            Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i]));
+        s[0U + 5U * i] = v07;
+        s[1U + 5U * i] = v17;
+        s[2U + 5U * i] = v27;
+        s[3U + 5U * i] = v37;
+        s[4U + 5U * i] = v4;);
+      uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1];
+      Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U];
+      s[0U] =
+        Lib_IntVector_Intrinsics_vec256_xor(uu____16,
+          Lib_IntVector_Intrinsics_vec256_load64(c));
+    }
+  }
+  uint32_t rem = inputByteLen % rateInBytes;
+  uint8_t b00[256U] = { 0U };
+  uint8_t b10[256U] = { 0U };
+  uint8_t b20[256U] = { 0U };
+  uint8_t b30[256U] = { 0U };
+  K____uint8_t___uint8_t____K____uint8_t___uint8_t_
+  b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } };
+  uint32_t rem1 = inputByteLen % rateInBytes;
+  uint8_t *b31 = ib.snd.snd.snd;
+  uint8_t *b21 = ib.snd.snd.fst;
+  uint8_t *b11 = ib.snd.fst;
+  uint8_t *b01 = ib.fst;
+  uint8_t *bl3 = b_.snd.snd.snd;
+  uint8_t *bl2 = b_.snd.snd.fst;
+  uint8_t *bl1 = b_.snd.fst;
+  uint8_t *bl0 = b_.fst;
+  memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
+  memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
+  memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
+  memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t));
+  uint8_t *b32 = b_.snd.snd.snd;
+  uint8_t *b22 = b_.snd.snd.fst;
+  uint8_t *b12 = b_.snd.fst;
+  uint8_t *b02 = b_.fst;
+  b02[rem] = 0x06U;
+  b12[rem] = 0x06U;
+  b22[rem] = 0x06U;
+  b32[rem] = 0x06U;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U };
+  uint8_t *b33 = b_.snd.snd.snd;
+  uint8_t *b23 = b_.snd.snd.fst;
+  uint8_t *b13 = b_.snd.fst;
+  uint8_t *b03 = b_.fst;
+  ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03);
+  ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13);
+  ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23);
+  ws32[3U] =
Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + 
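/* Reading note (annotation, not part of the generated file): `rem` and
   `rem1` are the same value, inputByteLen % rateInBytes; the extraction
   simply binds it twice. The 0x06 byte written at offset `rem` above is the
   SHA-3 domain-separation suffix (bits 01) combined with the first bit of
   the pad10*1 padding from FIPS 202; the final padding bit (0x80 at offset
   rateInBytes - 1) is applied through a second buffer further down. Both
   buffers are XORed into the state before a single permutation, which is
   equivalent to absorbing one correctly padded final block. */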
Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + 
Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws32[0U] = ws00; + ws32[1U] = ws110; + ws32[2U] = ws210; + ws32[3U] = ws33; + ws32[4U] = ws40; + ws32[5U] = ws50; + ws32[6U] = ws60; + ws32[7U] = ws70; + ws32[8U] = ws80; + ws32[9U] = ws90; + ws32[10U] = ws100; + ws32[11U] = ws111; + ws32[12U] = ws120; + ws32[13U] = ws130; + ws32[14U] = ws140; + ws32[15U] = ws150; + ws32[16U] = ws160; + ws32[17U] = ws170; + ws32[18U] = ws180; + ws32[19U] = ws190; + ws32[20U] = ws200; + ws32[21U] = ws211; + ws32[22U] = ws220; + ws32[23U] = ws230; + ws32[24U] = ws240; + ws32[25U] = ws250; + ws32[26U] = ws260; + ws32[27U] = ws270; + ws32[28U] = ws280; + ws32[29U] = ws290; + ws32[30U] = ws300; + ws32[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes - 1U] = 0x80U; + b15[rateInBytes - 1U] = 0x80U; + b25[rateInBytes - 1U] = 0x80U; + b35[rateInBytes - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + 
Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + Lib_IntVector_Intrinsics_vec256_xor(uu____19, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____20 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____21 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____20, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____21, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____21, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____22 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____22, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____22, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____23 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____24 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + 
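/* Reading note (annotation, not part of the generated file): each iteration
   of this 24-round loop is one Keccak-f[1600] round applied to all four
   states at once: theta (column parities _C, then
   _D[x] = _C[x-1] ^ rotl1(_C[x+1])), rho/pi (the rotation loop driven by
   the keccak_rotc and keccak_piln tables), chi
   (s[x] ^= ~s[x+1] & s[x+2] along each row, the v0..v4 temporaries here),
   and iota (XOR of the round constant keccak_rndc[i0] into lane (0,0)). */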
Lib_IntVector_Intrinsics_vec256 + v0 = + Lib_IntVector_Intrinsics_vec256_xor(uu____23, + Lib_IntVector_Intrinsics_vec256_and(uu____24, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____25 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____26 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v1 = + Lib_IntVector_Intrinsics_vec256_xor(uu____25, + Lib_IntVector_Intrinsics_vec256_and(uu____26, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____27 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____28 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v2 = + Lib_IntVector_Intrinsics_vec256_xor(uu____27, + Lib_IntVector_Intrinsics_vec256_and(uu____28, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____29 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____30 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v3 = + Lib_IntVector_Intrinsics_vec256_xor(uu____29, + Lib_IntVector_Intrinsics_vec256_and(uu____30, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____31 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____32 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____31, + Lib_IntVector_Intrinsics_vec256_and(uu____32, s[1U + 5U * i])); + s[0U + 5U * i] = v0; + s[1U + 5U * i] = v1; + s[2U + 5U * i] = v2; + s[3U + 5U * i] = v3; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____33 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____33, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + for (uint32_t i0 = 0U; i0 < 48U / rateInBytes; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, 
v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + 
Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + 
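+    /* Reading note (not part of the generated file): the interleave_low64/
+       high64 pair followed by interleave_low128/high128 above is a 4x4
+       transpose of 64-bit words, so each 256-bit vector ends up holding four
+       consecutive state words of a single Keccak state (one of the four
+       parallel inputs); after the reshuffle of ws[] below, hbuf holds the
+       four 256-byte lane streams back to back for the per-lane memcpys. */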
Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes, hbuf + 256U, rateInBytes * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes, hbuf + 512U, rateInBytes * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes, hbuf + 768U, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 
1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 48U % rateInBytes; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, 
v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + 
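+  /* Reading note (not part of the generated file): the same 4x4 transpose is
+     repeated for the final, partial squeeze block; only the last remOut bytes
+     of each lane's 48-byte SHA3-384 digest remain to be copied out below. */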
Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 48U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 48U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 48U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b36 + 48U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +void +Hacl_Hash_SHA3_Simd256_sha3_512( + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t 
*input3, + uint32_t inputByteLen +) +{ + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + rb = { .fst = output0, .snd = { .fst = output1, .snd = { .fst = output2, .snd = output3 } } }; + uint32_t rateInBytes = 72U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[25U] KRML_POST_ALIGN(32) = { 0U }; + for (uint32_t i0 = 0U; i0 < inputByteLen / rateInBytes; i0++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl1, b11 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl2, b21 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + memcpy(bl3, b31 + i0 * rateInBytes, rateInBytes * sizeof (uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 
ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + 
Lib_IntVector_Intrinsics_vec256
+    v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4);
+    Lib_IntVector_Intrinsics_vec256 ws20 = v0__4;
+    Lib_IntVector_Intrinsics_vec256 ws21 = v2__4;
+    Lib_IntVector_Intrinsics_vec256 ws22 = v1__4;
+    Lib_IntVector_Intrinsics_vec256 ws23 = v3__4;
+    Lib_IntVector_Intrinsics_vec256 v06 = ws[24U];
+    Lib_IntVector_Intrinsics_vec256 v16 = ws[25U];
+    Lib_IntVector_Intrinsics_vec256 v26 = ws[26U];
+    Lib_IntVector_Intrinsics_vec256 v36 = ws[27U];
+    Lib_IntVector_Intrinsics_vec256
+    v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16);
+    Lib_IntVector_Intrinsics_vec256
+    v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16);
+    Lib_IntVector_Intrinsics_vec256
+    v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36);
+    Lib_IntVector_Intrinsics_vec256
+    v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36);
+    Lib_IntVector_Intrinsics_vec256
+    v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5);
+    Lib_IntVector_Intrinsics_vec256
+    v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5);
+    Lib_IntVector_Intrinsics_vec256
+    v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5);
+    Lib_IntVector_Intrinsics_vec256
+    v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5);
+    Lib_IntVector_Intrinsics_vec256 ws24 = v0__5;
+    Lib_IntVector_Intrinsics_vec256 ws25 = v2__5;
+    Lib_IntVector_Intrinsics_vec256 ws26 = v1__5;
+    Lib_IntVector_Intrinsics_vec256 ws27 = v3__5;
+    Lib_IntVector_Intrinsics_vec256 v0 = ws[28U];
+    Lib_IntVector_Intrinsics_vec256 v1 = ws[29U];
+    Lib_IntVector_Intrinsics_vec256 v2 = ws[30U];
+    Lib_IntVector_Intrinsics_vec256 v3 = ws[31U];
+    Lib_IntVector_Intrinsics_vec256
+    v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1);
+    Lib_IntVector_Intrinsics_vec256
+    v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1);
+    Lib_IntVector_Intrinsics_vec256
+    v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3);
+    Lib_IntVector_Intrinsics_vec256
+    v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3);
+    Lib_IntVector_Intrinsics_vec256
+    v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6);
+    Lib_IntVector_Intrinsics_vec256
+    v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6);
+    Lib_IntVector_Intrinsics_vec256
+    v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6);
+    Lib_IntVector_Intrinsics_vec256
+    v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6);
+    Lib_IntVector_Intrinsics_vec256 ws28 = v0__6;
+    Lib_IntVector_Intrinsics_vec256 ws29 = v2__6;
+    Lib_IntVector_Intrinsics_vec256 ws30 = v1__6;
+    Lib_IntVector_Intrinsics_vec256 ws31 = v3__6;
+    ws[0U] = ws0;
+    ws[1U] = ws1;
+    ws[2U] = ws2;
+    ws[3U] = ws3;
+    ws[4U] = ws4;
+    ws[5U] = ws5;
+    ws[6U] = ws6;
+    ws[7U] = ws7;
+    ws[8U] = ws8;
+    ws[9U] = ws9;
+    ws[10U] = ws10;
+    ws[11U] = ws11;
+    ws[12U] = ws12;
+    ws[13U] = ws13;
+    ws[14U] = ws14;
+    ws[15U] = ws15;
+    ws[16U] = ws16;
+    ws[17U] = ws17;
+    ws[18U] = ws18;
+    ws[19U] = ws19;
+    ws[20U] = ws20;
+    ws[21U] = ws21;
+    ws[22U] = ws22;
+    ws[23U] = ws23;
+    ws[24U] = ws24;
+    ws[25U] = ws25;
+    ws[26U] = ws26;
+    ws[27U] = ws27;
+    ws[28U] = ws28;
+    ws[29U] = ws29;
+    ws[30U] = ws30;
+    ws[31U] = ws31;
+    for (uint32_t i = 0U; i < 25U; i++)
+    {
+      s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws[i]);
+    }
+    for (uint32_t i1 = 0U; i1 < 24U; i1++)
+    {
+      KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U };
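+      /* Reading note (not part of the generated file): one Keccak-f[1600]
+         round, computed on four inputs at once, one input per 64-bit lane of
+         each vec256. The two KRML_MAYBE_FOR5 blocks below are the theta step;
+         in scalar pseudocode (rotl64 = 64-bit left rotation):
+
+           C[x] = s[x] ^ s[x + 5U] ^ s[x + 10U] ^ s[x + 15U] ^ s[x + 20U];
+           D[x] = C[(x + 4U) % 5U] ^ rotl64(C[(x + 1U) % 5U], 1U);
+           s[x + 5U * y] ^= D[x];
+
+         rho/pi (the 24 keccak_rotc rotations walking keccak_piln), chi
+         (s[x] ^= ~s[x + 1] & s[x + 2] within each row) and iota (xor the
+         round constant into s[0]) follow. Rotations are built from
+         shift_left64/shift_right64 pairs because AVX2 has no 64-bit rotate. */
+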
KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, s[1U + 5U * i])); + s[0U + 5U * i] = v07; + s[1U + 5U * i] = v17; + s[2U + 5U * i] = v27; + s[3U + 5U * i] = v37; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t rem = inputByteLen % rateInBytes; + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] 
= { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem1 = inputByteLen % rateInBytes; + uint8_t *b31 = ib.snd.snd.snd; + uint8_t *b21 = ib.snd.snd.fst; + uint8_t *b11 = ib.snd.fst; + uint8_t *b01 = ib.fst; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[rem] = 0x06U; + b12[rem] = 0x06U; + b22[rem] = 0x06U; + b32[rem] = 0x06U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws32[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws32[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws32[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws32[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws32[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws32[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws32[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws32[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws32[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws32[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws32[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws32[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws32[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws32[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws32[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws32[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws32[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws32[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws32[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws32[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws32[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws32[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws32[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws32[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws32[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws32[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws32[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws32[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws32[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws32[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws32[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws32[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws32[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws32[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws32[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws32[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws32[3U]; + 
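+  /* Reading note (not part of the generated file): each lane's trailing rem
+     input bytes were copied into a zeroed 256-byte block and the SHA3
+     domain-separation/padding byte 0x06 appended at offset rem; the padded
+     block is now transposed into the vectorized state layout, four state
+     words at a time. */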
Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws33 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws32[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws32[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws32[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws32[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws32[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws32[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws32[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws32[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws32[12U]; + 
Lib_IntVector_Intrinsics_vec256 v13 = ws32[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws32[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws32[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws32[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws32[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws32[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws32[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws32[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws32[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws32[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws32[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + 
Lib_IntVector_Intrinsics_vec256 ws211 = v2__4;
+  Lib_IntVector_Intrinsics_vec256 ws220 = v1__4;
+  Lib_IntVector_Intrinsics_vec256 ws230 = v3__4;
+  Lib_IntVector_Intrinsics_vec256 v06 = ws32[24U];
+  Lib_IntVector_Intrinsics_vec256 v16 = ws32[25U];
+  Lib_IntVector_Intrinsics_vec256 v26 = ws32[26U];
+  Lib_IntVector_Intrinsics_vec256 v36 = ws32[27U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16);
+  Lib_IntVector_Intrinsics_vec256
+  v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16);
+  Lib_IntVector_Intrinsics_vec256
+  v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36);
+  Lib_IntVector_Intrinsics_vec256
+  v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36);
+  Lib_IntVector_Intrinsics_vec256
+  v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5);
+  Lib_IntVector_Intrinsics_vec256
+  v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5);
+  Lib_IntVector_Intrinsics_vec256
+  v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5);
+  Lib_IntVector_Intrinsics_vec256
+  v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5);
+  Lib_IntVector_Intrinsics_vec256 ws240 = v0__5;
+  Lib_IntVector_Intrinsics_vec256 ws250 = v2__5;
+  Lib_IntVector_Intrinsics_vec256 ws260 = v1__5;
+  Lib_IntVector_Intrinsics_vec256 ws270 = v3__5;
+  Lib_IntVector_Intrinsics_vec256 v07 = ws32[28U];
+  Lib_IntVector_Intrinsics_vec256 v17 = ws32[29U];
+  Lib_IntVector_Intrinsics_vec256 v27 = ws32[30U];
+  Lib_IntVector_Intrinsics_vec256 v37 = ws32[31U];
+  Lib_IntVector_Intrinsics_vec256
+  v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17);
+  Lib_IntVector_Intrinsics_vec256
+  v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17);
+  Lib_IntVector_Intrinsics_vec256
+  v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37);
+  Lib_IntVector_Intrinsics_vec256
+  v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37);
+  Lib_IntVector_Intrinsics_vec256
+  v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6);
+  Lib_IntVector_Intrinsics_vec256
+  v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6);
+  Lib_IntVector_Intrinsics_vec256
+  v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6);
+  Lib_IntVector_Intrinsics_vec256
+  v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6);
+  Lib_IntVector_Intrinsics_vec256 ws280 = v0__6;
+  Lib_IntVector_Intrinsics_vec256 ws290 = v2__6;
+  Lib_IntVector_Intrinsics_vec256 ws300 = v1__6;
+  Lib_IntVector_Intrinsics_vec256 ws310 = v3__6;
+  ws32[0U] = ws00;
+  ws32[1U] = ws110;
+  ws32[2U] = ws210;
+  ws32[3U] = ws33;
+  ws32[4U] = ws40;
+  ws32[5U] = ws50;
+  ws32[6U] = ws60;
+  ws32[7U] = ws70;
+  ws32[8U] = ws80;
+  ws32[9U] = ws90;
+  ws32[10U] = ws100;
+  ws32[11U] = ws111;
+  ws32[12U] = ws120;
+  ws32[13U] = ws130;
+  ws32[14U] = ws140;
+  ws32[15U] = ws150;
+  ws32[16U] = ws160;
+  ws32[17U] = ws170;
+  ws32[18U] = ws180;
+  ws32[19U] = ws190;
+  ws32[20U] = ws200;
+  ws32[21U] = ws211;
+  ws32[22U] = ws220;
+  ws32[23U] = ws230;
+  ws32[24U] = ws240;
+  ws32[25U] = ws250;
+  ws32[26U] = ws260;
+  ws32[27U] = ws270;
+  ws32[28U] = ws280;
+  ws32[29U] = ws290;
+  ws32[30U] = ws300;
+  ws32[31U] = ws310;
+  for (uint32_t i = 0U; i < 25U; i++)
+  {
+    s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws32[i]);
+  }
+  uint8_t b04[256U] = { 0U };
+  uint8_t b14[256U] = { 0U };
+  uint8_t b24[256U] = { 0U };
+  uint8_t b34[256U] = { 0U };
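+  /* Reading note (not part of the generated file): second half of the
+     pad10*1 padding. A fresh zero block gets 0x80 (the final 1 bit of the
+     pad) in the last byte of the rate and is absorbed like a normal block;
+     xored into the state together with the 0x06 block above, this yields the
+     complete SHA3 padding for all four lanes. */
+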
K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[rateInBytes - 1U] = 0x80U; + b15[rateInBytes - 1U] = 0x80U; + b25[rateInBytes - 1U] = 0x80U; + b35[rateInBytes - 1U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws34[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b26 = b.snd.snd.fst; + uint8_t *b16 = b.snd.fst; + uint8_t *b06 = b.fst; + ws34[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06); + ws34[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16); + ws34[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26); + ws34[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws34[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 32U); + ws34[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 32U); + ws34[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 32U); + ws34[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws34[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 64U); + ws34[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 64U); + ws34[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 64U); + ws34[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws34[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 96U); + ws34[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 96U); + ws34[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 96U); + ws34[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws34[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 128U); + ws34[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 128U); + ws34[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 128U); + ws34[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws34[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 160U); + ws34[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 160U); + ws34[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 160U); + ws34[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws34[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 192U); + ws34[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 192U); + ws34[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 192U); + ws34[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws34[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b06 + 224U); + ws34[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b16 + 224U); + ws34[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b26 + 224U); + ws34[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws34[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws34[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws34[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws34[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws01 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws112 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws212 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws35 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws34[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws34[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws34[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws34[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws41 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws51 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws61 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws71 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws34[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws34[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws34[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws34[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws81 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws91 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws101 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws113 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws34[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws34[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws34[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws34[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = 
Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws121 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws131 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws141 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws151 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws34[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws34[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws34[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws34[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws161 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws171 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws181 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws191 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws34[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws34[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws34[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws34[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws201 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws213 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws221 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws231 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws34[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws34[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws34[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws34[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = 
Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws241 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws251 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws261 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws271 = v3__13; + Lib_IntVector_Intrinsics_vec256 v015 = ws34[28U]; + Lib_IntVector_Intrinsics_vec256 v115 = ws34[29U]; + Lib_IntVector_Intrinsics_vec256 v215 = ws34[30U]; + Lib_IntVector_Intrinsics_vec256 v315 = ws34[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v015, v115); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v215, v315); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws281 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws291 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws301 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws311 = v3__14; + ws34[0U] = ws01; + ws34[1U] = ws112; + ws34[2U] = ws212; + ws34[3U] = ws35; + ws34[4U] = ws41; + ws34[5U] = ws51; + ws34[6U] = ws61; + ws34[7U] = ws71; + ws34[8U] = ws81; + ws34[9U] = ws91; + ws34[10U] = ws101; + ws34[11U] = ws113; + ws34[12U] = ws121; + ws34[13U] = ws131; + ws34[14U] = ws141; + ws34[15U] = ws151; + ws34[16U] = ws161; + ws34[17U] = ws171; + ws34[18U] = ws181; + ws34[19U] = ws191; + ws34[20U] = ws201; + ws34[21U] = ws213; + ws34[22U] = ws221; + ws34[23U] = ws231; + ws34[24U] = ws241; + ws34[25U] = ws251; + ws34[26U] = ws261; + ws34[27U] = ws271; + ws34[28U] = ws281; + ws34[29U] = ws291; + ws34[30U] = ws301; + ws34[31U] = ws311; + for (uint32_t i = 0U; i < 25U; i++) + { + s[i] = Lib_IntVector_Intrinsics_vec256_xor(s[i], ws34[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____17 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____18 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____19 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____17, + Lib_IntVector_Intrinsics_vec256_xor(uu____18, + 
Lib_IntVector_Intrinsics_vec256_xor(uu____19,
+            Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));ateInBytes
Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + 
Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + 
uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + i0 * rateInBytes, hbuf, rateInBytes * sizeof (uint8_t)); + memcpy(b1 + i0 * rateInBytes, hbuf + 256U, rateInBytes * sizeof (uint8_t)); + memcpy(b2 + i0 * rateInBytes, hbuf + 512U, rateInBytes * sizeof (uint8_t)); + memcpy(b36 + i0 * rateInBytes, hbuf + 768U, rateInBytes * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____34 = s[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____35 = s[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____36 = s[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____34, + Lib_IntVector_Intrinsics_vec256_xor(uu____35, + Lib_IntVector_Intrinsics_vec256_xor(uu____36, + Lib_IntVector_Intrinsics_vec256_xor(s[i + 15U], s[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____37 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____38 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____37, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____38, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____38, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + s[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(s[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = s[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = s[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____39 = current; + s[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____39, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____39, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____40 = s[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____41 = Lib_IntVector_Intrinsics_vec256_lognot(s[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v023 = + Lib_IntVector_Intrinsics_vec256_xor(uu____40, + Lib_IntVector_Intrinsics_vec256_and(uu____41, s[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____42 = s[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____43 = Lib_IntVector_Intrinsics_vec256_lognot(s[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v123 = + Lib_IntVector_Intrinsics_vec256_xor(uu____42, + Lib_IntVector_Intrinsics_vec256_and(uu____43, s[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____44 = s[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____45 = Lib_IntVector_Intrinsics_vec256_lognot(s[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v223 = + Lib_IntVector_Intrinsics_vec256_xor(uu____44, + Lib_IntVector_Intrinsics_vec256_and(uu____45, s[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____46 = s[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____47 = Lib_IntVector_Intrinsics_vec256_lognot(s[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v323 = + Lib_IntVector_Intrinsics_vec256_xor(uu____46, + Lib_IntVector_Intrinsics_vec256_and(uu____47, s[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____48 = s[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____49 = Lib_IntVector_Intrinsics_vec256_lognot(s[0U + 5U * i]); + 
Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____48, + Lib_IntVector_Intrinsics_vec256_and(uu____49, s[1U + 5U * i])); + s[0U + 5U * i] = v023; + s[1U + 5U * i] = v123; + s[2U + 5U * i] = v223; + s[3U + 5U * i] = v323; + s[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____50 = s[0U]; + s[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____50, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } + uint32_t remOut = 64U % rateInBytes; + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, s, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v016 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v116 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v216 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v316 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v1_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v016, v116); + Lib_IntVector_Intrinsics_vec256 + v2_15 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v3_15 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v216, v316); + Lib_IntVector_Intrinsics_vec256 + v0__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v1__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_15, v2_15); + Lib_IntVector_Intrinsics_vec256 + v2__15 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 + v3__15 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_15, v3_15); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__15; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__15; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__15; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__15; + Lib_IntVector_Intrinsics_vec256 v017 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v117 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v217 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v317 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v1_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v017, v117); + Lib_IntVector_Intrinsics_vec256 + v2_16 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v3_16 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v217, v317); + Lib_IntVector_Intrinsics_vec256 + v0__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v1__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_16, v2_16); + Lib_IntVector_Intrinsics_vec256 + v2__16 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 + v3__16 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_16, v3_16); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__16; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__16; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__16; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__16; + Lib_IntVector_Intrinsics_vec256 v018 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v118 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v218 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v318 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v018, v118); + 
Lib_IntVector_Intrinsics_vec256 + v1_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v018, v118); + Lib_IntVector_Intrinsics_vec256 + v2_17 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v3_17 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v218, v318); + Lib_IntVector_Intrinsics_vec256 + v0__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v1__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_17, v2_17); + Lib_IntVector_Intrinsics_vec256 + v2__17 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 + v3__17 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_17, v3_17); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__17; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__17; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__17; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__17; + Lib_IntVector_Intrinsics_vec256 v019 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v119 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v219 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v319 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v1_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v019, v119); + Lib_IntVector_Intrinsics_vec256 + v2_18 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v3_18 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v219, v319); + Lib_IntVector_Intrinsics_vec256 + v0__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v1__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_18, v2_18); + Lib_IntVector_Intrinsics_vec256 + v2__18 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 + v3__18 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_18, v3_18); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__18; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__18; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__18; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__18; + Lib_IntVector_Intrinsics_vec256 v020 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v120 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v220 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v320 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v1_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v020, v120); + Lib_IntVector_Intrinsics_vec256 + v2_19 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v3_19 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v220, v320); + Lib_IntVector_Intrinsics_vec256 + v0__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v1__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_19, v2_19); + Lib_IntVector_Intrinsics_vec256 + v2__19 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 + v3__19 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_19, v3_19); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__19; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__19; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__19; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__19; + Lib_IntVector_Intrinsics_vec256 v021 = ws[20U]; + 
Lib_IntVector_Intrinsics_vec256 v121 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v221 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v321 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v1_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v021, v121); + Lib_IntVector_Intrinsics_vec256 + v2_20 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v3_20 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v221, v321); + Lib_IntVector_Intrinsics_vec256 + v0__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v1__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_20, v2_20); + Lib_IntVector_Intrinsics_vec256 + v2__20 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 + v3__20 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_20, v3_20); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__20; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__20; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__20; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__20; + Lib_IntVector_Intrinsics_vec256 v022 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v122 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v222 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v322 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v022, v122); + Lib_IntVector_Intrinsics_vec256 + v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v222, v322); + Lib_IntVector_Intrinsics_vec256 + v0__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v1__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_21, v2_21); + Lib_IntVector_Intrinsics_vec256 + v2__21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 + v3__21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_21, v3_21); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__21; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__21; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__21; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__21; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_22 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_22 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v1__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_22, v2_22); + Lib_IntVector_Intrinsics_vec256 + v2__22 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 + v3__22 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_22, v3_22); + Lib_IntVector_Intrinsics_vec256 
ws28 = v0__22; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__22; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__22; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__22; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b36 = rb.snd.snd.snd; + uint8_t *b2 = rb.snd.snd.fst; + uint8_t *b1 = rb.snd.fst; + uint8_t *b0 = rb.fst; + memcpy(b0 + 64U - remOut, hbuf, remOut * sizeof (uint8_t)); + memcpy(b1 + 64U - remOut, hbuf + 256U, remOut * sizeof (uint8_t)); + memcpy(b2 + 64U - remOut, hbuf + 512U, remOut * sizeof (uint8_t)); + memcpy(b36 + 64U - remOut, hbuf + 768U, remOut * sizeof (uint8_t)); +} + +uint64_t *Hacl_Hash_SHA3_Simd256_state_malloc(void) +{ + uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(100U, sizeof (uint64_t)); + return buf; +} + +void Hacl_Hash_SHA3_Simd256_state_free(uint64_t *s) +{ + KRML_HOST_FREE(s); +} + +void +Hacl_Hash_SHA3_Simd256_shake128_absorb_nblocks( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + for (uint32_t i0 = 0U; i0 < inputByteLen / 168U; i0++) + { + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint8_t *b01 = input0; + uint8_t *b11 = input1; + uint8_t *b21 = input2; + uint8_t *b31 = input3; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + i0 * 168U, 168U * sizeof (uint8_t)); + memcpy(bl1, b11 + i0 * 168U, 168U * sizeof (uint8_t)); + memcpy(bl2, b21 + i0 * 168U, 168U * sizeof (uint8_t)); + memcpy(bl3, b31 + i0 * 168U, 168U * sizeof (uint8_t)); + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b_.snd.snd.snd; + uint8_t *b2 = b_.snd.snd.fst; + uint8_t *b1 = b_.snd.fst; + uint8_t *b0 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws[14U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; 
+ Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + 
Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws1; + ws[2U] = ws2; + ws[3U] = ws3; + ws[4U] = ws4; + ws[5U] = ws5; + ws[6U] = ws6; + ws[7U] = ws7; + ws[8U] = ws8; + ws[9U] = ws9; + ws[10U] = ws10; + ws[11U] = ws11; + ws[12U] = ws12; + ws[13U] = ws13; + ws[14U] = ws14; + ws[15U] = ws15; + ws[16U] = ws16; + ws[17U] = ws17; + ws[18U] = ws18; + ws[19U] = ws19; + ws[20U] = ws20; + ws[21U] = ws21; + ws[22U] = ws22; + ws[23U] = ws23; + ws[24U] = ws24; + ws[25U] = ws25; + ws[26U] = ws26; + ws[27U] = ws27; + ws[28U] = ws28; + ws[29U] = ws29; + ws[30U] = ws30; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = Lib_IntVector_Intrinsics_vec256_xor(state[i], ws[i]); + } + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = state[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = state[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = state[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(state[i + 15U], state[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + state[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(state[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = state[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = state[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + state[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = state[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(state[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, state[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = state[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(state[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, state[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = state[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = 
Lib_IntVector_Intrinsics_vec256_lognot(state[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, state[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = state[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(state[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, state[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = state[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(state[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, state[1U + 5U * i])); + state[0U + 5U * i] = v07; + state[1U + 5U * i] = v17; + state[2U + 5U * i] = v27; + state[3U + 5U * i] = v37; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = state[0U]; + state[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } +} + +void +Hacl_Hash_SHA3_Simd256_shake128_absorb_final( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *input0, + uint8_t *input1, + uint8_t *input2, + uint8_t *input3, + uint32_t inputByteLen +) +{ + uint32_t rem = inputByteLen % 168U; + uint8_t b00[256U] = { 0U }; + uint8_t b10[256U] = { 0U }; + uint8_t b20[256U] = { 0U }; + uint8_t b30[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b_ = { .fst = b00, .snd = { .fst = b10, .snd = { .fst = b20, .snd = b30 } } }; + uint32_t rem1 = inputByteLen % 168U; + uint8_t *b01 = input0; + uint8_t *b11 = input1; + uint8_t *b21 = input2; + uint8_t *b31 = input3; + uint8_t *bl3 = b_.snd.snd.snd; + uint8_t *bl2 = b_.snd.snd.fst; + uint8_t *bl1 = b_.snd.fst; + uint8_t *bl0 = b_.fst; + memcpy(bl0, b01 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl1, b11 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl2, b21 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + memcpy(bl3, b31 + inputByteLen - rem1, rem1 * sizeof (uint8_t)); + uint8_t *b32 = b_.snd.snd.snd; + uint8_t *b22 = b_.snd.snd.fst; + uint8_t *b12 = b_.snd.fst; + uint8_t *b02 = b_.fst; + b02[rem] = 0x1FU; + b12[rem] = 0x1FU; + b22[rem] = 0x1FU; + b32[rem] = 0x1FU; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b33 = b_.snd.snd.snd; + uint8_t *b23 = b_.snd.snd.fst; + uint8_t *b13 = b_.snd.fst; + uint8_t *b03 = b_.fst; + ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03); + ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13); + ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23); + ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33); + ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 32U); + ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 32U); + ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 32U); + ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 32U); + ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 64U); + ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 64U); + ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 64U); + ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 64U); + ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 96U); + ws[13U] = 
Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 96U); + ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 96U); + ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 96U); + ws[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 128U); + ws[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 128U); + ws[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 128U); + ws[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 128U); + ws[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 160U); + ws[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 160U); + ws[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 160U); + ws[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 160U); + ws[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 192U); + ws[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 192U); + ws[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 192U); + ws[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 192U); + ws[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b03 + 224U); + ws[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b13 + 224U); + ws[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b23 + 224U); + ws[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b33 + 224U); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws00 = v0__; + Lib_IntVector_Intrinsics_vec256 ws110 = v2__; + Lib_IntVector_Intrinsics_vec256 ws210 = v1__; + Lib_IntVector_Intrinsics_vec256 ws32 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws40 = v0__0; + 
Lib_IntVector_Intrinsics_vec256 ws50 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws60 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws70 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws80 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws90 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws100 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws111 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws120 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws130 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws140 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws150 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + 
Lib_IntVector_Intrinsics_vec256 + v3__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws160 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws170 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws180 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws190 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws200 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws211 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws220 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws230 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws240 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws250 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws260 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws270 = v3__5; + Lib_IntVector_Intrinsics_vec256 v07 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v17 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v27 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v37 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v07, v17); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v27, v37); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws280 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws290 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws300 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws310 = v3__6; + ws[0U] = ws00; + ws[1U] = ws110; + ws[2U] = ws210; + ws[3U] = ws32; + ws[4U] = ws40; + ws[5U] = ws50; + ws[6U] = ws60; + ws[7U] = ws70; + ws[8U] = ws80; + ws[9U] = ws90; + ws[10U] = ws100; + ws[11U] = ws111; + ws[12U] = ws120; + ws[13U] = ws130; + ws[14U] = ws140; + ws[15U] = ws150; + ws[16U] = ws160; + ws[17U] = ws170; + ws[18U] = ws180; + ws[19U] = ws190; + ws[20U] = ws200; + ws[21U] = ws211; + ws[22U] = ws220; + ws[23U] = ws230; + ws[24U] = ws240; + ws[25U] = ws250; + ws[26U] = ws260; + ws[27U] = ws270; + ws[28U] = ws280; + ws[29U] = ws290; + ws[30U] = ws300; + ws[31U] = ws310; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = Lib_IntVector_Intrinsics_vec256_xor(state[i], ws[i]); + } + uint8_t b04[256U] = { 0U }; + uint8_t b14[256U] = { 0U }; + uint8_t b24[256U] = { 0U }; + uint8_t b34[256U] = { 0U }; + K____uint8_t___uint8_t____K____uint8_t___uint8_t_ + b = { .fst = b04, .snd = { .fst = b14, .snd = { .fst = b24, .snd = b34 } } }; + uint8_t *b35 = b.snd.snd.snd; + uint8_t *b25 = b.snd.snd.fst; + uint8_t *b15 = b.snd.fst; + uint8_t *b05 = b.fst; + b05[167U] = 0x80U; + b15[167U] = 0x80U; + b25[167U] = 0x80U; + b35[167U] = 0x80U; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws33[32U] KRML_POST_ALIGN(32) = { 0U }; + uint8_t *b3 = b.snd.snd.snd; + uint8_t *b2 = b.snd.snd.fst; + uint8_t *b1 = b.snd.fst; + uint8_t *b0 = b.fst; + ws33[0U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0); + ws33[1U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1); + ws33[2U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2); + ws33[3U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3); + ws33[4U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 32U); + ws33[5U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 32U); + ws33[6U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 32U); + ws33[7U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 32U); + ws33[8U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 64U); + ws33[9U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 64U); + ws33[10U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 64U); + ws33[11U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 64U); + ws33[12U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 96U); + ws33[13U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 96U); + ws33[14U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 96U); + ws33[15U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 96U); + ws33[16U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 128U); + ws33[17U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 128U); + ws33[18U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 128U); + ws33[19U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 128U); + ws33[20U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 160U); + ws33[21U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 160U); + ws33[22U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 160U); + ws33[23U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 160U); + ws33[24U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 192U); + ws33[25U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 192U); + 
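/* b0..b3 are the zero-filled trailing blocks of the four streams; the 0x80 written to byte 167 above terminates Keccak's pad10*1 padding of the 168-byte SHAKE128 rate block. The ws33 loads gather these lanes for the same 4x4 transpose as before. */ +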
ws33[26U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 192U); + ws33[27U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 192U); + ws33[28U] = Lib_IntVector_Intrinsics_vec256_load64_le(b0 + 224U); + ws33[29U] = Lib_IntVector_Intrinsics_vec256_load64_le(b1 + 224U); + ws33[30U] = Lib_IntVector_Intrinsics_vec256_load64_le(b2 + 224U); + ws33[31U] = Lib_IntVector_Intrinsics_vec256_load64_le(b3 + 224U); + Lib_IntVector_Intrinsics_vec256 v08 = ws33[0U]; + Lib_IntVector_Intrinsics_vec256 v18 = ws33[1U]; + Lib_IntVector_Intrinsics_vec256 v28 = ws33[2U]; + Lib_IntVector_Intrinsics_vec256 v38 = ws33[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v1_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v08, v18); + Lib_IntVector_Intrinsics_vec256 + v2_7 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v3_7 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v28, v38); + Lib_IntVector_Intrinsics_vec256 + v0__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v1__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_7, v2_7); + Lib_IntVector_Intrinsics_vec256 + v2__7 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 + v3__7 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_7, v3_7); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__7; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__7; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__7; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__7; + Lib_IntVector_Intrinsics_vec256 v09 = ws33[4U]; + Lib_IntVector_Intrinsics_vec256 v19 = ws33[5U]; + Lib_IntVector_Intrinsics_vec256 v29 = ws33[6U]; + Lib_IntVector_Intrinsics_vec256 v39 = ws33[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v1_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v09, v19); + Lib_IntVector_Intrinsics_vec256 + v2_8 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v3_8 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v29, v39); + Lib_IntVector_Intrinsics_vec256 + v0__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v1__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_8, v2_8); + Lib_IntVector_Intrinsics_vec256 + v2__8 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 + v3__8 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_8, v3_8); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__8; + Lib_IntVector_Intrinsics_vec256 ws5 = v2__8; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__8; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__8; + Lib_IntVector_Intrinsics_vec256 v010 = ws33[8U]; + Lib_IntVector_Intrinsics_vec256 v110 = ws33[9U]; + Lib_IntVector_Intrinsics_vec256 v210 = ws33[10U]; + Lib_IntVector_Intrinsics_vec256 v310 = ws33[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v1_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v010, v110); + Lib_IntVector_Intrinsics_vec256 + v2_9 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v3_9 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v210, v310); + Lib_IntVector_Intrinsics_vec256 + v0__9 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v1__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_9, v2_9); + Lib_IntVector_Intrinsics_vec256 + v2__9 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 + v3__9 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_9, v3_9); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__9; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__9; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__9; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__9; + Lib_IntVector_Intrinsics_vec256 v011 = ws33[12U]; + Lib_IntVector_Intrinsics_vec256 v111 = ws33[13U]; + Lib_IntVector_Intrinsics_vec256 v211 = ws33[14U]; + Lib_IntVector_Intrinsics_vec256 v311 = ws33[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v1_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v011, v111); + Lib_IntVector_Intrinsics_vec256 + v2_10 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v3_10 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v211, v311); + Lib_IntVector_Intrinsics_vec256 + v0__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v1__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v2_10); + Lib_IntVector_Intrinsics_vec256 + v2__10 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 + v3__10 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v3_10); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__10; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__10; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__10; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__10; + Lib_IntVector_Intrinsics_vec256 v012 = ws33[16U]; + Lib_IntVector_Intrinsics_vec256 v112 = ws33[17U]; + Lib_IntVector_Intrinsics_vec256 v212 = ws33[18U]; + Lib_IntVector_Intrinsics_vec256 v312 = ws33[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v012, v112); + Lib_IntVector_Intrinsics_vec256 + v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v212, v312); + Lib_IntVector_Intrinsics_vec256 + v0__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v1__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_11, v2_11); + Lib_IntVector_Intrinsics_vec256 + v2__11 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 + v3__11 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_11, v3_11); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__11; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__11; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__11; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__11; + Lib_IntVector_Intrinsics_vec256 v013 = ws33[20U]; + Lib_IntVector_Intrinsics_vec256 v113 = ws33[21U]; + Lib_IntVector_Intrinsics_vec256 v213 = ws33[22U]; + Lib_IntVector_Intrinsics_vec256 v313 = ws33[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v013, v113); + Lib_IntVector_Intrinsics_vec256 + v1_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v013, v113); + 
Lib_IntVector_Intrinsics_vec256 + v2_12 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v3_12 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v213, v313); + Lib_IntVector_Intrinsics_vec256 + v0__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v1__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v2_12); + Lib_IntVector_Intrinsics_vec256 + v2__12 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 + v3__12 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v3_12); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__12; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__12; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__12; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__12; + Lib_IntVector_Intrinsics_vec256 v014 = ws33[24U]; + Lib_IntVector_Intrinsics_vec256 v114 = ws33[25U]; + Lib_IntVector_Intrinsics_vec256 v214 = ws33[26U]; + Lib_IntVector_Intrinsics_vec256 v314 = ws33[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v1_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v014, v114); + Lib_IntVector_Intrinsics_vec256 + v2_13 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v3_13 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v214, v314); + Lib_IntVector_Intrinsics_vec256 + v0__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v1__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_13, v2_13); + Lib_IntVector_Intrinsics_vec256 + v2__13 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 + v3__13 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_13, v3_13); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__13; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__13; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__13; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__13; + Lib_IntVector_Intrinsics_vec256 v0 = ws33[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws33[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws33[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws33[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_14 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_14 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v1__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_14, v2_14); + Lib_IntVector_Intrinsics_vec256 + v2__14 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 + v3__14 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_14, v3_14); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__14; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__14; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__14; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__14; + ws33[0U] = ws0; + ws33[1U] = ws1; + ws33[2U] = ws2; + ws33[3U] = ws3; + ws33[4U] = ws4; + ws33[5U] = ws5; + ws33[6U] = ws6; + ws33[7U] = ws7; + ws33[8U] = ws8; + ws33[9U] = ws9; + 
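/* Once the transposed lanes are written back, ws33 is XORed into the 25-lane state and 24 rounds of Keccak-f[1600] follow: theta (the _C/_D column sums), rho+pi (rotations driven by keccak_rotc/keccak_piln), chi (the lognot/and/xor block) and iota (XOR of the round constant keccak_rndc[i0]). */ +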
ws33[10U] = ws10; + ws33[11U] = ws11; + ws33[12U] = ws12; + ws33[13U] = ws13; + ws33[14U] = ws14; + ws33[15U] = ws15; + ws33[16U] = ws16; + ws33[17U] = ws17; + ws33[18U] = ws18; + ws33[19U] = ws19; + ws33[20U] = ws20; + ws33[21U] = ws21; + ws33[22U] = ws22; + ws33[23U] = ws23; + ws33[24U] = ws24; + ws33[25U] = ws25; + ws33[26U] = ws26; + ws33[27U] = ws27; + ws33[28U] = ws28; + ws33[29U] = ws29; + ws33[30U] = ws30; + ws33[31U] = ws31; + for (uint32_t i = 0U; i < 25U; i++) + { + state[i] = Lib_IntVector_Intrinsics_vec256_xor(state[i], ws33[i]); + } + for (uint32_t i0 = 0U; i0 < 24U; i0++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = state[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = state[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = state[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(state[i + 15U], state[i + 20U]))));); + KRML_MAYBE_FOR5(i1, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i1 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i1 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + state[i1 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(state[i1 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = state[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = state[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + state[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = state[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(state[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v015 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, state[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = state[1U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(state[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v115 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, state[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = state[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(state[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v215 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, state[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = state[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(state[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v315 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + 
Lib_IntVector_Intrinsics_vec256_and(uu____13, state[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = state[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(state[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, state[1U + 5U * i])); + state[0U + 5U * i] = v015; + state[1U + 5U * i] = v115; + state[2U + 5U * i] = v215; + state[3U + 5U * i] = v315; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i0]; + Lib_IntVector_Intrinsics_vec256 uu____16 = state[0U]; + state[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } +} + +void +Hacl_Hash_SHA3_Simd256_shake128_squeeze_nblocks( + Lib_IntVector_Intrinsics_vec256 *state, + uint8_t *output0, + uint8_t *output1, + uint8_t *output2, + uint8_t *output3, + uint32_t outputByteLen +) +{ + for (uint32_t i0 = 0U; i0 < outputByteLen / 168U; i0++) + { + uint8_t hbuf[1024U] = { 0U }; + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[32U] KRML_POST_ALIGN(32) = { 0U }; + memcpy(ws, state, 25U * sizeof (Lib_IntVector_Intrinsics_vec256)); + Lib_IntVector_Intrinsics_vec256 v00 = ws[0U]; + Lib_IntVector_Intrinsics_vec256 v10 = ws[1U]; + Lib_IntVector_Intrinsics_vec256 v20 = ws[2U]; + Lib_IntVector_Intrinsics_vec256 v30 = ws[3U]; + Lib_IntVector_Intrinsics_vec256 + v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); + Lib_IntVector_Intrinsics_vec256 + v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); + Lib_IntVector_Intrinsics_vec256 + v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); + Lib_IntVector_Intrinsics_vec256 + v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 + v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); + Lib_IntVector_Intrinsics_vec256 ws0 = v0__; + Lib_IntVector_Intrinsics_vec256 ws1 = v2__; + Lib_IntVector_Intrinsics_vec256 ws2 = v1__; + Lib_IntVector_Intrinsics_vec256 ws3 = v3__; + Lib_IntVector_Intrinsics_vec256 v01 = ws[4U]; + Lib_IntVector_Intrinsics_vec256 v11 = ws[5U]; + Lib_IntVector_Intrinsics_vec256 v21 = ws[6U]; + Lib_IntVector_Intrinsics_vec256 v31 = ws[7U]; + Lib_IntVector_Intrinsics_vec256 + v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); + Lib_IntVector_Intrinsics_vec256 + v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); + Lib_IntVector_Intrinsics_vec256 + v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); + Lib_IntVector_Intrinsics_vec256 + v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 + v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); + Lib_IntVector_Intrinsics_vec256 ws4 = v0__0; + Lib_IntVector_Intrinsics_vec256 
ws5 = v2__0; + Lib_IntVector_Intrinsics_vec256 ws6 = v1__0; + Lib_IntVector_Intrinsics_vec256 ws7 = v3__0; + Lib_IntVector_Intrinsics_vec256 v02 = ws[8U]; + Lib_IntVector_Intrinsics_vec256 v12 = ws[9U]; + Lib_IntVector_Intrinsics_vec256 v22 = ws[10U]; + Lib_IntVector_Intrinsics_vec256 v32 = ws[11U]; + Lib_IntVector_Intrinsics_vec256 + v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); + Lib_IntVector_Intrinsics_vec256 + v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); + Lib_IntVector_Intrinsics_vec256 + v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); + Lib_IntVector_Intrinsics_vec256 + v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 + v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); + Lib_IntVector_Intrinsics_vec256 ws8 = v0__1; + Lib_IntVector_Intrinsics_vec256 ws9 = v2__1; + Lib_IntVector_Intrinsics_vec256 ws10 = v1__1; + Lib_IntVector_Intrinsics_vec256 ws11 = v3__1; + Lib_IntVector_Intrinsics_vec256 v03 = ws[12U]; + Lib_IntVector_Intrinsics_vec256 v13 = ws[13U]; + Lib_IntVector_Intrinsics_vec256 v23 = ws[14U]; + Lib_IntVector_Intrinsics_vec256 v33 = ws[15U]; + Lib_IntVector_Intrinsics_vec256 + v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v03, v13); + Lib_IntVector_Intrinsics_vec256 + v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v23, v33); + Lib_IntVector_Intrinsics_vec256 + v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); + Lib_IntVector_Intrinsics_vec256 + v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 + v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); + Lib_IntVector_Intrinsics_vec256 ws12 = v0__2; + Lib_IntVector_Intrinsics_vec256 ws13 = v2__2; + Lib_IntVector_Intrinsics_vec256 ws14 = v1__2; + Lib_IntVector_Intrinsics_vec256 ws15 = v3__2; + Lib_IntVector_Intrinsics_vec256 v04 = ws[16U]; + Lib_IntVector_Intrinsics_vec256 v14 = ws[17U]; + Lib_IntVector_Intrinsics_vec256 v24 = ws[18U]; + Lib_IntVector_Intrinsics_vec256 v34 = ws[19U]; + Lib_IntVector_Intrinsics_vec256 + v0_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v1_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v04, v14); + Lib_IntVector_Intrinsics_vec256 + v2_3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v3_3 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v24, v34); + Lib_IntVector_Intrinsics_vec256 + v0__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v1__3 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_3, v2_3); + Lib_IntVector_Intrinsics_vec256 + v2__3 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 + v3__3 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_3, v3_3); + Lib_IntVector_Intrinsics_vec256 ws16 = v0__3; + Lib_IntVector_Intrinsics_vec256 ws17 = v2__3; + Lib_IntVector_Intrinsics_vec256 ws18 = v1__3; + Lib_IntVector_Intrinsics_vec256 ws19 = v3__3; + Lib_IntVector_Intrinsics_vec256 v05 = ws[20U]; + Lib_IntVector_Intrinsics_vec256 v15 = ws[21U]; + Lib_IntVector_Intrinsics_vec256 v25 = ws[22U]; + Lib_IntVector_Intrinsics_vec256 v35 = ws[23U]; + Lib_IntVector_Intrinsics_vec256 + v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v05, v15); + Lib_IntVector_Intrinsics_vec256 + v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v25, v35); + Lib_IntVector_Intrinsics_vec256 + v0__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v1__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_4, v2_4); + Lib_IntVector_Intrinsics_vec256 + v2__4 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 + v3__4 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_4, v3_4); + Lib_IntVector_Intrinsics_vec256 ws20 = v0__4; + Lib_IntVector_Intrinsics_vec256 ws21 = v2__4; + Lib_IntVector_Intrinsics_vec256 ws22 = v1__4; + Lib_IntVector_Intrinsics_vec256 ws23 = v3__4; + Lib_IntVector_Intrinsics_vec256 v06 = ws[24U]; + Lib_IntVector_Intrinsics_vec256 v16 = ws[25U]; + Lib_IntVector_Intrinsics_vec256 v26 = ws[26U]; + Lib_IntVector_Intrinsics_vec256 v36 = ws[27U]; + Lib_IntVector_Intrinsics_vec256 + v0_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v1_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v06, v16); + Lib_IntVector_Intrinsics_vec256 + v2_5 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v3_5 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v26, v36); + Lib_IntVector_Intrinsics_vec256 + v0__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v1__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_5, v2_5); + Lib_IntVector_Intrinsics_vec256 + v2__5 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 + v3__5 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_5, v3_5); + Lib_IntVector_Intrinsics_vec256 ws24 = v0__5; + Lib_IntVector_Intrinsics_vec256 ws25 = v2__5; + Lib_IntVector_Intrinsics_vec256 ws26 = v1__5; + Lib_IntVector_Intrinsics_vec256 ws27 = v3__5; + Lib_IntVector_Intrinsics_vec256 v0 = ws[28U]; + Lib_IntVector_Intrinsics_vec256 v1 = ws[29U]; + Lib_IntVector_Intrinsics_vec256 v2 = ws[30U]; + Lib_IntVector_Intrinsics_vec256 v3 = ws[31U]; + Lib_IntVector_Intrinsics_vec256 + v0_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v1_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); + Lib_IntVector_Intrinsics_vec256 + v2_6 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v3_6 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); + Lib_IntVector_Intrinsics_vec256 + v0__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_6, v2_6); + Lib_IntVector_Intrinsics_vec256 + v1__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_6, v2_6); + 
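/* The write-back order below (ws0, ws4, ws8, ...) lays hbuf out stream-major: bytes 256*j .. 256*j+255 belong to output stream j, so each memcpy afterwards can copy one contiguous 168-byte rate block per output. */ +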
Lib_IntVector_Intrinsics_vec256 + v2__6 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 + v3__6 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_6, v3_6); + Lib_IntVector_Intrinsics_vec256 ws28 = v0__6; + Lib_IntVector_Intrinsics_vec256 ws29 = v2__6; + Lib_IntVector_Intrinsics_vec256 ws30 = v1__6; + Lib_IntVector_Intrinsics_vec256 ws31 = v3__6; + ws[0U] = ws0; + ws[1U] = ws4; + ws[2U] = ws8; + ws[3U] = ws12; + ws[4U] = ws16; + ws[5U] = ws20; + ws[6U] = ws24; + ws[7U] = ws28; + ws[8U] = ws1; + ws[9U] = ws5; + ws[10U] = ws9; + ws[11U] = ws13; + ws[12U] = ws17; + ws[13U] = ws21; + ws[14U] = ws25; + ws[15U] = ws29; + ws[16U] = ws2; + ws[17U] = ws6; + ws[18U] = ws10; + ws[19U] = ws14; + ws[20U] = ws18; + ws[21U] = ws22; + ws[22U] = ws26; + ws[23U] = ws30; + ws[24U] = ws3; + ws[25U] = ws7; + ws[26U] = ws11; + ws[27U] = ws15; + ws[28U] = ws19; + ws[29U] = ws23; + ws[30U] = ws27; + ws[31U] = ws31; + for (uint32_t i = 0U; i < 32U; i++) + { + Lib_IntVector_Intrinsics_vec256_store64_le(hbuf + i * 32U, ws[i]); + } + uint8_t *b0 = output0; + uint8_t *b1 = output1; + uint8_t *b2 = output2; + uint8_t *b3 = output3; + memcpy(b0 + i0 * 168U, hbuf, 168U * sizeof (uint8_t)); + memcpy(b1 + i0 * 168U, hbuf + 256U, 168U * sizeof (uint8_t)); + memcpy(b2 + i0 * 168U, hbuf + 512U, 168U * sizeof (uint8_t)); + memcpy(b3 + i0 * 168U, hbuf + 768U, 168U * sizeof (uint8_t)); + for (uint32_t i1 = 0U; i1 < 24U; i1++) + { + KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 _C[5U] KRML_POST_ALIGN(32) = { 0U }; + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____0 = state[i + 0U]; + Lib_IntVector_Intrinsics_vec256 uu____1 = state[i + 5U]; + Lib_IntVector_Intrinsics_vec256 uu____2 = state[i + 10U]; + _C[i] = + Lib_IntVector_Intrinsics_vec256_xor(uu____0, + Lib_IntVector_Intrinsics_vec256_xor(uu____1, + Lib_IntVector_Intrinsics_vec256_xor(uu____2, + Lib_IntVector_Intrinsics_vec256_xor(state[i + 15U], state[i + 20U]))));); + KRML_MAYBE_FOR5(i2, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____3 = _C[(i2 + 4U) % 5U]; + Lib_IntVector_Intrinsics_vec256 uu____4 = _C[(i2 + 1U) % 5U]; + Lib_IntVector_Intrinsics_vec256 + _D = + Lib_IntVector_Intrinsics_vec256_xor(uu____3, + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____4, + 1U), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____4, 63U))); + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + state[i2 + 5U * i] = Lib_IntVector_Intrinsics_vec256_xor(state[i2 + 5U * i], _D););); + Lib_IntVector_Intrinsics_vec256 x = state[1U]; + Lib_IntVector_Intrinsics_vec256 current = x; + for (uint32_t i = 0U; i < 24U; i++) + { + uint32_t _Y = Hacl_Impl_SHA3_Vec_keccak_piln[i]; + uint32_t r = Hacl_Impl_SHA3_Vec_keccak_rotc[i]; + Lib_IntVector_Intrinsics_vec256 temp = state[_Y]; + Lib_IntVector_Intrinsics_vec256 uu____5 = current; + state[_Y] = + Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_left64(uu____5, + r), + Lib_IntVector_Intrinsics_vec256_shift_right64(uu____5, 64U - r)); + current = temp; + } + KRML_MAYBE_FOR5(i, + 0U, + 5U, + 1U, + Lib_IntVector_Intrinsics_vec256 uu____6 = state[0U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____7 = Lib_IntVector_Intrinsics_vec256_lognot(state[1U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v07 = + Lib_IntVector_Intrinsics_vec256_xor(uu____6, + Lib_IntVector_Intrinsics_vec256_and(uu____7, state[2U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____8 = state[1U + 5U * i]; + 
Lib_IntVector_Intrinsics_vec256 + uu____9 = Lib_IntVector_Intrinsics_vec256_lognot(state[2U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v17 = + Lib_IntVector_Intrinsics_vec256_xor(uu____8, + Lib_IntVector_Intrinsics_vec256_and(uu____9, state[3U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____10 = state[2U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____11 = Lib_IntVector_Intrinsics_vec256_lognot(state[3U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v27 = + Lib_IntVector_Intrinsics_vec256_xor(uu____10, + Lib_IntVector_Intrinsics_vec256_and(uu____11, state[4U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____12 = state[3U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____13 = Lib_IntVector_Intrinsics_vec256_lognot(state[4U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v37 = + Lib_IntVector_Intrinsics_vec256_xor(uu____12, + Lib_IntVector_Intrinsics_vec256_and(uu____13, state[0U + 5U * i])); + Lib_IntVector_Intrinsics_vec256 uu____14 = state[4U + 5U * i]; + Lib_IntVector_Intrinsics_vec256 + uu____15 = Lib_IntVector_Intrinsics_vec256_lognot(state[0U + 5U * i]); + Lib_IntVector_Intrinsics_vec256 + v4 = + Lib_IntVector_Intrinsics_vec256_xor(uu____14, + Lib_IntVector_Intrinsics_vec256_and(uu____15, state[1U + 5U * i])); + state[0U + 5U * i] = v07; + state[1U + 5U * i] = v17; + state[2U + 5U * i] = v27; + state[3U + 5U * i] = v37; + state[4U + 5U * i] = v4;); + uint64_t c = Hacl_Impl_SHA3_Vec_keccak_rndc[i1]; + Lib_IntVector_Intrinsics_vec256 uu____16 = state[0U]; + state[0U] = + Lib_IntVector_Intrinsics_vec256_xor(uu____16, + Lib_IntVector_Intrinsics_vec256_load64(c)); + } + } +} + diff --git a/tests/kyber.cc b/tests/kyber.cc new file mode 100644 index 000000000..26714288a --- /dev/null +++ b/tests/kyber.cc @@ -0,0 +1,228 @@ +/* + * Copyright 2023 Cryspen Sarl + * + * Licensed under the Apache License, Version 2.0 or MIT. 
+ * - http://www.apache.org/licenses/LICENSE-2.0 + * - http://opensource.org/licenses/MIT + */ + +#include <fstream> +#include <gtest/gtest.h> +#include <nlohmann/json.hpp> + +#include "Hacl_Hash_SHA3.h" +#include "Libcrux_Kem_Kyber_Kyber768.h" +#include "util.h" + +using namespace std; + +class KAT +{ +public: + bytes key_generation_seed; + bytes sha3_256_hash_of_public_key; + bytes sha3_256_hash_of_secret_key; + bytes encapsulation_seed; + bytes sha3_256_hash_of_ciphertext; + bytes shared_secret; +}; + +vector<KAT> +read_kats(string path) +{ + ifstream kat_file(path); + nlohmann::json kats_raw; + kat_file >> kats_raw; + + vector<KAT> kats; + + // Read test group + for (auto& kat_raw : kats_raw.items()) { + auto kat_raw_value = kat_raw.value(); + + kats.push_back(KAT{ + .key_generation_seed = from_hex(kat_raw_value["key_generation_seed"]), + .sha3_256_hash_of_public_key = + from_hex(kat_raw_value["sha3_256_hash_of_public_key"]), + .sha3_256_hash_of_secret_key = + from_hex(kat_raw_value["sha3_256_hash_of_secret_key"]), + .encapsulation_seed = from_hex(kat_raw_value["encapsulation_seed"]), + .sha3_256_hash_of_ciphertext = + from_hex(kat_raw_value["sha3_256_hash_of_ciphertext"]), + .shared_secret = from_hex(kat_raw_value["shared_secret"]), + }); + } + + return kats; +} + +void modify_ciphertext(uint8_t* ciphertext, size_t ciphertext_size) { + uint8_t randomness[3]; + generate_random(randomness, 3); + + uint8_t random_byte = randomness[0]; + if (random_byte == 0) { + random_byte += 1; + } + + uint16_t random_u16 = (randomness[2] << 8) | randomness[1]; + + uint16_t random_position = random_u16 % ciphertext_size; + + ciphertext[random_position] ^= random_byte; +} + +void modify_secret_key(uint8_t* secret_key, size_t secret_key_size, bool modify_implicit_rejection_value) { + uint8_t randomness[3]; + generate_random(randomness, 3); + + uint8_t random_byte = randomness[0]; + if (random_byte == 0) { + random_byte += 1; + } + + uint16_t random_u16 = (randomness[2] << 8) | randomness[1]; + + uint16_t random_position = 0; + + if(modify_implicit_rejection_value == true) { + random_position = (secret_key_size - 32) + (random_u16 % 32); + } else { + random_position = random_u16 % (secret_key_size - 32); + } + + secret_key[random_position] ^= random_byte; +} + +uint8_t* compute_implicit_rejection_shared_secret(uint8_t* ciphertext, size_t ciphertext_size, uint8_t* secret_key, size_t secret_key_size) { + uint8_t* hashInput = new uint8_t[32 + ciphertext_size]; + uint8_t* sharedSecret = new uint8_t[32]; + + std::copy(secret_key + (secret_key_size - 32), secret_key + secret_key_size, hashInput); + std::copy(ciphertext, ciphertext + ciphertext_size, hashInput + 32); + + Hacl_Hash_SHA3_shake256_hacl(32 + ciphertext_size, hashInput, 32, sharedSecret); + + delete [] hashInput; + return sharedSecret; +} + +TEST(Kyber768Test, ConsistencyTest) +{ + uint8_t randomness[64]; + uint8_t publicKey[KYBER768_PUBLICKEYBYTES]; + uint8_t secretKey[KYBER768_SECRETKEYBYTES]; + + generate_random(randomness, 64); + Libcrux_Kyber768_GenerateKeyPair(publicKey, secretKey, randomness); + + uint8_t ciphertext[KYBER768_CIPHERTEXTBYTES]; + uint8_t sharedSecret[KYBER768_SHAREDSECRETBYTES]; + + generate_random(randomness, 32); + Libcrux_Kyber768_Encapsulate( + ciphertext, sharedSecret, &publicKey, randomness); + + uint8_t sharedSecret2[KYBER768_SHAREDSECRETBYTES]; + Libcrux_Kyber768_Decapsulate(sharedSecret2, &ciphertext, &secretKey); + + EXPECT_EQ(0, memcmp(sharedSecret, sharedSecret2, KYBER768_SHAREDSECRETBYTES)); +} + +TEST(Kyber768Test, ModifiedCiphertextTest) +{ + uint8_t randomness[64]; + 
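// Flipping one random ciphertext byte must trigger implicit rejection: decapsulation then returns SHAKE256(z || ciphertext), where z is the 32-byte rejection value at the end of the secret key, instead of the encapsulated secret. +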
uint8_t publicKey[KYBER768_PUBLICKEYBYTES]; + uint8_t secretKey[KYBER768_SECRETKEYBYTES]; + + generate_random(randomness, 64); + Libcrux_Kyber768_GenerateKeyPair(publicKey, secretKey, randomness); + + uint8_t ciphertext[KYBER768_CIPHERTEXTBYTES]; + uint8_t sharedSecret[KYBER768_SHAREDSECRETBYTES]; + + generate_random(randomness, 32); + Libcrux_Kyber768_Encapsulate( + ciphertext, sharedSecret, &publicKey, randomness); + + uint8_t sharedSecret2[KYBER768_SHAREDSECRETBYTES]; + modify_ciphertext(ciphertext, KYBER768_CIPHERTEXTBYTES); + Libcrux_Kyber768_Decapsulate(sharedSecret2, &ciphertext, &secretKey); + + EXPECT_NE(0, memcmp(sharedSecret, sharedSecret2, KYBER768_SHAREDSECRETBYTES)); + + uint8_t* implicitRejectionSharedSecret = compute_implicit_rejection_shared_secret(ciphertext, KYBER768_CIPHERTEXTBYTES, secretKey, KYBER768_SECRETKEYBYTES); + + EXPECT_EQ(0, memcmp(implicitRejectionSharedSecret, sharedSecret2, KYBER768_SHAREDSECRETBYTES)); + delete [] implicitRejectionSharedSecret; +} + +TEST(Kyber768Test, ModifiedSecretKeyTest) +{ + uint8_t randomness[64]; + uint8_t publicKey[KYBER768_PUBLICKEYBYTES]; + uint8_t secretKey[KYBER768_SECRETKEYBYTES]; + + generate_random(randomness, 64); + Libcrux_Kyber768_GenerateKeyPair(publicKey, secretKey, randomness); + + uint8_t ciphertext[KYBER768_CIPHERTEXTBYTES]; + uint8_t sharedSecret[KYBER768_SHAREDSECRETBYTES]; + + generate_random(randomness, 32); + Libcrux_Kyber768_Encapsulate( + ciphertext, sharedSecret, &publicKey, randomness); + + uint8_t sharedSecret2[KYBER768_SHAREDSECRETBYTES]; + modify_secret_key(secretKey, KYBER768_SECRETKEYBYTES, false); + Libcrux_Kyber768_Decapsulate(sharedSecret2, &ciphertext, &secretKey); + + EXPECT_NE(0, memcmp(sharedSecret, sharedSecret2, KYBER768_SHAREDSECRETBYTES)); + + modify_secret_key(secretKey, KYBER768_SECRETKEYBYTES, true); + Libcrux_Kyber768_Decapsulate(sharedSecret2, &ciphertext, &secretKey); + + uint8_t* implicitRejectionSharedSecret = compute_implicit_rejection_shared_secret(ciphertext, KYBER768_CIPHERTEXTBYTES, secretKey, KYBER768_SECRETKEYBYTES); + EXPECT_EQ(0, memcmp(implicitRejectionSharedSecret, sharedSecret2, KYBER768_SHAREDSECRETBYTES)); + delete [] implicitRejectionSharedSecret; +} + +TEST(Kyber768Test, NISTKnownAnswerTest) +{ + auto kats = read_kats("kyber768_nistkats.json"); + + uint8_t publicKey[KYBER768_PUBLICKEYBYTES]; + uint8_t secretKey[KYBER768_SECRETKEYBYTES]; + + for (auto kat : kats) { + Libcrux_Kyber768_GenerateKeyPair( + publicKey, secretKey, kat.key_generation_seed.data()); + uint8_t pk_hash[32]; + Hacl_Hash_SHA3_sha3_256(pk_hash, publicKey, KYBER768_PUBLICKEYBYTES); + EXPECT_EQ(0, + memcmp(pk_hash, kat.sha3_256_hash_of_public_key.data(), 32)); + uint8_t sk_hash[32]; + Hacl_Hash_SHA3_sha3_256(sk_hash, secretKey, KYBER768_SECRETKEYBYTES); + EXPECT_EQ(0, + memcmp(sk_hash, kat.sha3_256_hash_of_secret_key.data(), 32)); + + uint8_t ciphertext[KYBER768_CIPHERTEXTBYTES]; + uint8_t sharedSecret[KYBER768_SHAREDSECRETBYTES]; + Libcrux_Kyber768_Encapsulate( + ciphertext, sharedSecret, &publicKey, kat.encapsulation_seed.data()); + uint8_t ct_hash[32]; + Hacl_Hash_SHA3_sha3_256(ct_hash, ciphertext, KYBER768_CIPHERTEXTBYTES); + EXPECT_EQ(0, + memcmp(ct_hash, kat.sha3_256_hash_of_ciphertext.data(), 32)); + EXPECT_EQ(0, + memcmp(sharedSecret, + kat.shared_secret.data(), + KYBER768_SHAREDSECRETBYTES)); + + uint8_t sharedSecret2[KYBER768_SHAREDSECRETBYTES]; + Libcrux_Kyber768_Decapsulate(sharedSecret2, &ciphertext, &secretKey); + + EXPECT_EQ(0, + memcmp(sharedSecret, sharedSecret2, 
KYBER768_SHAREDSECRETBYTES)); + } +} diff --git a/tests/kyber/kyber768_nistkats.json b/tests/kyber/kyber768_nistkats.json new file mode 100644 index 000000000..6a819f80d --- /dev/null +++ b/tests/kyber/kyber768_nistkats.json @@ -0,0 +1,802 @@ +[ + { + "key_generation_seed": "7c9935a0b07694aa0c6d10e4db6b1add2fd81a25ccb148032dcd739936737f2d8626ed79d451140800e03b59b956f8210e556067407d13dc90fa9e8b872bfb8f", + "sha3_256_hash_of_public_key": "d4ec143b50f01423b177895edee22bb739f647ecf85f50bc25ef7b5a725dee86", + "sha3_256_hash_of_secret_key": "245bc1d8cdd4893e4c471e8fccfa7019df0fd10f2d5375f36b4af5f4222aca6a", + "encapsulation_seed": "147c03f7a5bebba406c8fae1874d7f13c80efe79a3a9a874cc09fe76f6997615", + "sha3_256_hash_of_ciphertext": "bb62281b4aacc5a90a5ccdc5cd3dbe3867c502e8e6ec963ab329a9da0a20a75a", + "shared_secret": "729fa06ac93c5efdfbf1272a96cef167a393947ab7dc2d11ed7de8ac3c947fa8" + }, + { + "key_generation_seed": "d60b93492a1d8c1c7ba6fc0b733137f3406cee8110a93f170e7a78658af326d9003271531cf27285b8721ed5cb46853043b346a66cba6cf765f1b0eaa40bf672", + "sha3_256_hash_of_public_key": "2cedad700b675e98641bea57b936bd8befce2d5161e0ef4ef8406e70f1e2c27c", + "sha3_256_hash_of_secret_key": "0a84cc895da138b944accbef3ff1a0004b8a0d8af5d426d2b82ea4c0e585cc6a", + "encapsulation_seed": "cde797df8ce67231f6c5d15811843e01eb2ab84c7490931240822adbddd72046", + "sha3_256_hash_of_ciphertext": "c15158a536d89bf3bafaea44cd442827a82f6eb772849015f3fec68a29d589dc", + "shared_secret": "c00e4ede0a4fa212980e6736686bf73585a0adf8d38fec212c860a0d3d055d1c" + }, + { + "key_generation_seed": "4b622de1350119c45a9f2e2ef3dc5df50a759d138cdfbd64c81cc7cc2f513345e82fcc97ca60ccb27bf6938c975658aeb8b4d37cffbde25d97e561f36c219ade", + "sha3_256_hash_of_public_key": "3dbc65b722a8982d058e27d409f04f744551ecde9015b62607cf67bb8ececbb8", + "sha3_256_hash_of_secret_key": "0ffced333b5d13fff22b81e66d57b6e2a6dba0285fe2a82d5537df51a8d3eac3", + "encapsulation_seed": "f43f68fbd694f0a6d307297110ecd4739876489fdf07eb9b03364e2ed0ff96e9", + "sha3_256_hash_of_ciphertext": "aec80e6fe21e2616352b4c148f9fa0e30986541fb0969df7873b1336b23a8de0", + "shared_secret": "8f50401bc9b1f857fd870902d4065f6cec8cb825db3eb22573c6167442b6e19b" + }, + { + "key_generation_seed": "050d58f9f757edc1e8180e3808b806f5bbb3586db3470b069826d1bb9a4efc2cde950541fd53a8a47aaa8cdfe80d928262a5ef7f8129ec3ef92f78d7cc32ef60", + "sha3_256_hash_of_public_key": "94391b7a41175a41c15cd995ebc69c83b29e4bcea6c186611dc4a79578e37f4c", + "sha3_256_hash_of_secret_key": "e3904266e186b34a397014c95f6d314cd6e1c813348b02e977d0fd21d9bb681b", + "encapsulation_seed": "ea74fbc3c546500ed684bed6fe3c496d3b86d2d6dfaf223969b942e9a8c95e85", + "sha3_256_hash_of_ciphertext": "39fa8e1d0a5e4bb987618734ee4903771886030b2d8bea4b5a9b0cb672ebb279", + "shared_secret": "3221d7b046caccbded38e369625f69bac60c2d7efacad8f24170b10c5d222830" + }, + { + "key_generation_seed": "66b79b844e0c2adad694e0478661ac46fe6b6001f6a71ff8e2f034b1fd8833d3be2d3c64d38269a1ee8660b9a2beaeb9f5ac022e8f0a357feebfd13b06813854", + "sha3_256_hash_of_public_key": "c5dbd68b3a8c148b2e7ac049bb986e14dd1cebfa1cbf3edd6bae85a4d2dda082", + "sha3_256_hash_of_secret_key": "b3fa7958f4b7ccb68712ae948c3f08740c8b89a69e53ad4e9959234e6869d8fe", + "encapsulation_seed": "64efa87a12cb96f98b9b81a7e5128a959c74e5332aaab0444fca7b4a5e5e0216", + "sha3_256_hash_of_ciphertext": "ca9f95c38dc95f51b6b62ec709539f0d1e9fa64e49ce4ad10bbe62868f35cfc5", + "shared_secret": "1d746afc4160c75aaa6c6967f4eee941e09546a039027f05f0f8a483710ac334" + }, + { + "key_generation_seed": 
"7ec408f52c9aa723d0c41d9987682a5f4ce6c9da7cd0215af60bbaf5484ab353a08ccf451b049fd51d7a9ad77ae14a81569df8c9bd3a8f1ebea86fdcfb823082", + "sha3_256_hash_of_public_key": "62e0447f7b5ae8a806b741ca5c302230b555c3786c11f3eb43894a8f45e3f7b1", + "sha3_256_hash_of_secret_key": "1a3249c268754c86d2e02ba9d87c2b60b220bf2406b71037cfaf6b089477ffb4", + "encapsulation_seed": "8a95d71228acaa5f9ae6f9d9ca8ae55fde296463b41083a39e833e37c4c90f88", + "sha3_256_hash_of_ciphertext": "ec7bb1327a69aeaf626a76d344be1156eac160262128a64477a194805b926233", + "shared_secret": "722fccef7142c46f74eb57a10b13e420d6554e9d18507f660bd1be96d3cebbcc" + }, + { + "key_generation_seed": "c121915bfef6abdfc177dae2f5a24218f9abda2559afc6741b08e0e61ab433eb84ef52db5eaa6df8ec3a0bc5ffa730db0dde8c5f38f266d5c680a78d264a7b96", + "sha3_256_hash_of_public_key": "0c1d832af7b7282d8bd81a2237107ee60d81e28eb64d6a153ae0eaa1a25797c2", + "sha3_256_hash_of_secret_key": "fd6b5d3f120ca009871ca24552a6118917ea882f12f30dc8097f6614d9d36080", + "encapsulation_seed": "90d79d75d0bbb8921cf70d46bab497022a8e750efdc99e5f1bae653275441c7b", + "sha3_256_hash_of_ciphertext": "da36cb6137a777acb4afbc0932811f75ef1d6732031309ae7e2de1543aaf5c2c", + "shared_secret": "ee7c5fb6a63ace944e1eae1bd4b182263d918754c33753b904853551b2b46cb8" + }, + { + "key_generation_seed": "d86634ecf96cc2603761e284c0e36734cedec64e7ff486469e38539c71141c5a99daf37400cfe59841afc412ec97f2929dc84a6f3c36f378ee84ce3e46cd1209", + "sha3_256_hash_of_public_key": "2b757ac0425152bef72ed852ab1eb44f4359499407bb6a020ff843a31657c5fe", + "sha3_256_hash_of_secret_key": "27dbbc7918c31e9ab57808f439c4f4189cc318a62422457f4fed733be959c816", + "encapsulation_seed": "be8a32f97b9a8d596382c02fa2a0eeebc15c083e970ddaa4f2622b91d6718663", + "sha3_256_hash_of_ciphertext": "85efbfd0b096fa921711ea66b17bcf7c9a6240711b38a88830dbd9d716f07195", + "shared_secret": "77cfbdae47854e9e10765cf397eca9ab2bf2b7522817152b22e18b6e09795016" + }, + { + "key_generation_seed": "0610678ff4dc3128e1619f915dc192c220f8fad94da1943b90aaec401683a492da1804ddb5aa9b1c6a47a98f8505a49bae2affde5fe75e69e828e546a6771004", + "sha3_256_hash_of_public_key": "53b9d62e64f9069d9fb94ea2c0806459b201531f4fddd708d162981cc1fb3757", + "sha3_256_hash_of_secret_key": "f4b964b7ab3e09fdf3d91527da06a4d29ef28344709a41739ef56f18bd5b984b", + "encapsulation_seed": "da2cfaf69e25b2a89ff2557bbb6f69e01d8e2e7bb27a7a1ce7e40fead16f33b2", + "sha3_256_hash_of_ciphertext": "379a57a8f19110d5e0d747a2c184877d71f00fea95cd815b4c0e8782b12bec6f", + "shared_secret": "8be7a417efbdd3587c6f82ddd1d29956789d28c2413b8383590c5b80cc53e04a" + }, + { + "key_generation_seed": "d322d56d8ef067ba1f24c92492b9c56df3a6ef54a304adc1b69913766a1ce69756047447b810cc094d400ab204cf9ae71e3afa68b88586ecb6498c68ac0e51b9", + "sha3_256_hash_of_public_key": "9cfeca12dfe978bf0b7ad7271487cf61b2b8f7c60f389f33fc18439a95bcbb63", + "sha3_256_hash_of_secret_key": "a2e37a55c9b80fb423f40585180b011f32402d0320259285b6e278df6c20ba60", + "encapsulation_seed": "511c2ab40782322c06111e144e505328c4e5bfc890a5980a2bbc44aeda4c738b", + "sha3_256_hash_of_ciphertext": "44053f01ecb88811b9ee7a9ddd4234f94507c7cf64b6803b28c54bc605ec4e31", + "shared_secret": "79fcd201101e7e277c1b6cdc4475d63ea1dbc42ab94cf873bf0163c2aab0b5ff" + }, + { + "key_generation_seed": "2f1d8a3bebb34540324b9485fdf3d5be3b858f544abc3fc641b5728cafab03ba8d6c42e7270ee2b77b6045385f3d175984a0e260363166c73b0c70c971644363", + "sha3_256_hash_of_public_key": "9aa64a30bed5aa8300772066ef577f79bf4813e3315a15f2c28b2665e4dc7e2f", + "sha3_256_hash_of_secret_key": 
"837eb6ce037f235273d7686fd9d01bea14026e0a0f5f943884f18409cc4bc70a", + "encapsulation_seed": "dca92dbec9b260dd97e8886f876862d6effc3b91fcf3fbc986cf56ab93ae79a2", + "sha3_256_hash_of_ciphertext": "02798b5af1a76a2b478ee05c630e62618e5e2d7ee0c411a82ed2bf888706fe28", + "shared_secret": "6c4484b6d7b0a376f52abb1811c712368a9f34bd108ffe7ca31c36a6ec8140f3" + }, + { + "key_generation_seed": "31beda3462627f601cbc56f3ddf4424e1529c04737ef0ef2af6d7401f653b8a1812083bfa3b670e3eaf9b443702fb6db16ac1197656bbd61a8e25ed523b8d1e5", + "sha3_256_hash_of_public_key": "241e5c7b836862d7482d507973ae3fd8dae96eec4ecebcedb68fbda75e04b401", + "sha3_256_hash_of_secret_key": "95c79c2a867b3e8a4e4e545ff626cd49893b8e87eb188ed1516b159a24736c97", + "encapsulation_seed": "57c170e691d7a914a901b9a11c62b8b569b3806427557a9dbac9faa720ec3641", + "sha3_256_hash_of_ciphertext": "cf3b2e2dc822949eb13638299fc2d5102c7132aa6cd54dd7834b13f05a4dece2", + "shared_secret": "8554d6af350f13471cfd45c23882e43dc81d8a094f6299e2ad33ef4c01a32058" + }, + { + "key_generation_seed": "cbdff028766d558af4466ef14043a1a9cf765f7748c63cc09dceb59ab39a4e4d8e9a30597e4b52ffa87a54b83c91d12a5e9c2cd90fcac2c11b3a348240411a4c", + "sha3_256_hash_of_public_key": "6ad1d739f1598a16c608a240cd13dfaf8263d74866315e2898a3431cf19e4685", + "sha3_256_hash_of_secret_key": "1ef733faa4f2cb53cb5d8975aa6797b5f37fd918aeda02178a40584475cdf667", + "encapsulation_seed": "6b5a14e1473abf5a33d44975ca2088bd8fa6fddcb3f80e8fd5c45b9d90c24a5c", + "sha3_256_hash_of_ciphertext": "1706e6983032950b47cb6c8586178b42d515ce929c1434c1a8c9e36d8b4db7a3", + "shared_secret": "f9646f73de3d93d8e5dc5beeaa65a30d8f3a1f8d6392190ee66ff28693fbadfa" + }, + { + "key_generation_seed": "4c04310bea66305c6ca8ba6b8f61ca96257a67663afc11761f13fb5c7b324b6b8aec87a9a79204cee2986867a2906eb851b734b8b22b91d6749b1a5f07c44e3b", + "sha3_256_hash_of_public_key": "9510a2a0b4fcbd414fc61aff04a8df579660d14b13c40ec0470c45f639b65a58", + "sha3_256_hash_of_secret_key": "0bcfa8078582f60e218047d0016437601da8431f34ae6da12921f53958f32819", + "encapsulation_seed": "40e593754e6eddb7f9cf176ba2d5fd1087c90ad377556d0b0f686537b1a3165e", + "sha3_256_hash_of_ciphertext": "f9341d26e39b38a88ddef1708c96ee2068f569a59a4010745730d8290d637718", + "shared_secret": "1ee252e97b69445f7f109187645cd2879f55e10eb8361ab43b3492ff51f01815" + }, + { + "key_generation_seed": "38a0d5f41d7dc1896efd1b45b0485634cef149828751b96087a0a6dd81b4d58aa2acf359556df4a2abaeb9dcee945829beb71185b4d6bd18b76e5668f253383a", + "sha3_256_hash_of_public_key": "cfbe9649d9d1c384baad67b91b2f3e21f2fadd6bb582a0b9cb016051dd82c75a", + "sha3_256_hash_of_secret_key": "09b118f7c4d059baf27284d127d4e85d55b84e4c92bf3127eeb318d2f5765401", + "encapsulation_seed": "c152523abd8248bed40c3827bcf0f8e8127037a55c780695e2c28ea3e041a44c", + "sha3_256_hash_of_ciphertext": "94a8c287238191a107e74e31ec099086d83f198e6b0f3321da4d8f46ce01a0b2", + "shared_secret": "1e1ea5d6a18873c5c7fc8da79093f6d3db5b28fdd0aaa42726ad130c78e9bb88" + }, + { + "key_generation_seed": "97b5665676e59e3538ebadaa8cd50df1f9fda1502d9894c616a946078e56b621df05318b5f655efe36f1b678cf4b875108a18db2fa312261caf839f84bd956c5", + "sha3_256_hash_of_public_key": "a19c2c9c907b129d01cc44a95949121c39534cc98b6d105e60fe519a000cc2ae", + "sha3_256_hash_of_secret_key": "f1c00070780a7a2ac5b57ff3ff765ca75278bb661d1635cac92792f9454fe8ba", + "encapsulation_seed": "ad6466dd59f26b762fb02b19eedf5f79964da68bce0459b91c3a6ee5a7e01183", + "sha3_256_hash_of_ciphertext": "56e0b8ab3b302fae682938a45d9931e092d78877d1f8834bb43cd5c85582a205", + "shared_secret": 
"24619bb17c912fc992bd8272969cd5b6fd6b030122ee5af9365cac8b38e569fc" + }, + { + "key_generation_seed": "ef99224a03a85a46ef115474ec5b5d620da6795d6efcca4c9135d19958a9de62df7d92dda83e6b2ef4cce08c9134563063068a196d7b1a1a13623e48ae12528e", + "sha3_256_hash_of_public_key": "e4174b6e7542fbe80ab2bc06dfb802f691aff147ff90332d5ea739216c18d872", + "sha3_256_hash_of_secret_key": "f3f3a292f5cf01d6f7266461c9e8cd44bfc8f17e16035ab8d10af8177f389b86", + "encapsulation_seed": "1a4d5dff5847cfb48333e33bb00ca7301b144aa89dcd412ff5a3b1081d775b7f", + "sha3_256_hash_of_ciphertext": "5f878ca21c8c27ae9c41c43aaf1f3a2af62c73296e165c08b88c5b22592867be", + "shared_secret": "a990af801ddcf2009c82fe657fe3f068bae7e6bfc661e3e588354ba7d1b176e6" + }, + { + "key_generation_seed": "b12f6fd965ea9c5b947db80fc60c83d5e232dca82e7263027c19bd62e5a6ff550f6aa3e88f7fa8a96067f8cdaeceeac90c2d0b5e277e56e9c405ec9420c30252", + "sha3_256_hash_of_public_key": "2006a70fa33ff4a65b00553734c5bd8cca0a65eb3a115d96b8aa90f8fdc5f8f4", + "sha3_256_hash_of_secret_key": "7334d4a1755e1e639b3e9eadb5996cd910b55d1de5790469f229231d3bfb1528", + "encapsulation_seed": "34f44ec2092eeaf686f2ea170591a98527cbb03a4fa9477a7aef6b41a54feeb2", + "sha3_256_hash_of_ciphertext": "c2079637916c089b2afb9d6e9c6fa51308ab7720d5c2fca484c34ce614a14fc0", + "shared_secret": "11a2ceaa0c77f0602c4b2be3499e6df6b0339d9de90d04b2b12829f4758afaa5" + }, + { + "key_generation_seed": "9f52af92ca165fdc38788f2b59ba02e01c8281ff7c1e60504688043a5fe814b04f3029e1be4e1c0258c3a22ff5b50b2674cc094ba7018da2a61569845c17d26f", + "sha3_256_hash_of_public_key": "631e1de2556ae65d57e600c21e8e355a4ed586d667177ca0b7545cb5a23d669f", + "sha3_256_hash_of_secret_key": "3d4d2c680a1e6aa83861ad95043ded260e720ae80060320feffa309b4281ba3d", + "encapsulation_seed": "6250c81126572eec2da330271db36ee591f060fc7e53eeefe2e1c476c675fa33", + "sha3_256_hash_of_ciphertext": "2e9d6551050e32e204d7c062a4c18b8abdb91346e9f2c2708776827e0be4c514", + "shared_secret": "7571990ef1ef7e15cc920318fb75fd38c4ceb9abf7a4b1adc2175f99d1a0a275" + }, + { + "key_generation_seed": "851ea90fd3854cbf28fe39fb81f68e4b14345cf0d6eee7ec4ce772513df8410d1c0ec046899a777655233e4e1b5ca44e9afbdc67964bfd5d5e3dbb45e60d03cf", + "sha3_256_hash_of_public_key": "87f3829eff562789b3e19fafec92e4b5f95b45f3786f12d9c24915ca484a49ce", + "sha3_256_hash_of_secret_key": "9aa6c0546cf02085e2b3af65a7d7fd32d0f6d8080e1e7fbff6c39bcf3086ece4", + "encapsulation_seed": "35d470bcc5880872754810dfb3f2796da2fd7f397537146f6488c27804072b34", + "sha3_256_hash_of_ciphertext": "14da42e207477f4383faf4004e58675f0380e7d621421b3c36b877acf3a45d5a", + "shared_secret": "27ba4cb50ae44cd938585e0a4905d76053dd851e5b6af4fd787446079aa5a4ab" + }, + { + "key_generation_seed": "d304c9389cc973477f169788abcb9d511f843219d246a9b587822f422a70c2386590a2e5c7ed86cf2c5c2a898662bc9a81418720bbb632ef9cf0b845ed052d73", + "sha3_256_hash_of_public_key": "699fb2f061a75f111f4a7a60195d9045dc01716b6502cc107cbcedf122e8f619", + "sha3_256_hash_of_secret_key": "421f16805b1ceffcd64128b1296521ef812d3a8f4c5e3875a049f8de456b021a", + "encapsulation_seed": "8d667921c5db401a86fe1c35dfcf164a6bb2ab7400fd6a0b67eafd4a0ed11940", + "sha3_256_hash_of_ciphertext": "b2485ef56c39d468193e387e72794e0ddc9b5404c1a6d90c3b94a5f3e13ba7b4", + "shared_secret": "d17b2738213a98f29ee46747c93308ee7000fa404b9a0c1acf3f89654ca2446e" + }, + { + "key_generation_seed": "89a6e3be304a3518fb82b18ca730f0b359cd6ba90664a493fb4f8edaf965b9c3b6591121e25d64010c25a18676033e1d7278ac5f2d0b43a31f3a4156ae710465", + "sha3_256_hash_of_public_key": 
"d3413880d082f26986fcf452a84a8da934ed06198b290ada1789e74d9081a9e7", + "sha3_256_hash_of_secret_key": "7b546a42ffe6b65cd9c5b8857c2518f4f8e0bf835c894a68d1743691fc9aad9d", + "encapsulation_seed": "ec750b3939385a3f8df868119dc76f77ca845567ef068de6ada5478a56bc78b6", + "sha3_256_hash_of_ciphertext": "8290f3c4bec7c3b93f3d26e0be3b3fbfdd9c3f5806188fcf0fa1339133f29c7d", + "shared_secret": "954af53b4add522514b34cd2ab96669a76ca13f82aa2fd70826bc8ee790ccefb" + }, + { + "key_generation_seed": "d569b935ce015c85f792f8f7fb0d83c4f53b492959361dd4f75fb764d656450176eae84d11c4528382828f7a689a0d5cff87b8ca0bba97feacb39b935a8788cb", + "sha3_256_hash_of_public_key": "e6eec2929feac2a86c9dacfa6214e2e353fda2d547c3829f5678025ff8418a1a", + "sha3_256_hash_of_secret_key": "5fac243c82807d7357a61023226a7c270525d96932162ca5c09fc8f7b9ec6cb3", + "encapsulation_seed": "74f1d52af09b12c36eb062ea7528550cb4c18a3ce8e4f4ea9fac43ae383bc925", + "sha3_256_hash_of_ciphertext": "f1b10c800a42ae606c72eaad76accf059cccc02299fbd78a5d091f183f6c3f0e", + "shared_secret": "d0bbc576fb1aa43b6e76db0e87bc4ee3fa057c31642b37f3339217a1b041b521" + }, + { + "key_generation_seed": "5cbb141c2763425c274f7404fe530d9116e08c33f9f200a20b011cf563a28990fc9ebbe336dc464489861db8253606971bd0a9008a433ed17752d04023781552", + "sha3_256_hash_of_public_key": "c74f3b7fa6e2ef8ce99508c89cf3c71d666ab065a262581a5fb01b2c9b9444fa", + "sha3_256_hash_of_secret_key": "5c6998a20960109a4c9808f8f8575697b2b8d18c44c7e9dff97585ae43e6004c", + "encapsulation_seed": "4b3a70d85f640d1a2a852fb6fe96704af56a7415a8ee4282e9207bc3a2dc116a", + "sha3_256_hash_of_ciphertext": "e9ef0852ee47744b8c3e12cd728d9017465014eef51edf83a4502cb5218cee20", + "shared_secret": "91fbc37d4749ec6175c12f0d8eb6b6a8621e693c79f85f5cd2f557cafec5e7e9" + }, + { + "key_generation_seed": "293abb6d1c207927945417cf84883ef010823e11b487ed55239e466e83696d0cff8563038aad865a817cab9ce98846ba75be9363718ecf5fea538aea90b2a558", + "sha3_256_hash_of_public_key": "7378ef967195c977d43a50d03205044006715a6a8a8263d717f40170b49e6bd0", + "sha3_256_hash_of_secret_key": "30bd5f16c3f242248a4c4cddc43508bf54535958657bda4dcf105216ddf47eb0", + "encapsulation_seed": "26e38ac804fb5b4d59ddf747715e7e6041d875f99c7b638024b4af82d622da60", + "sha3_256_hash_of_ciphertext": "37843616c8a4f7ea9480740b6624f41650da2bb1664cf228d85d6d71a0624528", + "shared_secret": "d586b441b8eaf7d053cc96b6835f093426677a7c3acc51aaa3ddbb66dd14a623" + }, + { + "key_generation_seed": "74d87c7556f2671f2d666854a4d6e073e69f35421e6e1a428cccea49c37f972ce1fb7456ac0aa1b97068f452cba64ebdc138bcf5d36b0a0fada2a3b374141eb9", + "sha3_256_hash_of_public_key": "16fe956be4601573d72306a251f69bc2181253e2417e178341fd6553303ac189", + "sha3_256_hash_of_secret_key": "873c94f8bee9fe37265d5dc0c5d3bc1c706057c7efb3cd2cd5ca9ba45498d0d1", + "encapsulation_seed": "a319d2b8f114f1acd866478bcdeba6fd164dc4e37b0adfa8d8034afb3e197376", + "sha3_256_hash_of_ciphertext": "cc677a81c73ea5139eed8d85782978d06192715933bc5aef560e737f6d57d0a7", + "shared_secret": "409bfd9102bd4632c6b5d3610eb349fe3e3bc51e73acc78a8e994a070e20e10c" + }, + { + "key_generation_seed": "013bab0212d04ecd54b478daf72748003a25e2cb060ba6cc50bf95c292b8206b9da0c5da5f195b80fbb99c2e8b06926074f3f604b3f6195b5a5b9737876bba72", + "sha3_256_hash_of_public_key": "633bee89571e8fc16151491ea71234ab83289426559f90c67903a36e4afaa6f4", + "sha3_256_hash_of_secret_key": "3c3cff5f49a802cec693efbfc264f6a385210b1eed20f7bc5b07b51839961d14", + "encapsulation_seed": "ff646071b2509e6b75790917e08e4f0b0d9f0116ec6291c0b59eaa4b583ad830", + "sha3_256_hash_of_ciphertext": 
"6d94a31cff4761e3993308cb3e812a4a7f04f64d02ed3b46b418c2fc16189dfa", + "shared_secret": "5dd151a8015c0b16d79822832ff4cc0da7fd38eb73b7da59bc519d4d2374b808" + }, + { + "key_generation_seed": "ccb073c4b90be0ad746e26fb093b60c70110bd1dcbcddb566a8cffb7b3caf80e71600a8982c350df524cde514431ded7aec23576530894bcbf0ec0bfef0bb64f", + "sha3_256_hash_of_public_key": "3217d034b472a846cd317681c0f36feea187bd40e546dc4ad69c2e67fd9d8303", + "sha3_256_hash_of_secret_key": "1503bc141825d523c9505d34f50dc0a01d7bc91cdaee6b99f4a85a24ce800496", + "encapsulation_seed": "0584270ec26f3b9818e4af074d17b2d51037cc8dfdcbe3b140fa4fed5deebc54", + "sha3_256_hash_of_ciphertext": "a63613ccfd2ecf8aa3adf0103ddd9eeedbde3282443bcf02513b4ab87360cabb", + "shared_secret": "1c729b8e580e124e715f19ea6f2409fc6de741afa3d9919b2b8bf3e54c053b51" + }, + { + "key_generation_seed": "2e889f44e28901e9ac7ca6b2fffcb124c8979401b17064d7e1d51a7e3c3adbfa0e145e44aae52cfc609e6f47fd7a6f6af877190ff52256d0ac5b05b89c3f449f", + "sha3_256_hash_of_public_key": "d1756ecfaeb695001ac490f36c4638151bee98d367fb7adf0e06a470844068af", + "sha3_256_hash_of_secret_key": "a21acea0fd4354eb0c78d47caaf93c9f2434f1cf2d6b2194871ccd98f9522ced", + "encapsulation_seed": "51e05c7b4ca3079781e8293f4eccebeeb2f8c8b4c59468eddb62a21bcb4ab8a3", + "sha3_256_hash_of_ciphertext": "3b322134b37fe8f5d7268fb74d1634ab8b35d456a973f7b0b427fb40a93b6db2", + "shared_secret": "b95ac8b73c703ab1154152b3ac73f054596ed23d3be328fbe20f936ea95fa926" + }, + { + "key_generation_seed": "174aaa36410566dc15a5e62874218d7abdde0b2c0f30d877bb80b1abd5f5a0a450a7a2354f7e5cefa6f4a4e9a1c411eb9364506e9e1204a8acb3cb77fbd2c4ed", + "sha3_256_hash_of_public_key": "1b1b0a8682caf72df2e0a48513a7358edbc77a615d6be6fe2a7145be66b7c509", + "sha3_256_hash_of_secret_key": "3e214f25fbf4d1bb670a87367399e1b2a9da3491cac5a22a2c18dcc44f3f1bae", + "encapsulation_seed": "9eca0fe36c80fc5eba171c3ae66a5b1c923faa50b4521bb055e7bf51005c93df", + "sha3_256_hash_of_ciphertext": "a2cd589c24c4c75bc0a3864dc84a85a7f0f3ac11c8578757f8e94054a7c186aa", + "shared_secret": "8c3851393e5c5997cc95f06da96300f6dd85c041343c98db2e742aaa5f78b298" + }, + { + "key_generation_seed": "351fe4313e2da7fac83d509f3103caf7b4c64a4d458fefdf636785ac361a1390f072d9b5a99f9c7a0a011e4dc10f6b600d611f40bba75071e7bee61d23fd5eda", + "sha3_256_hash_of_public_key": "2c54df6e9020e1e44b11b471dea97a382a2fe8d1042565bcd51ef21cc0884d68", + "sha3_256_hash_of_secret_key": "c6bc9c9e797a02684d3ad8de47919b8d8fdbee09258d084c7a9dc963c80401ac", + "encapsulation_seed": "0c5719261caab51ae66b8c32e21c34e6d86ee4aa127d1b0195663c066497b2e9", + "sha3_256_hash_of_ciphertext": "0cd687f1c3e0d67c46cebf93c1217ddc972ad8662dd05830db350e1292542c1c", + "shared_secret": "4b681fff6a755e1dda908d070f0d9ac610d85c73079c1022fc67d255e36f1f71" + }, + { + "key_generation_seed": "9bc5315580207c6c16dcf3a30c48daf278de12e8c27df6733e62f799068ad23d5a4d0a8a41c4f666854e9b13673071ceb2fd61def9a850c211e7c50071b1ddad", + "sha3_256_hash_of_public_key": "bdcaf7b417da8b8933279b33068f6fda313826c2eec500b224cbe046abeb37a7", + "sha3_256_hash_of_secret_key": "c96e176b19f4135add434d0dd219024587d49fdb649bf470e84d9518bbfa2879", + "encapsulation_seed": "0e59f6f9047c784c1f00b24454aa4f1bd32c92ae7e626549972f86fab90e7e89", + "sha3_256_hash_of_ciphertext": "b38711e358893a864b475f35328b2450fffd5087d631844f7ab0995de2b8310d", + "shared_secret": "bbaa67f1dad879f2fb33bd4ead45aec354bc8f05c7cbea1e433509faac022edf" + }, + { + "key_generation_seed": "d8b907b34d152ff8603b73051f772daa71eb902c47b7e2f070508269d757e02e36b817736cbc5f7b1dd6eef5fe6332fb1a598f3871e5470d440fd2ea631da28a", 
+ "sha3_256_hash_of_public_key": "61e27e954728e2e2e230c94ff009417d7372938e2c29c38af22184eed530fa1f", + "sha3_256_hash_of_secret_key": "8baa58b1d3fab8ec5cee8841c9012506cad40bf58a677adac88f1a6400506d40", + "encapsulation_seed": "a3963ade17d69debbc358dda82c7bebe2c39d25b36813058e7a161542e3f8c2b", + "sha3_256_hash_of_ciphertext": "7d47a21d95483a5845a4fddbb07b3435c29a56b5cf26f5d0abfa21bc39a2f2e6", + "shared_secret": "2c7b983d66978be80250c12bf723eb0300a744e80ad075c903fce95fae9e41a2" + }, + { + "key_generation_seed": "684a29e4e5480a5f2533e1526b5fac8cdf5927f3d85087c71f928c59690eb56575d12195ec32a8686d0600e45d4a7f54219b0d7a3826d193a51b9156ecf2edd6", + "sha3_256_hash_of_public_key": "672e53b28d579974d268132187e7bd72238639c6f2ca154d50d98c74096ec330", + "sha3_256_hash_of_secret_key": "4c72f0a7ef5c3274c49365cca5e6770bc709ef12bdbd4fd7c2eb5faa296cdfe8", + "encapsulation_seed": "97beafabf2c8575586487c7a80e8af5fc50f94b6051c1bc66a5ae9f66be3cea7", + "sha3_256_hash_of_ciphertext": "167b4e8b7517cad82ae0f49795918c4d33c79137a9c3e16000c4c55b30b1d382", + "shared_secret": "bbc58d06cc14f9e96a10acb1789d93b93933f1429cc53a1735b3cd995f086ce7" + }, + { + "key_generation_seed": "d76b3573f596eb286ab5231feec7499686b13021be36cb126c7ebeb9d7030daf248c0a21ea0bb6d6f56f12300e8584d8e9a34e0e6f52227281151ae4c305fb8f", + "sha3_256_hash_of_public_key": "b86d5b13bb8b72a9fb81245ab712f0d10f0e2e09b222143c420e3f2c3acea27b", + "sha3_256_hash_of_secret_key": "c25f2e16a0e6fbf0729e5ee89fbbdd71f00ff9a1abbb00cb47f26e9989eaf678", + "encapsulation_seed": "75461decd34c50d6a094b4a64fb75e5e9479f8f9250d82bb7d729dedeb2d4b65", + "sha3_256_hash_of_ciphertext": "8919940aeb732930c496fa9832b0c09382663accda45be1ee22930c545eb3a37", + "shared_secret": "e045e0391e15a66d6208467078f2ba5e429cc586c410ca6c5f3c032c21761955" + }, + { + "key_generation_seed": "b87439fde81c9e39eebe7cf741c685785532c1dd23e8ef868b9ce7a541010f3d1646460817a0fce5836bdfe124a7448e7adf7b8ecc2652ac6d280e986682df71", + "sha3_256_hash_of_public_key": "85441cbd71c18717e9de7359b920a9a3bb7f32e619806f4e4718c585085be624", + "sha3_256_hash_of_secret_key": "93b65d2df33d3e3ab0d53c1d0a21f3752e2c5962f7d960b888b2a8c495b1b133", + "encapsulation_seed": "2607dcf4fd6ca1c614c21b5e37c24981c32b91c8c3e6955777da8a3f5d9c9335", + "sha3_256_hash_of_ciphertext": "422509b01b8fff9468e867a2b5ebe5d3e27314de5c058b2c79a61ccf464f4df7", + "shared_secret": "0b8584b75838e084839d58c89cb1749e82ec06a0e85464c7546dd96870547d29" + }, + { + "key_generation_seed": "056661b38038da4fdd7426f32a81576c73ed84843b305168a374f934e27a4e1b79238a80dcfd7c992d84b2dffa67493e669243d4fa38c46b090bdf86bc548411", + "sha3_256_hash_of_public_key": "065fb6156acaac591f1bf3ce71c4a046be8c6c55eb9a84d29569bd2b144c73e2", + "sha3_256_hash_of_secret_key": "0121afcc6aeb8be9f1c5b06d5b65cc1c03e9366ed7b85fc511d853c5eee230cc", + "encapsulation_seed": "38c89bbe7145c29e9a831c11431eb9929cb24fb4992db20737e4687d397fd732", + "sha3_256_hash_of_ciphertext": "f1d3b745d86f860e508ad8b6d5c8a72ef833c280ec11e99516f4ead3c42509be", + "shared_secret": "3547a15b5748990a5436bdc4db283738eb7d64bdb6ff566c96f7edec607ccc9b" + }, + { + "key_generation_seed": "a1b52d871612a1c611ae0944f9e71858f35d3bd14f20e96a931720668bdf0a6b1f135cf64b6403e103afae34da038613e2853bbfc36baafa3c6a95347193f37c", + "sha3_256_hash_of_public_key": "ced77d358342759291c2bd225b0bd82d659d28a24bbc5eda8f47975b780cd129", + "sha3_256_hash_of_secret_key": "16e06287bd8d71c78f1657bbd6d5d12c22f6bad7658e68dd849d7751da950860", + "encapsulation_seed": "b2c35e33c72d90182791f0e12a0324f5b216efcab2c8da1bee025dfbe13f4152", + 
"sha3_256_hash_of_ciphertext": "fdfd351fbb15c92843b44489fee162d40ce2eea4856059731490afda1268b985", + "shared_secret": "852ba9be42763c5a74a75778eb839a3738a8ceed1520b0588f9dccdd91907228" + }, + { + "key_generation_seed": "952b49c803d6d6fba69f4375adce8594847a00bcae2179da49af2aed0423250262d7033947ae42ca53522a65fbafe18d3bc3e0cb66164e9a094fe4b44d8977ed", + "sha3_256_hash_of_public_key": "2fdb7c7e39ce1625c20a13a1c91aa5909d8b03b064d00877dce2415020370c72", + "sha3_256_hash_of_secret_key": "ffdb52b23a9ca4b71ec882031ebcb33a0ecc6731c13c817b24f3a06e48273778", + "encapsulation_seed": "afb7d6dc2b7eb6d84acc080c1be63c98afe7b07786b5801f716444a3e8e64800", + "sha3_256_hash_of_ciphertext": "215d83f872221c5fd4ee4da557e17299dc102c52dba1fc4bc3f8c16805da7f1e", + "shared_secret": "618a8496b8850609c09dd1d18798ee2bfff3ed7ef6f8b8034fffcec98f291d69" + }, + { + "key_generation_seed": "3c815e57e9233e975fa1630208aab206b71ae0db37a7a8789ac683d9f9b2d29801c8e376fdb140ee343106c093af7cb149b316ba79446ceb4e5e0cedb9b164f9", + "sha3_256_hash_of_public_key": "86bb11e7d9c1368fbba34ce3a2f169c2464ef5fbc11f73843c456467b6cdbd4e", + "sha3_256_hash_of_secret_key": "5d46659798d268f1314ad1e7c1735c480301f5877773403966e928bc3fd33d1b", + "encapsulation_seed": "28f5e9dbda122b2cf8f3754fe9e0c73a84ad4b0c093522e0b62cf815d60bbc3c", + "sha3_256_hash_of_ciphertext": "5ff5d6bdb110bac57e58a4e288d056a1384f9823606a42daef2ae82e0b7574b2", + "shared_secret": "cbb8b7a05f48b47d163cf8c2fad32bc586f47f2c2e0911da349f29b1e3286c22" + }, + { + "key_generation_seed": "588760826dcfbd36d9abe6ae44a669bb3ebba6a218eab69e30f18a3bd536576e0e860576285483bb5fd36e2f944d32c4317bebc1e441470c1372046a790d79d4", + "sha3_256_hash_of_public_key": "29253478090cb4d580bc2a912645bc685061e5d4437b3811eda69c865ea9923c", + "sha3_256_hash_of_secret_key": "aadce411f3708e9727e4a7e4e198781e1ef5e8f4c4c14add1e25f5758649e265", + "encapsulation_seed": "b0d713cbef0bb1df70cbb425d1e9373e9f7790fdc7980cc96a240dfc53f1e8e2", + "sha3_256_hash_of_ciphertext": "675039d66fcb631a050a8b24415b50f331350bd6697f9c977eef15c15d4cacca", + "shared_secret": "1eef87404f318351413d52ba8a07cfa5e72f235d6f91afd7fb8ad3e683ce0a55" + }, + { + "key_generation_seed": "47550e9edacb6ddce3d9ab81f6b61080dd4f2693854acb05e0ccc7a4fb6390fbf89d7d99d5c3e0d10d6ef9af054d842375f695abb28e3b8eb495100f04306e92", + "sha3_256_hash_of_public_key": "286de7dc142efe935e84b0aeebbd32d050fd9d8b008a94e59454b19ea401611d", + "sha3_256_hash_of_secret_key": "a6b53edf9efd7fa67a478456a5b6a379876c248f623ea45f4b541a8db00c524e", + "encapsulation_seed": "32bdcdb7059fe27f6409901980c080308951ffd90deffa8317b4d213a5f04495", + "sha3_256_hash_of_ciphertext": "f03d44bd9bdf3bfd486919fec2177b8b685a9981de4cbc2a9e98b7e9b0a528fd", + "shared_secret": "ca2c0bba56645e4fce4b7e38a7bb4b839e754bf2834a302a2614377eddd6ae60" + }, + { + "key_generation_seed": "610afb64be8cc1df288cfb016ee2f44c6c07113de7f6fee071fe0c3fe31c6215cd292e4c5f9e1a55e0489bceffb204d672a6215f4f3980a646d9f880817c52dd", + "sha3_256_hash_of_public_key": "029a2e12c3e6aa668afb5be8a82576813fac7b8e61c5a88aff94ecc2770c585e", + "sha3_256_hash_of_secret_key": "413ae41ee83e17b74ac654c2aca57abe8f8ed0409acf7cc8b301e3d6bb049cfe", + "encapsulation_seed": "4ed7c92d83bd03b2a25b567f17ae55542e2f6a4308ec0f3fe69f8ba5ae24331b", + "sha3_256_hash_of_ciphertext": "e8992f7b7b619c03cb9f0c991e3a9c20f91beb707c177ad4e02a5808d10d8769", + "shared_secret": "9155619e28de6cc0670ce70e0ad270f0e885e5f5f8d6d38426938ae1036d6ffa" + }, + { + "key_generation_seed": 
"e1953800acaa85ac02a906c72cb8e8d704e8d27820345f88f71e89c1f549afcc8c64c049c6dfc0f1476cffd520b055756162f7ec94243de6b14ac0b9e5fb366c", + "sha3_256_hash_of_public_key": "e3ec3671cc7675a321af8584a0961101c04a432772431e77f5740ba3b2ef488d", + "sha3_256_hash_of_secret_key": "93bf696bf0671c3845c4b246f29701a0978eec5b49de81589009e235903061e0", + "encapsulation_seed": "060ea5d2ed1dd88144a9885e79278590821c22917b55a48920f96b53ebe0e689", + "sha3_256_hash_of_ciphertext": "6634bd840d2dbb01463cfe5b4e3e54d1eabc081cfbdc14d0bc118911ed8d3cce", + "shared_secret": "d1f24383d5b8d0c3c0a6a5f8f7d38ccce13ec179a84b0b09bcda4c9988f3eb4e" + }, + { + "key_generation_seed": "c719f9b2d16399b7326ce4eca30dabefe8fdaab18e9f6df888b0a134ef355570e40771856eb77e4633504899fcb86c6a3d433d0b8d60e26f07bd61f1d4ed69bd", + "sha3_256_hash_of_public_key": "79836213a513bd4cfd42ed281304e3ee4560e4e0c60fa53781f83d5bd2bbea52", + "sha3_256_hash_of_secret_key": "65deb55fea451375ef335e7faac73917d32220fc70c95f371fdb16e712beeb26", + "encapsulation_seed": "10ef9426f8c4a13b52325c5bb4ead4596ecf2c6b5bd2d37d8350e90d4164fdd9", + "sha3_256_hash_of_ciphertext": "ba79883ad64a6f2b256004233d87809a8c390327a23c739334f773507e003aa7", + "shared_secret": "d2dab0b39b7f62de3ca9826f9dd15a4201191a0e0c690d3e52b305a9d3af2d0f" + }, + { + "key_generation_seed": "e9acbb774be970206c3a738e243b420805a509fa59fa902044be2f0d013650d2ded5edaec5de3bf5b4d7c2f2e18e87f499c1968993eff196753db8045e2c8ba8", + "sha3_256_hash_of_public_key": "0c2e803c2872400c49e1bb10232946ab939319e84ff32cd354dc15d082cde5a3", + "sha3_256_hash_of_secret_key": "d37f172803739d074d71a2be32125eb1ba4250128342e34b882fcba38b259248", + "encapsulation_seed": "a4bd30a64cbf29a4e290fa1cc1dfb99e68348713041e4409a1af23c5d80c15c4", + "sha3_256_hash_of_ciphertext": "13d437b2fd9d67ca0699a3dacd977fba5d072fa6b482043d63e8a9548ba6a3fb", + "shared_secret": "6869ca370a496af2dbaa866265d91ba6be54b9686b1b8dd5714f6ba861b0d1e8" + }, + { + "key_generation_seed": "c1b3cbffad4b306f9af0cdd3028876486dbe858875c9b6497fe20172a986c82b1c96249919cedc2369d8d739ab125e0d2ccb82dfebcd90240a545cdfe07511f2", + "sha3_256_hash_of_public_key": "5818ac8d7a38c781e3a0bc43d088e6d391d1d67d9639b260bb6f58a19a57150d", + "sha3_256_hash_of_secret_key": "280e4774d1b2401580216fa70fb24c2c214ac5dc7f3841710a42e14d6aa09663", + "encapsulation_seed": "f4b66a7d3b65b896dfe100b2cad24b175a1168cfd2ae11fd704b835f6bcd311a", + "sha3_256_hash_of_ciphertext": "51eb70249a1abebd5159f1069b1acda2304f25fc9cbd9f4a625b58df448b47dc", + "shared_secret": "502d92b2a7e1804892ffb8ff009987a58f35baa30c0392c83859fde82105a9aa" + }, + { + "key_generation_seed": "ff7495b8575b5a98e4fd21fb4c3e58cbb60f14bef21aa74cf8802e3153f14807bdc370460375a778d1a31d01c42b66367ed8d9e8f84551002f552f0e52102b5d", + "sha3_256_hash_of_public_key": "172cf4f8dace8a96b8f70da966080a5e3f132873ca7544343377a99b65e8147f", + "sha3_256_hash_of_secret_key": "31136804b6c14f3a0a00a3295a5fed8d606369e64d272d432c59d7fe0ccc3e47", + "encapsulation_seed": "1d7b03d3c5eefb8ae5799dc569aa668f1bcb8c86607b089d3530cf61d6380147", + "sha3_256_hash_of_ciphertext": "9b38b66fdfe80acab82bf9577676f6566b4429f78a14f7486b07c96ae7be921b", + "shared_secret": "48eb4b840c0d957f28808e434786c02a8f99d3464ccb3caf91cef4a0f8e70c4f" + }, + { + "key_generation_seed": "bdc3fba1c32751139fc45bacffb3ea97f26573d804a5f27a459293d95190ed8efd5a08f656a6eb8cd20679930a31caa6a6331c4b133a6838c223ef9f769f6246", + "sha3_256_hash_of_public_key": "268b6356f92c57da6dd34494b927e8764adf0ad519612ef0d1b8951e50966c2f", + "sha3_256_hash_of_secret_key": 
"3bf02cee24670ca40b7280d8047fa147b24c5e286dcae9c24bace9465bb19f61", + "encapsulation_seed": "554f3385b382f4a46314de37ee3885addfc5332bd4038785094e0a832e9e8c2c", + "sha3_256_hash_of_ciphertext": "fe8c3fcee4be152aff29e55f42f2fb1354ae55ccbe38400bc901ca032ede1ef6", + "shared_secret": "f9507f70421be90f21138a1e135329ee8228682cc948a6914ea58624d396df0b" + }, + { + "key_generation_seed": "447f6076a627bbc5ad7773fbfeb14b4ba9ac43a0f8b99fb6dcd5e452aa3c47ec20a7237801f470fcc2bd9fd7bea8322859b850f7882d362947432913dd068c01", + "sha3_256_hash_of_public_key": "4c6d304e0494d88d83b5e3aa5761df3b299551a24f28994d2747b2b08945bead", + "sha3_256_hash_of_secret_key": "5de91ca73756eee74da3cac78a1fb329a02f8587f212bb9bc0b29e0e654a5795", + "encapsulation_seed": "38bf0033b779edf5367d9ebc01c988af90904c560970815837380650e4749eea", + "sha3_256_hash_of_ciphertext": "805ce0ab06c568b614cacbfa4cce5e65929e2846932a90e9418513dd48cf3358", + "shared_secret": "24caabaafe2063f812eaf57c58b6c0376ed8ff778cec1980ee9c3228801a75a5" + }, + { + "key_generation_seed": "2d5df64d62cb07fe630310bb801c658dbf3d97993e68626745de39d37fbfc2b27b534537addaba4ecf14f02ab317d36cb9f0f50222ced7cf029dff8a0d3d2fd9", + "sha3_256_hash_of_public_key": "72be2f5cd569e6229f00014854633f7b278e90af4ea593411909467a03e29cfb", + "sha3_256_hash_of_secret_key": "a68ca31b91491a129af9f280cb4c60c046e7a7ccddf41c9bd98663f8512ca34b", + "encapsulation_seed": "048ea516d0ebbd9f709b47eaac66f344c571cf50f0d01c9466aa061a50b66a24", + "sha3_256_hash_of_ciphertext": "d27a36808f09d6165aefc5d253090027eeff0653268c55a0b3de2a751ec765be", + "shared_secret": "9f734b15fc7dd99bc10d6cc7de5d2c93ac789a5665e508a95d075dffbad25abb" + }, + { + "key_generation_seed": "25056d1b8113bb362dd979d98643d7a7ac9c4f95994c0ba060609b6d07002ff3f48a9254dd40b117941fa35a66bb50296327b725525deef70e128ca8045ec451", + "sha3_256_hash_of_public_key": "0831c75b153fa17d336a79ff6e88ddf485daf7b1b0bcf39d8df15319d52ac67e", + "sha3_256_hash_of_secret_key": "2b983d7cb50880cff761441b6a2c66b7a41642cfd2a8cc297a5df53f0ed1947f", + "encapsulation_seed": "686c921c9db1263e78ae753b1c9c2e7936b8229dca48c0942c56c6bca4f10917", + "sha3_256_hash_of_ciphertext": "0892527da24957468b1b8fab49ad2d7dd6d238eca54624fce6a3c2dbbbe8d194", + "shared_secret": "d27e55f2a1f9ef336c8537f11da9875e03cc7dde8951d81b0740457609654107" + }, + { + "key_generation_seed": "e4d34e12982aeeb1d62fd488d9b9e28557ed3429292239fb4f76fa9098009acae6c45c7fc62329b13c8d29844405db8ff6860de474bf727ecd19e54e6e1a141b", + "sha3_256_hash_of_public_key": "b30cedc4316b63d75b641fbad2f33241a3fc47ab8b3ee1a3ed597e5b04f77c68", + "sha3_256_hash_of_secret_key": "a49a7533c671e533deec55af218ee511c57014070e138c7059853e08c34b0a78", + "encapsulation_seed": "2387772e50059cabda53cb93ba24b19ae529496c03b36584169451525c4a0e7e", + "sha3_256_hash_of_ciphertext": "390b3b6f9a0f9d97ccd452c83bf47416b22fd06b4d8968c44ee6effa7980e68c", + "shared_secret": "ed5903d1cf02861444cad7fc3793b4e1b9b6d0324bf6babfb768bb2f84300086" + }, + { + "key_generation_seed": "cd6a99396eb3539ca663a51e42063a3a262cc1c5a5fce1566f0597b52ad9fa325a3407f591791a5db4578b5972093a95bec3b8e70c1d542c9b5c9789729f8922", + "sha3_256_hash_of_public_key": "ee044dbdf6787ff038dbf9c133557169c62fc1ce2580739369aa87df00b49648", + "sha3_256_hash_of_secret_key": "9e865967f0d1e7d3f6a49f2bb623ced2a7b1408a945e02adbdca35846b70e7b9", + "encapsulation_seed": "155c29c5f0378df0cd0e847a80a07143cf7522fcd880c9229eb9feb1ce340cd2", + "sha3_256_hash_of_ciphertext": "6858db6eafd97259e6d775d881f7a877010179d4f827680426946b9ac4571261", + "shared_secret": 
"0d301028c1cb31dedc8a702a9e95b7d3589f68a6a1f600af84ae0f543e625361" + }, + { + "key_generation_seed": "6c8c53ed6f65e6b2e324b84364e10de42d1c26a106d4d1c99eee79c78586fb55b9402bf02481ce4b27a52e87feb92c4399c7f2988d40e942e7496ad15ad2aa88", + "sha3_256_hash_of_public_key": "e965ac6995d525e324e8252d8e2c2da909a29b24baca8b68daa5122cb539a474", + "sha3_256_hash_of_secret_key": "91051a381626e9465fc7ab20a1944eca64be461330bda53e7d1838a74597392d", + "encapsulation_seed": "a9cb9a61a3324b1ea5afe693b32784e2871096b2ca14a11acc9577c52359a241", + "sha3_256_hash_of_ciphertext": "42bfb5584610497fbc8080a664139afa534b39a417cb69ab0d2a16c8737eb1cb", + "shared_secret": "354d86b389021a3196b75c6582927b3a005fbfee0951f34d9cd5c8f415fa50f9" + }, + { + "key_generation_seed": "2107204cd995f1df14314d5381f8c5440f09a347502e161cffc0a2ec3dcfbc7324c3da70fe850e80aa818301d60c70f3038153866dcd5d179e22db59b8991bb4", + "sha3_256_hash_of_public_key": "a3d8a85f38cfda38c66ae39b2f9186ef7bc1e0c98e8976a6cbc6c4875d73d7fb", + "sha3_256_hash_of_secret_key": "cf7e797f8f7229a08206034737e54fe46645ab2fabdbfc8662b45a2604876b65", + "encapsulation_seed": "e99fbae8a024ebbbdcef32ce213f6aa942e3eca925e5da4c09975d773b33a175", + "sha3_256_hash_of_ciphertext": "ce7b65856502b280e02a36d906e018c6a23cae99f27ef6d65762c87ddfedff56", + "shared_secret": "3afcfdc446f93a8169024a24fc0383692843cfd6b4854a8e490892fc35aad4cb" + }, + { + "key_generation_seed": "63a925685a8ac5bbd918faa33ac397d1ffbcf99135d9da7c3d6ff7aa4c50af3d3afdb8a246a56ee71465591831c371f2eb87467b0559dedd776ba063ee6d2f93", + "sha3_256_hash_of_public_key": "aa73b40dedd61e6fdaac86971965c03ab14ae69e8130426fdf830bd57d0974ce", + "sha3_256_hash_of_secret_key": "1e7f3f1e5632d1df538b564304f56689742d1f652d8d32f019b45183af68a20e", + "encapsulation_seed": "67a216f37d67f5e74f782f1badbce1cc8c80a6130aec305b421899a4faa0a6c3", + "sha3_256_hash_of_ciphertext": "b6c40fd53bcd9ee1e70bc6783b402ae34c24dec724e63262d8583c90cd10256b", + "shared_secret": "ebba9a8bae936c829c1445c68595da96919041ee3d9b0fe27ca93db691146874" + }, + { + "key_generation_seed": "6a1aee5e708c1b47f02bdacce4f56c860f74fc7cfec1ef3b58285b1c8ad7fec2230e05b7114ff0395cc6634db1eae8258072d09c09f291e92d6620b177dc50d7", + "sha3_256_hash_of_public_key": "cf754f2ee43694865a09ca7beb0deda9b1328fd0abdf30ca5c338e27e8be04b5", + "sha3_256_hash_of_secret_key": "928592604aa44df8f2072f26e9511129f61da0b7f57acb3f6896635a9764ea87", + "encapsulation_seed": "52b19fea232c9154a3e431e9d69cda40013cf2d485c3cd027ad24e645420420b", + "sha3_256_hash_of_ciphertext": "a4b50ad169b436877652a6c64dbbffdd63f53274ddcf58f3c96c3929215aa956", + "shared_secret": "f063c0908deb2e61faa0c4c0f5051b2c8af7265060681df14bacb30f0228b3b3" + }, + { + "key_generation_seed": "6396b328b100e4c7f4bcae69875edea1a1982421558c608c13c592bf7b5d0fef1100ced48add211a5c937b8d6079d8e271af3f949edc61f70e60453aef20dea9", + "sha3_256_hash_of_public_key": "3a842153dee9e035299d7e268c9492d71188f9fb24bdc2dd20c1ddca647a1523", + "sha3_256_hash_of_secret_key": "28ee987bc4ae5a321d2669950dbf87596fc4b35c29f192836005064aa3dadee1", + "encapsulation_seed": "64440adb05db3308b189bf999f9ee16e8ee3a6ccbe11eebf0d3ae4b172da7d2f", + "sha3_256_hash_of_ciphertext": "126b64a28d82d06ca81f7e86d33f4949634924e04528d1142061320eaadcb841", + "shared_secret": "02d2e466e170bf45d3e9d357e2f04c34cda408cf147e9ff7a6e8c715f2c88ace" + }, + { + "key_generation_seed": "a453bcacdd2b0d4646009e5ed451c3c45f08fb827ef733db3c517a9dc1af93e67a3cc8aa3239d4c52ce4c95afdeff6efbfacac10d294edc0e7cf4535059bfdba", + "sha3_256_hash_of_public_key": 
"da43cae3c4da51d69a57eb87094a03cd3a9c3e6b4ed864cc691a60f0509cc646", + "sha3_256_hash_of_secret_key": "b204cd1c3122b29a3d99cb77e11427fc102375699928c5a6fe816f96bb212627", + "encapsulation_seed": "c8bb46b3a7344ad170c2052fb042b5a3b62e0590562ee82577b1081f6f114d16", + "sha3_256_hash_of_ciphertext": "228dfe300e3fabe4d4e550754ebcbbf72a796209c1d24e7ae93abb79e1cf17dd", + "shared_secret": "6a5b0842c122ab6ee251399492b061d2ab3e40843f4dc01c12fbd5bd545c600c" + }, + { + "key_generation_seed": "47ca2b77c5b717f423222c2730ca5cb9c856bc951d01b2b2c80bd76ccb5539b78f1481d7cab000e33fa07de8dc9627a85e76fabb4428a3376e66300cf12a0787", + "sha3_256_hash_of_public_key": "6533c524a32345eefdadc74a3c6ad7e981832797faf1068955b79f118dff9358", + "sha3_256_hash_of_secret_key": "b9dee52055b1f9a2b25a0c1be4d9f30d2ecd7c5a09f0f5294de2d49a55ac9fe0", + "encapsulation_seed": "2e2b70609f3fe029a14d09d5d659871ac776ce2797a0355f16e2eb68f5613fd1", + "sha3_256_hash_of_ciphertext": "2d7e8fbd6f2257b05eaaa2ca1643c452b4e0b623c9ad72027cca8dd8b7b5b91d", + "shared_secret": "2486c0a6cf17d9635dbca1f8395784cde54dccb7df10fced92183f983478fac1" + }, + { + "key_generation_seed": "aaf6eb40e596a5e3e8218871e708b089240dcbe7fd3641f0e5e41e071ce49107e2f8d320ac3cb0c52efdc753282f092bc39baf4a18783a48ea031a191865eb78", + "sha3_256_hash_of_public_key": "e2f60f27da7f318eb94a74b437f8e0bc9513e9bcc38dad99c174c1d75e0145f1", + "sha3_256_hash_of_secret_key": "68eaa8143a71bd5f6df29b128781e3f2a5fbc5d20534afb223ddcc64bc767f5a", + "encapsulation_seed": "4725dd8fb314bfd8ee23731c2341dbe114606d9abe6434c471b5573e7df193bb", + "sha3_256_hash_of_ciphertext": "b5b2de55cfaea8fe543f67c4f45a69780c3e2d932e56e0b574d9b40b56ddc1f1", + "shared_secret": "85690ee044e4d8e0540ff984775b59bb5134383c4e229e79e37d7d77632fadaa" + }, + { + "key_generation_seed": "6500f32c93415cfdbc0bd31d78d5be95cb9060c8cfa2013955b56f8b6868b322393308641a9a4647f230201e1389624a296b55192a9819fcb19ab77c25f95445", + "sha3_256_hash_of_public_key": "d4bf608793939ecba27dff5889d4d921c583999a57e20a48085ac549573e6abf", + "sha3_256_hash_of_secret_key": "5f9a14a9c41fc228306d79417015408f31bc9c3d97579616bd68a3d3444f9bd2", + "encapsulation_seed": "818d3bb8ebfb32bf464775f7139bac0a5bddce80ec5798595992f9403002cd5d", + "sha3_256_hash_of_ciphertext": "99fb7b7767fa94e74936a6678acfd5a2306b156f90f4608d507768a25403a16f", + "shared_secret": "d179d901a0570bd23aa52570c5c233a2240d4724e81d98c9ceedb74187eb75a6" + }, + { + "key_generation_seed": "7643cef2d62cc5aaeecf754653ea62294cd2208e5bf3ddeea209e3dc45373d49eac9d531a532770837a854b4f5531f6e0c8d6c10183b30d3435498c2dd142951", + "sha3_256_hash_of_public_key": "65f03add3941d22c80d50659f501f8cca1b448d84462ccb93d5f065889484bc0", + "sha3_256_hash_of_secret_key": "e4513cfd1dd2153d30d15b023421cb8e8456e6a40e612847e1713e915a29a87c", + "encapsulation_seed": "c92aa5fb91c980d9cade9ce99d4c75b2ffa7d6a6ff9bd59def1aa701f2a0992b", + "sha3_256_hash_of_ciphertext": "4cd7f0af86623b34c0b137a0516b876daa73ffd65d75871ddc828f86a7e9b224", + "shared_secret": "6d574af7fcb241fed8763b2d0a352870baf85ef686e90eea31f8500c35945ef7" + }, + { + "key_generation_seed": "f8ee95521060c03bb8dacc79f7eb7db640f545f315613a35d447a09e504cb4e13fc3d8392cb53f36ed647364a04e37278a0e0a45b720f4a75c580c9920eba98d", + "sha3_256_hash_of_public_key": "b8a3b8cf4709204a2fdb19889b0022ea655dfd58ff27e17d530510e1eef45793", + "sha3_256_hash_of_secret_key": "1f7cdadf3d4707efe1b7a6173d8f7b8a9f864ab388c3271d79ec424d9da3e896", + "encapsulation_seed": "7e8086a01dc5b3bb9eda25bcc45d27f99874841b97237968495800e007696ac5", + "sha3_256_hash_of_ciphertext": 
"1ca889a71a087ccee4ee1a178c3c55ce3649583f3db924e5c1003ccabc44091d", + "shared_secret": "b1090cf26276a81c22ef0e4479a4c705fe294d3b892051ddce7eab16495e0783" + }, + { + "key_generation_seed": "b8bd0493a882e3a49b4e0f6256fb1fea0912562fd9ba26ec3d6c9cc12c8973abd7e4b5d8021c486b9c3114d7cbbeb7cd49eba8a61bc2bcae1f1bef30a1daf76d", + "sha3_256_hash_of_public_key": "46fe6c37136273736ccb11df5b6d55debbc087de802404b72a003c5e8c809719", + "sha3_256_hash_of_secret_key": "3177ed170e84ff15fa1e744adc9ce806e431a68f15a7a026c6092bf593dec6a1", + "encapsulation_seed": "bb321ef14d44d8698df879fd52450567657f52a2df8d111185dcd7d4f30a72d4", + "sha3_256_hash_of_ciphertext": "aa9a0ea1823a84bc84649d26e249899437844827fe7c63d4828a5144929fa00a", + "shared_secret": "2fda9fa72321be3a0946d6d914c7ae714b9cc175619ab8abfd1f1fd499e0dc27" + }, + { + "key_generation_seed": "c0407e41ddf48d333978b89bcf2db01e4613425b456249e76a6f25b8a2827bf5b2dca81e3f5f748d23c9d356a2209f6b2d60247b2e45c9808de497f64f124643", + "sha3_256_hash_of_public_key": "a074ed1f76e97d68434ba4af2af0e549204222679e9e643580c35af3cdd247ce", + "sha3_256_hash_of_secret_key": "8f9b3f631d0fb04477846ae09aea725f1cc65b2cdefe2108cdb399c36db9b487", + "encapsulation_seed": "210a423dadd899b810f011794b79aa7f860823ac1962370e791287d3a1afa384", + "sha3_256_hash_of_ciphertext": "a4fb01f55eb2986c1f90cece43330bee1b16d7bda48d617fc94aa14fc540ec4e", + "shared_secret": "23798e8b9eaa0b369842cad83a2bc32206f791229c830d7593b9150161168011" + }, + { + "key_generation_seed": "334382d39164d1989696a2ff77b25a28af8bead9883b5365eb6fcca7c1781cc9aba5068af837be962f439f233593d193ce5e08f7d66efb3389885927b89d2523", + "sha3_256_hash_of_public_key": "26659f74fc9ec372fe18be4ed6aa28b7cd84ad1c0f0115dad011a11d20fda9ed", + "sha3_256_hash_of_secret_key": "5e3f83cb08ff80183879af9ade3631bed2a468e429ad027a5afeafd9a6f66362", + "encapsulation_seed": "bc856afe24213e3d14c3d6f9b89223bbcfb2c890722d770fa3492c1e46d1c302", + "sha3_256_hash_of_ciphertext": "6a4204db4803d26d7b8a769033e047f3b4cb616bf5451b88a1fb3ff219bba9cd", + "shared_secret": "d5c63d2bd297e2d8beb6755d6aefe7234dea8ecfba9acda48e643d89a4b95869" + }, + { + "key_generation_seed": "6995143e8eb8a6e93840f76eec844f67d2b5f75b1839a5040337e61f9806764a0f4dff8e56f68440836a072412a30d851ace2c7c6f02d60e7a8420001a63e6c6", + "sha3_256_hash_of_public_key": "2ca3d8ad2dab1dd8a2f4320658fe6eacabf70d907920593919119cf374516336", + "sha3_256_hash_of_secret_key": "2798448395f6ae3223550e7d5255e6a605b430229f5809b6efd0683a6b9ca402", + "encapsulation_seed": "5fc00f89563e44b24cd67d0ce684effe5731619fd08e7d72e2406eb016afb66b", + "sha3_256_hash_of_ciphertext": "dbd5fc0e1df33ff8af9efd5e281a2b98160f98653803cbd54e3a07292b37fcc7", + "shared_secret": "29d6a229adf49a1139794209307b0ca24be5825b2771809232fb718660162475" + }, + { + "key_generation_seed": "995eff7e0d195c6d0533f3dc194d47e60f9ad14696144cde694d60a95f3e96b4b28f7e7a15a005f92400ce33db073d49b53871594a88fc45e0f94207b5f0f2dc", + "sha3_256_hash_of_public_key": "de62eff56f6b49a156d065d85eaf0aa21ca229a20fa4e1372a410ab1c4ab6e7e", + "sha3_256_hash_of_secret_key": "6766cef3fe644a233caddf208074b58e6e83f8a78aecd00911c29a08f6f0b0f3", + "encapsulation_seed": "ea22a76065db4b565ee1807fbd813b43bde72b0e08407fb867c6a18995025e50", + "sha3_256_hash_of_ciphertext": "4c669e33b0227c9c2040cdacdbcb7d22b9984372587985ed8f860ffc8d037e79", + "shared_secret": "2a56a7a6d5b4c0500ec00a92e322e69be9e93006240889552072482966c54f56" + }, + { + "key_generation_seed": "3e809ec8dd0fec0d911a4e3fac20f70fbb128c5de94dc7184ca7310ae9157a98d8128601c28b1def8d393a0db283229f7c7383152a814e7cefe8ef9d9768c473", 
+ "sha3_256_hash_of_public_key": "66f161d27dc34e1a2f4b98b14a2b221d7eae26a593bfe432487d9994cb480656", + "sha3_256_hash_of_secret_key": "2237f6cbb452d375878b82c474a7c948ff587a5f3ed02bbba1459fa7ff8ef802", + "encapsulation_seed": "e9602b34fe73ad57f4bf6ead99743d645641553a5b9b9bf2e7016629e3e9bd76", + "sha3_256_hash_of_ciphertext": "8a2453a21a031cb8966924607a28882426fab2018826192e9bf833bdd38e0631", + "shared_secret": "ecb62b03f640ae4a9d89685fa0070efa93c24dfcff0d555142f9de25b62f861c" + }, + { + "key_generation_seed": "dbf1c465fff3d9f783bd9ee61a573715e45691147b8904439b5ffaa64f94ff7bb6d75eac6c76ced1b0a025b40a55440712ad8424672e761e9bc400d63812006f", + "sha3_256_hash_of_public_key": "7537e68ccf14e8b7e57090d8f648529dc461ca3950288879e88116acaf57b4a2", + "sha3_256_hash_of_secret_key": "bd8e44337eef01251217c4702c99232c001b33870953473d83a7486fd25484cf", + "encapsulation_seed": "f72b9080a6c051bbdb9b0abc1949034be0f89a9f73fe277ec4d4740c78d04a83", + "sha3_256_hash_of_ciphertext": "6077c60641c03aa8b36213dddf938311ce6b7b8801f967d42713e73249fe7c55", + "shared_secret": "6cc30699701927e07b559d708f93126ed70af254cf37e9056ec9a8d72bfbfc79" + }, + { + "key_generation_seed": "1f7cfd2b70863154e8a69d1758532e86c20cfc763d67c758bd10a13b24e759b5273b38bddc18488024ec90e62a4110129a42a16d2a93c45439888e76008604c6", + "sha3_256_hash_of_public_key": "82f68b15681cca5c2852c18d6e88bcb102a059c1d21936582adb71790cc0a335", + "sha3_256_hash_of_secret_key": "fd483ddc211c5c27f453bca56158e1f8084f075a7b06f5098cc3204427bf8197", + "encapsulation_seed": "f1e5542190db8ecf4b8d617a04fd3783ad0df78bf8dab749afb57db8321d151b", + "sha3_256_hash_of_ciphertext": "5c6cfa16f63b1aa93a2b5edc2f4b14c9782f286f53deedf3153f329a2ae2d57a", + "shared_secret": "250e7f67bb34dd5477471e3a701fb71a8138a1920eb807824380f88a944a6fa3" + }, + { + "key_generation_seed": "3a19577908efd37697b8edc7fdaf47d1bd3ad01a1b77faf794bee5b9c3192a6fa3729672816f3eba84c9638a79676eeac0f22c8a48e0c5d50a26ff0844c66b99", + "sha3_256_hash_of_public_key": "104fbf09445794c0ea0654f5caf70ee09d51c8386d4e1f467b10633c710ac2a4", + "sha3_256_hash_of_secret_key": "73fb93953ae666a9df1bf933ba56b8655ea9e319c0110c78d49f8480ae1aa3fd", + "encapsulation_seed": "74efa414ae171bf60b6f884cb7e5ce12028f49365daccfa23e845d551711660b", + "sha3_256_hash_of_ciphertext": "e51772e769f778067916e81a561ba6f64fae6096a2b4d4b945d9117e7c36e2b1", + "shared_secret": "0210935a18f1add5ebc2e1107bf40a628ef9cf8f6e7cdac81dc0291bb50a5a3f" + }, + { + "key_generation_seed": "ae0f65e29f38804a6759f70f4d01e2aaff7fe1c91ebc4f892dd0de3ab2e68ea5e03ff73e02a217659f53d8c47556bf3d8c94040f630d63605e2d0f923579370c", + "sha3_256_hash_of_public_key": "0f353d6a29813d354471eb8b4c38df93939eb3b1db80ddd1cdd6558a9f2687a3", + "sha3_256_hash_of_secret_key": "8a9edd6278707108652f3a5bc244592cb7a82c24634583ed2d3eb6a176b216b8", + "encapsulation_seed": "0b4c3cffb2ba4380ead13dc0d8acad2356b448a810da1df29f264c44aab6d24f", + "sha3_256_hash_of_ciphertext": "a00c37bd326205575fcbbc100ed54630aa0f2d6dd9e69807d49151ac9a81c429", + "shared_secret": "34169fc520e944f94ff1fa3799db802a4c1b26cb2971bf196259a937ab8362ca" + }, + { + "key_generation_seed": "6084a235f79dd093ef6d185b54e69df33dacee73a9bf2f379004421a10e3a79d9f684fb055ece19459eb464e91e126a7a6e3ed11ccee0046da234d964c985110", + "sha3_256_hash_of_public_key": "12e89c47142418c26396ef0174c02f69dc00022d56494d31af935490edee6385", + "sha3_256_hash_of_secret_key": "bc13b19f01d4cab36dac2154e0fd8fb7d2fa012596363942847f1b0bb3715f90", + "encapsulation_seed": "1c82471dcdfca3a6942061ab4f3d5bf0d197321437c706d9cccccce449447002", + 
"sha3_256_hash_of_ciphertext": "aed1a4ee810b81cb8ee49ee00e94ff4553f0ad2176fe4d27a09f4e68157fcc3b", + "shared_secret": "b5901e97eb656a09d2dd132528148ad07a0a89f638717eb53516a9ad19aa36bf" + }, + { + "key_generation_seed": "acd1c0217fad5caa4235544dd9de153ab1880ccf4c76f16f236fae4e4bfda04cf03a8abb0a5010f400ae5722a75bdf5a2f6d5b546b34d73857cb1bfc7e587aa7", + "sha3_256_hash_of_public_key": "2fac52ca60594e514333ead02cb1bfa5cd1d9ecda4a0b25ccdfc47ad3f632a85", + "sha3_256_hash_of_secret_key": "2743b7a9dd83a6b9bb5c2685f28b5629b2e31132ac64788a0929557d3449dfc0", + "encapsulation_seed": "46fe60a18124125ab93e0c578f1c02f1bd1301595013001c7f3c2fa56cde294e", + "sha3_256_hash_of_ciphertext": "7a039d19c45cc557036189cbbc63445b3504a689db56845ece99d593f165c6af", + "shared_secret": "df5117706beedfb521f0f021069fe9650d0844194339033de6997dced05268c8" + }, + { + "key_generation_seed": "241191401a63afa750f05662e354dddbc683c776ce3222beb83e3cf913d7ed7ca59b3bd23b49a95bc1fad20070fec930b6060bd827d742b077092e422268e15d", + "sha3_256_hash_of_public_key": "3eb856043b822df9d60b55fccb537afa3cacca9ef50433bde1dd9831e534d192", + "sha3_256_hash_of_secret_key": "398ae3423ba5c6bb05920e83e8939a104c3e4ad91647edc7db1667efe438cbfa", + "encapsulation_seed": "52fb7cb6a633fd2e83f2892bd9441b48fe59ecee6d026f5246fa7f2a5e55ee3b", + "sha3_256_hash_of_ciphertext": "05c9617befed785811fcc44d0fce5ae3a1ec66c4d1217ab42e4b754d0ef6207e", + "shared_secret": "eed6ecb831c881508f99ea115745448a7b312a4fa97f65044ebcede172dee2fa" + }, + { + "key_generation_seed": "b9a6b0c05677e957d41a34ba03bd06f2a9092e31f63389397d7e70fde6409d18e99c0e7b82be89bc3c1eaee6680aa4efd394e40c2b3f30523c8117f7c26a8969", + "sha3_256_hash_of_public_key": "306aed2a804a1c9bad4ab9e59f6126ad7c8633cdd0c2dd9d4c6f639d312ed47b", + "sha3_256_hash_of_secret_key": "88b28cf6fe19424ff82fc2bb096423b71f0cb8cf985af31bc15ceb4ed18a5e62", + "encapsulation_seed": "0f81a5f97082121244403da3feeb734f6084b314b8d94beb11627aa6ad1914e9", + "sha3_256_hash_of_ciphertext": "315ef84926802ecbbb437f8f50927d3a391b55ee6e47dbd19aa9adeebb808008", + "shared_secret": "d6cb77dc96f9ae4bf8b2fc0e277935b3b7b7a59f749ff2c08ad42659dbce386b" + }, + { + "key_generation_seed": "28a96c71577ba00c94f99fe965bc595a26db2b3ca6ab5cf8e443cdd8462b17929c35d165453e5fcdc6f9df64526d9de698f2bd3e6bac6c7fdd86601b9ba5f4a5", + "sha3_256_hash_of_public_key": "9bb3963cc1c5cf2b2d1c6ca76226328ab765a79999ccc71fe98d5bf3b34f51b1", + "sha3_256_hash_of_secret_key": "d8c2492023fb1175a84c19b3ce20f03dd12b1c26b65176d5582c319124bc0e24", + "encapsulation_seed": "31af9345365549ea0360169ed57daf98cc5444799d4c75d9f1f5d615e9df8a91", + "sha3_256_hash_of_ciphertext": "ae36e333ece7ca60c9bc2c4ddd01ca88443fd73bab08502656873b703af8925d", + "shared_secret": "1592f1413331f1871b41ff298bfa669bca667241790370d81163c9050b8ac365" + }, + { + "key_generation_seed": "c08ba2ef8c3a0a043afad931652d7a19e6e8cb670f840de5f1fa03309b2ca9ec5fe6141a25f7ab9f875f79e0a82d6ea5cde5a017ab637d5fdb7c42646a1d71df", + "sha3_256_hash_of_public_key": "6d029bb2121c788b5b6ead7226df664490dae362c4befb615717d81c656b3273", + "sha3_256_hash_of_secret_key": "0f2c7bd16d9289c3c27136df0cb6ebc624e80144cb92e6f0c897f58a53617ac3", + "encapsulation_seed": "774ae54093d694ef40b63b62c73e6c98295f606feb8699807eda1d030ffb996d", + "sha3_256_hash_of_ciphertext": "f8a85f106c6144edf1c7906ec26e292f0390aa9d45a22e67ba2ea018ff565c4d", + "shared_secret": "966f35c6bc47b4525d9af1ba350e8f44ea448cd1d90cf4e9c55ae5878920b7cd" + }, + { + "key_generation_seed": 
"0e3b30e102d707538c2671060f603bb0b8a014103f132d63b09ece07e4a4c75b11eafeca9e810796c34e8cfce9d59342884456007b01ddd12edce6d10ed87e4c", + "sha3_256_hash_of_public_key": "64c819d9bf66855f6ae70627f04da8378547e5867e2eb9759fe0971efd601c4a", + "sha3_256_hash_of_secret_key": "e85b62236d5c6c691a9076dc58bd5da80999eccc8df973c7d0e7e65d8465ea7d", + "encapsulation_seed": "9f27a47604ab5146caaf0aafe6d149424f8d66e39ba3baf5e6c73b19221b7e21", + "sha3_256_hash_of_ciphertext": "e9149359cc37143b0b565bd413a04f41a7833c5b76012a9263a086ac34071684", + "shared_secret": "aa333af0226492126c6985130ac7df2226a64d6d5c5314ce3f7a99add6696d49" + }, + { + "key_generation_seed": "2478f7d3de6041e7e5cd11c5e2ef483d1aa6218eb126444091535f6ae532fa7311136e2681df2ef881b51a092a9badbe72c9772c169808521c47149578621e28", + "sha3_256_hash_of_public_key": "db315cafbaec2f8a0142f45affff65289e826c9244ab1cb03f9f65df3e3cbcf7", + "sha3_256_hash_of_secret_key": "be98d62e4724c0d960ad4839298d4571f9871033b63bdf10d3b0e589db376ffa", + "encapsulation_seed": "90044031b7597b5e60a4f946b713e8996d0426d2cb013243d9b7d8f8ef159a0f", + "sha3_256_hash_of_ciphertext": "9f9368ba712cfee95f28a808cb2c23116a0c8da3910c0def2ef4e55947d7101b", + "shared_secret": "9535303e6035e30c6605c9e0f10c553dcd73828d8525cb190fea79937e093331" + }, + { + "key_generation_seed": "9d405d3ebdaf35fa8722de431b669722acaaea2fd10b814310b17f78b66147d16ceb14f7662be0c42779459f69a145c0e2ce9f0bd9a0cd1bf32ed5694cc9ae32", + "sha3_256_hash_of_public_key": "c8d853e65b5b118e28b7cb6f0d5d6f282e0ea20fd72f3690a6b232b20a8a55ec", + "sha3_256_hash_of_secret_key": "7a5e854bad628be7b99f524f52a97b0959c0ee67a7a10ad24b970e6e3aeeeb80", + "encapsulation_seed": "a7a31e140891ea37d2b6424b59b1f84f89220f32dcb73e037eb912b389d34a48", + "sha3_256_hash_of_ciphertext": "31b04a4127558df57844413928b29b11547de5afc088d568a962fe080c97f190", + "shared_secret": "0caa79e0054182c15e54159fbe36d9fb09481331a560ccd9714fff81db5615c4" + }, + { + "key_generation_seed": "9a86490f0615f3edf789cb0654066e9ee339cc59f968281f3b89213f83c692edfaeb2ef44d2f608621e831187ce79b2d2f4a20f1568bbe76b0d3d5af36111714", + "sha3_256_hash_of_public_key": "f69bd52cb1d071f1cc7720f949d44f66f40c917eb30f3a4b0eb519ecad2d03dc", + "sha3_256_hash_of_secret_key": "b6ef04e6acbcd1bb072d1cd28412cdb00ee40d04ce5b39442a2efd6756292167", + "encapsulation_seed": "70eb3f791faa91f1f982fa477dbcddeb2c55691c07f93b04cd31b37544c94b42", + "sha3_256_hash_of_ciphertext": "d8fac8ffc3d8dfebe66c219f4189b780d5ba8fe28d5ab79264345639740913b0", + "shared_secret": "744ce1aa5a9c515c6571ad6e2f5985df8434e35e9f714cf3659f184b5db4086f" + }, + { + "key_generation_seed": "6dfd9b575872560c7bdc2732c4a28dac4db04e535eb8e402c3dffd145c09ce47a2985c1c4d203778597947d710dec806e36b0cd949fe460ef141213bfc525e5b", + "sha3_256_hash_of_public_key": "10e01965f9c196d2f5f90ce3ce8f552f8a0d76ba8f5345365392febc50560012", + "sha3_256_hash_of_secret_key": "2b5c6d5fe9b09ab5a027522e699401223ae9d304ac912f1b15f0f647dd9a0a7f", + "encapsulation_seed": "30f4095015ba88b6d969672ca3f438c395dacf7d476ea7a9e805ce932d270a13", + "sha3_256_hash_of_ciphertext": "e8b01628c7d63f16c59e67352399a760581f341ed41535013490502e884733be", + "shared_secret": "726f7d790df4c860a0b2c40de9d62c85d0ff70c704ce5a1b3f6bf1b3e3f66cd8" + }, + { + "key_generation_seed": "6fca9f4e384d8418075cc064c70730801bdb8249899d456a77130d5beeb3662cce7683f8a03d3cf04e46970ff7d6a12494ae12558346dfc8fd9370bf944a0102", + "sha3_256_hash_of_public_key": "7c3991fa7983d0dd6e7157cfb152538466e9d5c3998a2b8ed862162b91ca851c", + "sha3_256_hash_of_secret_key": 
"72e786018ae9ab8293fa51cb7ca3ff0435e7cccbd5ae02b4680b92c148590265", + "encapsulation_seed": "cf31220f44de862e1719570e1b26e897790159366a385452334fe24cdcae28ba", + "sha3_256_hash_of_ciphertext": "5b2e8a3e38c13b53393c8654e92eeb6251ddbe50de4b3c5203a06977491f2fbc", + "shared_secret": "68f3e22d1b2d8c57bff32160e550becfce535fdcb327394aabeb60eede263213" + }, + { + "key_generation_seed": "e58f71bf175c0550a67e00e0f7b3b7fc36bc2707bf0c93044a492626de36301a7f7054814869cf7625e45647bc1547aff288dbb90699b2ad84893f3b755d9722", + "sha3_256_hash_of_public_key": "8aacd8940ff6fc27f175342be74d48075f8ae9320cae20a41c879c27c1bf815d", + "sha3_256_hash_of_secret_key": "f7399dbf35fcc57a9bff87b0087755faa75267788cd0921b9ebc5cde8b656271", + "encapsulation_seed": "bb5e65669a44e5d5c709bafa98c16ccba6ac2c4ae923334f69a11543eda64f5d", + "sha3_256_hash_of_ciphertext": "aac868f2299bcd272afacf50f1ab0db3d092d33565cffb5645d8b92271e7e893", + "shared_secret": "7f6085840a30c6b1fb9dca782e0c78a2264d54726c04c3127956f131165426c8" + }, + { + "key_generation_seed": "e3fc575ed51513e62aba655d24cd9c8f1c6c848aaffa946c49a53ac3ea59e474d82c2f1bf2e6aebde5660fa73356982e12999d8fdafbb3cb186341d0386dead0", + "sha3_256_hash_of_public_key": "149e0b6b49fe8adba1217c2c57c83f2b8c5f1d92f319e502b184a65869214f75", + "sha3_256_hash_of_secret_key": "6dfa4d29af6a0e8413d5591339c15d2e2cfac3f502f49acca3efb53b53624666", + "encapsulation_seed": "9ddb3aa9c7905d1a438c93bcf78e3e321813580371ab4e1289e2dbf3701972c2", + "sha3_256_hash_of_ciphertext": "ced7a64ce643faebac8ffd39c6a4594732b35f1d6899978ba192b87003d3ad27", + "shared_secret": "96e30641ea4280168da37291a3063342ced8e77b33b5415819938c0bd7264ffc" + }, + { + "key_generation_seed": "470b4943f0fe7fd0d8ec5185aba0d1db09d112934e4fb4787e2bbc6b88466e7b8b2809fd40008be70a6b184981101724bc3d5ec5e1956b510b82fd5ad0668a5a", + "sha3_256_hash_of_public_key": "29b1bff7f12eda28dfedfbf0ac16e27008c9fdc62c35e53b28a312bdc91c40bf", + "sha3_256_hash_of_secret_key": "762a61eb847c017ece920f51d5da7a9036ed8b835bfd7793527321ec635e2fd0", + "encapsulation_seed": "26d90b190a6c3d0d9a86cf66005154e7086749e966e7187c249ccb9329fd3b8b", + "sha3_256_hash_of_ciphertext": "bf49310a35f9ba7994645f12949e658b0dd43d3de76386dc20d08c650522f86c", + "shared_secret": "47e54c85cc0e2503629a8bfdcfe038c3cf692d723d462bab733c7c8e0aa37b02" + }, + { + "key_generation_seed": "6df4385db978d27b27d2aa5e452e4152b36f097503d9581ac3390105c5727e7dc95fa08ed106ce84660e8a4c90bd2b22634e40769aa0090a101c5dddad45edc5", + "sha3_256_hash_of_public_key": "b990059e901097d00e0ebaf40c5d5dab009c66798489d357e760478ce884cce5", + "sha3_256_hash_of_secret_key": "37a044795bd330e4dc60a6d84bc6e99664d1be418b0239661d2ff16d1501573f", + "encapsulation_seed": "7db6d1a129d6123f1f805b79ad3b413012ea86aed42a05e98e7b1f32f9fbbdec", + "sha3_256_hash_of_ciphertext": "329115908d0763110a387c99778e4746861e80367ee90fd821cda9acdb93fd64", + "shared_secret": "8569bd042465a2c4af628425cb102b15ed4f5feee16090e2234f3a884a0fa938" + }, + { + "key_generation_seed": "dbacba825728444921b227cdba54446b3f6881b47be9cd02832f78b023b1bee0e15274a8e2bc08fe818b117ba28c5dfae74d54fcdf6f20052f79be333edc8dde", + "sha3_256_hash_of_public_key": "175eb63c3144108548720ce7ee0f43a9ff3f52a9924efe9f2f59318bb93c86b5", + "sha3_256_hash_of_secret_key": "1993d7639b79f5e4871a7c58a69fec50f96c1424c2c0ee030ac054ae1b88a56f", + "encapsulation_seed": "1d129b27be7384c359d04311fe5c44917d1fde4bfb57314f483ac617edd5ac49", + "sha3_256_hash_of_ciphertext": "8f4225838f2964a986336bacddc40836a98c32cca68c6afcbcf9ef68d9a3760b", + "shared_secret": 
"c184e0b019c2db772e2c1ca6f97f47478d99cf0c4c5ae1406f51d15815022123" + }, + { + "key_generation_seed": "690eb71fd7052b906eaec09937a8ed374e0b02afa27c2f14399932be5839fad281c38c2cb5cfafac81b96a810ab749b61806b6d54c9f8cf4bf1be0192423288f", + "sha3_256_hash_of_public_key": "9bc32a138a2fb5b6072464172abe0fd97e9eabf357c3fa5391d94a415b53abd3", + "sha3_256_hash_of_secret_key": "3db4ab1393cfc8b1c708cf8efdb1c443c975878898b60182c22af66375cba13a", + "encapsulation_seed": "bbc773ebd2df42c36ae05952d6a64c63a5dfb82ceb3ef4f8d4df3a30ec8c0467", + "sha3_256_hash_of_ciphertext": "f1c85f9530d4471eb1401fcf422a29533738c485a6be25f0b554ebf40b49d49d", + "shared_secret": "6d72e23c8a4cc60b2f14adc788a5c480033bbf6eb111070912bc83ad7b89280b" + }, + { + "key_generation_seed": "32e0ea9089fa928482c0770da545af1bb871a03ce38604138b0d08ea2a10ca2bc06c5bef7b6508409daf847a64c8d30d0974fd3ba7476dc76c46b458a036d884", + "sha3_256_hash_of_public_key": "7ef43a72ef04766f1e899d25c9a005009c788b5faf985123cfb3fb97975de26d", + "sha3_256_hash_of_secret_key": "77431cb18010a604d56fe5a623bed2ffd028a741f176fa09546e9a45a48caa5e", + "encapsulation_seed": "5b17a6adad541efcbf5ae4b0c0452cd2ce32e4f0f8701801c5b63e197c1fcbf4", + "sha3_256_hash_of_ciphertext": "83ddab2e25614544649a1e497b5b21c40a3e154e8a22c270f63cb0c40aa868fd", + "shared_secret": "29e6b1edac0a9aa33066c113167e42c64d70215ed04963d8be2d4c2dcd0f6589" + }, + { + "key_generation_seed": "6fb2ec719f2a0dea152bf3f64b9d148f8ab8ba88f64e61f5db53e12d59f525574f797c007e4061f95c7d56cfc7ee5c49e849dde3fea8f25e7876df2a18515c34", + "sha3_256_hash_of_public_key": "2c0db43f39b672b2cd912f907cf76a0f6fda925eb2d205546431be0b37b20411", + "sha3_256_hash_of_secret_key": "09844e203f4d8fa30728ab388b9d654847febbf5c9cd939cdc11c9c9be24ce9c", + "encapsulation_seed": "61ab87659525de9656af41246f20e1dbe85c24e335e7ecf9493f46168bc14e94", + "sha3_256_hash_of_ciphertext": "a2108ea2c446b566a50c228928893e2e4bde5fafb2184af92eb1314113bde0d6", + "shared_secret": "cfd1b82181543656807880f6e2576f0b095bf84629b3367e9bdede24662ee42e" + }, + { + "key_generation_seed": "527fb88c8bd9a4d6031dad15e63878abd2b559e7e08d61f69e8e78fca964ee6ae32d432b4f9f751bde0496c580a181ffed762aa35454a02d3f1f47ee0394c89c", + "sha3_256_hash_of_public_key": "aae8e61b905723fa092fb95b839f6de3670c39ce0498c27b87d20c24e7f64e22", + "sha3_256_hash_of_secret_key": "3880f7ca8fc33575a7a6d8bb46fec86a3f12e0068630507ed245d8bc278fbe5d", + "encapsulation_seed": "eca2adc3da1fb15f34033405ec08ef2f46163df4bfcccf8842c600ce0bc2026c", + "sha3_256_hash_of_ciphertext": "ec48b3ec403609a0ce2d1268cadda8184ab9629cc5913135ffdecd420eed1aa9", + "shared_secret": "f7331b0a4674969838482b7184fa92e5246f11f5b5e284c3e179effff7eb6329" + }, + { + "key_generation_seed": "ac6fcfaeeef795b6ef9e062f02bf42975fa01e7d91ba832f74e05269a72684d05aeda108ea4d6c6bc0fb958286850422bc357ca67b83c986048e0d0087fa11ec", + "sha3_256_hash_of_public_key": "64e085f67e48f00a7a7f82963e8c67176bff839a54fa1008328c0612f98d83d3", + "sha3_256_hash_of_secret_key": "0bfbc25d9df751f4c30907095eb6d9a75ed07fa23218ad0fffc469f0e55553c2", + "encapsulation_seed": "c4f15bec2d7701339d0ade4835193bea3632edcf89e74992620d9eb623a0d0d4", + "sha3_256_hash_of_ciphertext": "fb74b727ad120c18915dca475f3082cd34ded7ae20a308106384ffb5caa029d3", + "shared_secret": "c89d62938a5caabfd5b30d82ea88aced52ef5f8ec0528e59a654e1f6aff1cc2f" + }, + { + "key_generation_seed": "ba2fb9318d4dbe7488057c33e95e6f054583a2800c41bb83083c330a914a12cfe63f8ffda3565c2424c89b20974b748a65a5aba75133fcb3156dfb6626a83bab", + "sha3_256_hash_of_public_key": 
"8dab879de09b58d0fc7ade140393ffb5343abbddabdc118fad519b14436a964c", + "sha3_256_hash_of_secret_key": "7c53072fd98ea7bd8c5e873688b1a5650fe7e11c791407ac8c118b7958cf414b", + "encapsulation_seed": "28878249e2ac2b6263422993923a0c8bd05ce56e385ed13c943b03d226856947", + "sha3_256_hash_of_ciphertext": "a1f1579c4ce8eb725e697623321b3d9f55f4b1d0def10b898535ef6614e9923e", + "shared_secret": "204d9272682710b52fb39b1176af3ff737848978770310df0c67996f6cb596c3" + }, + { + "key_generation_seed": "aa6dd1e5799cdf7af9c4fc632b3eb9d51d66e85c8e0a21ec98664fc51ab63c7dfda268813efab5204efa60f78bf81d320d01ac09ac06244f7afbd2d80fd356d9", + "sha3_256_hash_of_public_key": "919a696301240cd6129f66be58e19d99b0d827d9932785cd9ea3d92f7ba54463", + "sha3_256_hash_of_secret_key": "cb1d7301f15951883cc3f287d4dd8fdf5c9b7022f558dff551c2ade5f5065755", + "encapsulation_seed": "17fc65f7fbd7c75ceec421dee84dff5a8cb22764a182db17e0ebe857f54d60eb", + "sha3_256_hash_of_ciphertext": "f02654803493821dd9c2ed23f9e46a36addd5fca0da706bbeeda87a2df9fec4f", + "shared_secret": "76e5f7623e3e867fd12f28dfda4311f7cd90a405b73e994e857f693573fd2b8a" + }, + { + "key_generation_seed": "195d6c86a3df4c21e3007d7f2768b43c74cb3060e0eca77f0a5d3271542b9a84ae77e0f9f21eabd8c0c6eea7767f4e10fde5c2d79b8400bf96b19014b457ec21", + "sha3_256_hash_of_public_key": "cb6d7232426bdbdfdacd373c9190722e7bf342825f7d829185dcc9120588fc76", + "sha3_256_hash_of_secret_key": "a85e24cc2eafdfe40d82f46471112e1359628b9955f3feae9955b48d563ac952", + "encapsulation_seed": "fa0489f3730100609488e951e6aaa15c0f193bc1dbcfcd013bc418d6c507b176", + "sha3_256_hash_of_ciphertext": "17336b9ede3a1c26abe725828a5afbe746035a73dfd4a8fbde5040fbabeb2b8d", + "shared_secret": "874ac966970f29935db73c231e71a3559b2504e5446151b99c199276617b3824" + } +] diff --git a/tests/sha3.cc b/tests/sha3.cc index 7f9aeabe7..cc88a2e8e 100644 --- a/tests/sha3.cc +++ b/tests/sha3.cc @@ -12,6 +12,12 @@ #include #include "Hacl_Hash_SHA3.h" +#include "Hacl_Hash_SHA3_Scalar.h" +#include "hacl-cpu-features.h" + +#ifdef HACL_CAN_COMPILE_VEC256 +#include "Hacl_Hash_SHA3_Simd256.h" +#endif #include "config.h" #include "util.h" @@ -160,6 +166,163 @@ TEST(ApiSuite, ApiTest) EXPECT_EQ( strncmp((char*)digest, (char*)expected_digest.data(), digest_size), 0); } + + // Documentation. + // Lines after START and before END are used in documentation. + { + // ANCHOR(example scalar_sha3_256) + // This example uses Scalar SHA3-256. + + const char* message = "Hello, World!"; + uint32_t message_size = strlen(message); + + uint8_t digest[HACL_HASH_SHA3_256_DIGEST_LENGTH]; + + Hacl_Hash_SHA3_Scalar_sha3_256(digest, (uint8_t*)message, message_size); + // ANCHOR_END(example scalar_sha3_256) + + bytes expected_digest = from_hex( + "1af17a664e3fa8e419b8ba05c2a173169df76162a5a286e0c405b460d478f7ef"); + + EXPECT_EQ(strncmp((char*)digest, + (char*)expected_digest.data(), + HACL_HASH_SHA3_256_DIGEST_LENGTH), + 0); + } + + // Documentation. + // Lines after START and before END are used in documentation. + { + // ANCHOR(example scalar_shake128) + // This example uses Scalar SHAKE-128. + + const char* message = "Hello, World!"; + uint32_t message_size = strlen(message); + + // SHAKE will generate as many bytes as requested. 
+    uint32_t digest_size = 42;
+    uint8_t digest[42];
+
+    Hacl_Hash_SHA3_Scalar_shake128(
+      digest, digest_size, (uint8_t*)message, message_size);
+    // ANCHOR_END(example scalar_shake128)
+
+    bytes expected_digest =
+      from_hex("2bf5e6dee6079fad604f573194ba8426bd4d30eb13e8ba2edae70e529b570cb"
+               "dd588f2c5dd4e465dfbaf");
+
+    EXPECT_EQ(
+      strncmp((char*)digest, (char*)expected_digest.data(), digest_size), 0);
+  }
+
+#ifdef HACL_CAN_COMPILE_VEC256
+  hacl_init_cpu_features();
+  if (hacl_vec256_support()) {
+    // Documentation.
+    // Lines after ANCHOR and before ANCHOR_END are used in documentation.
+    {
+      // ANCHOR(example vec256_sha3_256)
+      // This example uses Vec256 SHA3-256.
+
+      const char* message = "Hello, World!";
+      uint32_t message_size = strlen(message);
+
+      uint8_t digest0[HACL_HASH_SHA3_256_DIGEST_LENGTH];
+      uint8_t digest1[HACL_HASH_SHA3_256_DIGEST_LENGTH];
+      uint8_t digest2[HACL_HASH_SHA3_256_DIGEST_LENGTH];
+      uint8_t digest3[HACL_HASH_SHA3_256_DIGEST_LENGTH];
+
+      Hacl_Hash_SHA3_Simd256_sha3_256(digest0,
+                                      digest1,
+                                      digest2,
+                                      digest3,
+                                      (uint8_t*)message,
+                                      (uint8_t*)message,
+                                      (uint8_t*)message,
+                                      (uint8_t*)message,
+                                      message_size);
+      // ANCHOR_END(example vec256_sha3_256)
+
+      bytes expected_digest = from_hex(
+        "1af17a664e3fa8e419b8ba05c2a173169df76162a5a286e0c405b460d478f7ef");
+
+      EXPECT_EQ(strncmp((char*)digest0,
+                        (char*)expected_digest.data(),
+                        HACL_HASH_SHA3_256_DIGEST_LENGTH),
+                0);
+      EXPECT_EQ(strncmp((char*)digest1,
+                        (char*)expected_digest.data(),
+                        HACL_HASH_SHA3_256_DIGEST_LENGTH),
+                0);
+      EXPECT_EQ(strncmp((char*)digest2,
+                        (char*)expected_digest.data(),
+                        HACL_HASH_SHA3_256_DIGEST_LENGTH),
+                0);
+      EXPECT_EQ(strncmp((char*)digest3,
+                        (char*)expected_digest.data(),
+                        HACL_HASH_SHA3_256_DIGEST_LENGTH),
+                0);
+    }
+
+    // Documentation.
+    // Lines after ANCHOR and before ANCHOR_END are used in documentation.
+    {
+      // ANCHOR(example vec256_shake128)
+      // This example uses Vec256 SHAKE-128.
+
+      const char* message0 = "Hello, World1!";
+      const char* message1 = "Hello, World2!";
+      const char* message2 = "Hello, World3!";
+      const char* message3 = "Hello, World4!";
+      uint32_t message_size = 14;
+
+      // SHAKE will generate as many bytes as requested.
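+      // Note: the four SIMD lanes share a single digest_size; each lane
+      // still hashes its own independent input (message0..message3 above).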
 class Sha3KAT : public ::testing::TestWithParam
@@ -188,6 +351,86 @@ TEST_P(Sha3KAT, TryKAT)
   EXPECT_EQ(test_case.md, digest) << bytes_to_hex(test_case.md) << std::endl
                                   << bytes_to_hex(digest) << std::endl;
 }
+
+  {
+    bytes digest(test_case.md.size(), 0);
+    if (test_case.md.size() == 224 / 8) {
+      Hacl_Hash_SHA3_Scalar_sha3_224(
+        digest.data(), test_case.msg.data(), test_case.msg.size());
+    } else if (test_case.md.size() == 256 / 8) {
+      Hacl_Hash_SHA3_Scalar_sha3_256(
+        digest.data(), test_case.msg.data(), test_case.msg.size());
+    } else if (test_case.md.size() == 384 / 8) {
+      Hacl_Hash_SHA3_Scalar_sha3_384(
+        digest.data(), test_case.msg.data(), test_case.msg.size());
+    } else if (test_case.md.size() == 512 / 8) {
+      Hacl_Hash_SHA3_Scalar_sha3_512(
+        digest.data(), test_case.msg.data(), test_case.msg.size());
+    }
+
+    EXPECT_EQ(test_case.md, digest) << bytes_to_hex(test_case.md) << std::endl
+                                    << bytes_to_hex(digest) << std::endl;
+  }
+
+#ifdef HACL_CAN_COMPILE_VEC256
+  hacl_init_cpu_features();
+  if (hacl_vec256_support()) {
+    bytes digest0(test_case.md.size(), 0);
+    bytes digest1(test_case.md.size(), 0);
+    bytes digest2(test_case.md.size(), 0);
+    bytes digest3(test_case.md.size(), 0);
+    if (test_case.md.size() == 224 / 8) {
+      Hacl_Hash_SHA3_Simd256_sha3_224(digest0.data(),
+                                      digest1.data(),
+                                      digest2.data(),
+                                      digest3.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.size());
+    } else if (test_case.md.size() == 256 / 8) {
+      Hacl_Hash_SHA3_Simd256_sha3_256(digest0.data(),
+                                      digest1.data(),
+                                      digest2.data(),
+                                      digest3.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.size());
+    } else if (test_case.md.size() == 384 / 8) {
+      Hacl_Hash_SHA3_Simd256_sha3_384(digest0.data(),
+                                      digest1.data(),
+                                      digest2.data(),
+                                      digest3.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.size());
+    } else if (test_case.md.size() == 512 / 8) {
+      Hacl_Hash_SHA3_Simd256_sha3_512(digest0.data(),
+                                      digest1.data(),
+                                      digest2.data(),
+                                      digest3.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.size());
+    }
+
+    EXPECT_EQ(test_case.md, digest0) << bytes_to_hex(test_case.md) << std::endl
+                                     << bytes_to_hex(digest0) << std::endl;
+    EXPECT_EQ(test_case.md, digest1) << bytes_to_hex(test_case.md) << std::endl
+                                     << bytes_to_hex(digest1) << std::endl;
+    EXPECT_EQ(test_case.md, digest2) << bytes_to_hex(test_case.md) << std::endl
+                                     << bytes_to_hex(digest2) << std::endl;
+    EXPECT_EQ(test_case.md, digest3) << bytes_to_hex(test_case.md) << std::endl
+                                     << bytes_to_hex(digest3) << std::endl;
+  }
+#endif
 }
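The KAT runs above feed the same message into all four lanes and expect the same digest back from each. The same idea works as a general self-check: every Simd256 lane must agree with the scalar implementation on identical input. A sketch of that cross-check, with an illustrative helper name that is not part of the patch:

    #include <cstring>

    // Returns true iff all four vec256 lanes reproduce the scalar digest.
    static bool simd256_lanes_match_scalar(uint8_t* msg, uint32_t msg_size)
    {
      uint8_t ref[HACL_HASH_SHA3_256_DIGEST_LENGTH];
      uint8_t lane[4][HACL_HASH_SHA3_256_DIGEST_LENGTH];
      Hacl_Hash_SHA3_Scalar_sha3_256(ref, msg, msg_size);
      Hacl_Hash_SHA3_Simd256_sha3_256(
        lane[0], lane[1], lane[2], lane[3], msg, msg, msg, msg, msg_size);
      for (size_t i = 0; i < 4; i++) {
        if (memcmp(lane[i], ref, sizeof(ref)) != 0) {
          return false;
        }
      }
      return true;
    }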
 class ShakeKAT : public ::testing::TestWithParam
@@ -201,21 +444,114 @@ TEST_P(ShakeKAT, TryKAT)
     if (test_case.md.size() == 128 / 8) {
       bytes digest(test_case.md.size(), 128 / 8);

-      Hacl_Hash_SHA3_shake128_hacl(
-        test_case.msg.size(), test_case.msg.data(), digest.size(), digest.data());
+      Hacl_Hash_SHA3_shake128_hacl(test_case.msg.size(),
+                                   test_case.msg.data(),
+                                   digest.size(),
+                                   digest.data());
+
+      EXPECT_EQ(test_case.md, digest) << bytes_to_hex(test_case.md) << std::endl
+                                      << bytes_to_hex(digest) << std::endl;
+    } else if (test_case.md.size() == 256 / 8) {
+      bytes digest(test_case.md.size(), 256 / 8);
+
+      Hacl_Hash_SHA3_shake256_hacl(test_case.msg.size(),
+                                   test_case.msg.data(),
+                                   digest.size(),
+                                   digest.data());
+
+      EXPECT_EQ(test_case.md, digest) << bytes_to_hex(test_case.md) << std::endl
+                                      << bytes_to_hex(digest) << std::endl;
+    }
+  }
+
+  {
+    if (test_case.md.size() == 128 / 8) {
+      bytes digest(test_case.md.size(), 128 / 8);
+
+      Hacl_Hash_SHA3_Scalar_shake128(digest.data(),
+                                     digest.size(),
+                                     test_case.msg.data(),
+                                     test_case.msg.size());

       EXPECT_EQ(test_case.md, digest) << bytes_to_hex(test_case.md) << std::endl
                                       << bytes_to_hex(digest) << std::endl;
     } else if (test_case.md.size() == 256 / 8) {
       bytes digest(test_case.md.size(), 256 / 8);

-      Hacl_Hash_SHA3_shake256_hacl(
-        test_case.msg.size(), test_case.msg.data(), digest.size(), digest.data());
+      Hacl_Hash_SHA3_Scalar_shake256(digest.data(),
+                                     digest.size(),
+                                     test_case.msg.data(),
+                                     test_case.msg.size());

       EXPECT_EQ(test_case.md, digest) << bytes_to_hex(test_case.md) << std::endl
                                       << bytes_to_hex(digest) << std::endl;
     }
   }
+
+#ifdef HACL_CAN_COMPILE_VEC256
+  hacl_init_cpu_features();
+  if (hacl_vec256_support()) {
+    if (test_case.md.size() == 128 / 8) {
+      bytes digest0(test_case.md.size(), 128 / 8);
+      bytes digest1(test_case.md.size(), 128 / 8);
+      bytes digest2(test_case.md.size(), 128 / 8);
+      bytes digest3(test_case.md.size(), 128 / 8);
+
+      Hacl_Hash_SHA3_Simd256_shake128(digest0.data(),
+                                      digest1.data(),
+                                      digest2.data(),
+                                      digest3.data(),
+                                      digest0.size(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.size());
+
+      EXPECT_EQ(test_case.md, digest0)
+        << bytes_to_hex(test_case.md) << std::endl
+        << bytes_to_hex(digest0) << std::endl;
+      EXPECT_EQ(test_case.md, digest1)
+        << bytes_to_hex(test_case.md) << std::endl
+        << bytes_to_hex(digest1) << std::endl;
+      EXPECT_EQ(test_case.md, digest2)
+        << bytes_to_hex(test_case.md) << std::endl
+        << bytes_to_hex(digest2) << std::endl;
+      EXPECT_EQ(test_case.md, digest3)
+        << bytes_to_hex(test_case.md) << std::endl
+        << bytes_to_hex(digest3) << std::endl;
+    } else if (test_case.md.size() == 256 / 8) {
+      bytes digest0(test_case.md.size(), 256 / 8);
+      bytes digest1(test_case.md.size(), 256 / 8);
+      bytes digest2(test_case.md.size(), 256 / 8);
+      bytes digest3(test_case.md.size(), 256 / 8);
+
+      Hacl_Hash_SHA3_Simd256_shake256(digest0.data(),
+                                      digest1.data(),
+                                      digest2.data(),
+                                      digest3.data(),
+                                      digest0.size(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.data(),
+                                      test_case.msg.size());
+
+      EXPECT_EQ(test_case.md, digest0)
+        << bytes_to_hex(test_case.md) << std::endl
+        << bytes_to_hex(digest0) << std::endl;
+      EXPECT_EQ(test_case.md, digest1)
+        << bytes_to_hex(test_case.md) << std::endl
+        << bytes_to_hex(digest1) << std::endl;
+      EXPECT_EQ(test_case.md, digest2)
+        << bytes_to_hex(test_case.md) << std::endl
+        << bytes_to_hex(digest2) << std::endl;
+      EXPECT_EQ(test_case.md, digest3)
+        << bytes_to_hex(test_case.md) << std::endl
+        << bytes_to_hex(digest3) << std::endl;
+    }
+  }
+#endif
 }

 INSTANTIATE_TEST_SUITE_P(
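Note that the argument order flips between the two one-shot SHAKE entry points exercised above: the legacy Hacl_Hash_SHA3_shake128_hacl takes the input pair first and the output pair last, while the new Hacl_Hash_SHA3_Scalar_shake128 takes the output pair first. A minimal side-by-side sketch, with msg and msg_size supplied by the caller and a helper name that is illustrative only:

    // Same 42-byte XOF output, two calling conventions.
    static void shake128_both_apis(uint8_t* msg,
                                   uint32_t msg_size,
                                   uint8_t out_old[42],
                                   uint8_t out_new[42])
    {
      // Legacy API: input length, input, output length, output.
      Hacl_Hash_SHA3_shake128_hacl(msg_size, msg, 42, out_old);
      // New scalar API: output, output length, input, input length.
      Hacl_Hash_SHA3_Scalar_shake128(out_new, 42, msg, msg_size);
    }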
diff --git a/tools/configure.py b/tools/configure.py
index 1edd68e08..de8a9fa94 100644
--- a/tools/configure.py
+++ b/tools/configure.py
@@ -35,10 +35,10 @@ def dependencies(self, source_dir, algorithm, source_file):
         """
         # With old compilers like GCC 4.8 we have to set -march=native for this
         # to work.
-        copmiler_version = subprocess.run(
+        compiler_version = subprocess.run(
            self.compiler + " --version", stdout=subprocess.PIPE, shell=True, check=True
        )
-        stdout = copmiler_version.stdout.decode("utf-8")
+        stdout = compiler_version.stdout.decode("utf-8")
        args = ""
        if "4.8" in stdout and "gcc" in stdout:
            processor = platform.processor()
@@ -72,8 +72,7 @@ def dependencies(self, source_dir, algorithm, source_file):
        files = []
        for line in stdout.splitlines():
            # Remove object file and the c file itself
-            first_line_search = "(\w*).o: " + \
-                re.escape(join(source_dir, "(\w*).c"))
+            first_line_search = "(\w*).o: " + re.escape(join(source_dir, "(\w*).c"))
            line = re.sub(first_line_search, "", line)
            line = line.strip()
            line = line.split(" ")
@@ -87,8 +86,7 @@
        # Get all source files in source_dir
        source_files = glob(join(source_dir, "*.c"))
        # remove source_dir and .c
-        source_files = list(
-            map(lambda s: s[len(source_dir) + 1: -2], source_files))
+        source_files = list(map(lambda s: s[len(source_dir) + 1 : -2], source_files))

        # Now let's collect the c files from the included headers
        # This adds all files without looking at the feature requirements into deps.
@@ -132,8 +130,11 @@ def __init__(
        self.hacl_files = self.config["hacl_sources"]
        self.evercrypt_files = self.config["evercrypt_sources"]
        self.vale_files = self.config["vale_sources"]
+        self.libcrux_files = self.config["libcrux_sources"]
        self.tests = self.config["tests"]
+        self.libcrux_tests = self.config["libcrux_tests"]
        self.benchmarks = self.config["benchmarks"]
+        self.libcrux_benchmarks = self.config["libcrux_benchmarks"]
        self.include_paths = [include_dir]

        # We need the config.h generated by CMake
@@ -143,6 +144,9 @@ def __init__(
        # If vale is compiled add the include path
        if len(self.vale_files) != 0:
            self.include_paths.extend(self.config["vale_include_paths"])
+        # If libcrux is compiled add the include path
+        if len(self.libcrux_files) != 0:
+            self.include_paths.extend(self.config["libcrux_include_paths"])

        # If the build directory is empty, copy the `default_config.h` there to
        # make the dependency analysis work.
@@ -177,8 +181,7 @@ def __init__(
        self.hacl_includes = []
        for a in self.hacl_files:
            for source_file in self.hacl_files[a]:
-                files, includes = self.dependencies(
-                    source_dir, a, source_file["file"])
+                files, includes = self.dependencies(source_dir, a, source_file["file"])
                self.hacl_includes.extend(
                    includes if type(includes) == list else [includes]
                )
@@ -207,11 +210,22 @@ def __init__(
            f for files in [self.tests[b] for b in self.tests] for f in files
        ]

+        self.libcrux_test_sources = [
+            f
+            for files in [self.libcrux_tests[b] for b in self.libcrux_tests]
+            for f in files
+        ]
+
        # Flatten benchmark sources
        self.benchmark_sources = [
            f for files in [self.benchmarks[b] for b in self.benchmarks] for f in files
        ]

+        # Flatten libcrux benchmark sources
+        self.libcrux_benchmark_sources = [
+            f
+            for files in [self.libcrux_benchmarks[b] for b in self.libcrux_benchmarks]
+            for f in files
+        ]
+
        # Flatten vale files into a single list for each platform.
        # This is all or nothing.
        platforms = {}
@@ -225,6 +239,14 @@ def __init__(
            platforms[p] = [join("vale", "src", f) for f in platforms[p]]
        self.vale_files = platforms

+        # Flatten libcrux sources
+        libcrux_files_flattened = []
+        for _, impls in self.libcrux_files.items():
+            libcrux_files_flattened.extend(impl["file"] for impl in impls)
+        self.libcrux_files = [
+            join("libcrux", "src", f) for f in libcrux_files_flattened
+        ]
+
        # Evercrypt has feature detection and we don't disable anything.
        self.evercrypt_compile_files = []
        for a in self.evercrypt_files:
@@ -240,8 +262,7 @@ def __init__(
            self.hacl_compile_feature[k] = list(
                dict.fromkeys(self.hacl_compile_feature[k])
            )
-        self.evercrypt_compile_files = list(
-            dict.fromkeys(self.evercrypt_compile_files))
+        self.evercrypt_compile_files = list(dict.fromkeys(self.evercrypt_compile_files))
        self.hacl_includes = list(dict.fromkeys(self.hacl_includes))
        # Drop Hacl_ files from evercrypt
        self.evercrypt_compile_files = [
@@ -318,6 +339,16 @@ def write_cmake_config(self, cmake_config):
            )
        )

+        out.write(
+            "set(LIBCRUX_TEST_SOURCES\n\t%s\n)\n"
+            % (
+                "\n\t".join(
+                    join("${PROJECT_SOURCE_DIR}", "tests", f)
+                    for f in self.libcrux_test_sources
+                ).replace("\\", "/")
+            )
+        )
+
        out.write(
            "set(BENCHMARK_SOURCES\n\t%s\n)\n"
            % (
@@ -328,6 +359,16 @@
            )
        )

+        out.write(
+            "set(LIBCRUX_BENCHMARK_SOURCES\n\t%s\n)\n"
+            % (
+                "\n\t".join(
+                    join("${PROJECT_SOURCE_DIR}", "benchmarks", f)
+                    for f in self.libcrux_benchmark_sources
+                ).replace("\\", "/")
+            )
+        )
+
        for os in self.vale_files:
            out.write(
                "set(VALE_SOURCES_%s\n\t%s\n)\n"
                % (
@@ -340,11 +381,19 @@
                )
            )

+        # self.libcrux_files was flattened to a single list above, so the
+        # set is written exactly once.
+        out.write(
+            "set(LIBCRUX_SOURCES\n\t%s\n)\n"
+            % (
+                "\n\t".join(
+                    join("${PROJECT_SOURCE_DIR}", f) for f in self.libcrux_files
+                ).replace("\\", "/"),
+            )
+        )
+
        out.write(
            "set(ALGORITHM_TEST_FILES\n\t%s\n)\n"
-            % "\n\t".join("TEST_FILES_" + a for a in self.tests).replace(
-                "\\", "/"
-            )
+            % "\n\t".join("TEST_FILES_" + a for a in self.tests).replace("\\", "/")
        )
        for a in self.tests:
            out.write(
@@ -365,12 +414,17 @@ def dep_config(self):
        kremlin_includes = [
            include for include in self.hacl_includes if include.startswith("kremlin")
        ]
+        libcrux_includes = [
+            include for include in self.include_paths if include.startswith("libcrux")
+        ]
        return {
            "sources": self.hacl_compile_feature,
            "includes": includes,
            "kremlin_includes": kremlin_includes,
            "vale_sources": self.vale_files,
            "vale_includes": vale_includes,
+            "libcrux_sources": self.libcrux_files,
+            "libcrux_includes": libcrux_includes,
        }

    def write_dep_config(self, dep_config):
diff --git a/tools/test.py b/tools/test.py
index 688159e33..b7da0e98b 100644
--- a/tools/test.py
+++ b/tools/test.py
@@ -64,8 +64,9 @@ def run_tests(tests, bin_path, test_args=[], algorithms=[], coverage=False):
        if coverage:
            generate_report(test_name, my_env)

+    os.chdir(dir_backup)
+
    if coverage:
-        os.chdir(dir_backup)
        subprocess.call(["./tools/coverage.sh"])
@@ -153,4 +154,5 @@ def test(args):
    # parse file
    config = json.loads(data)

-    run_tests(config["tests"], "Debug", algorithms=algorithms, coverage=args.coverage)
+    tests = {**config["tests"], **config["libcrux_tests"]}
+    run_tests(tests, "Debug", algorithms=algorithms, coverage=args.coverage)
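For reference, both KAT fixtures in tests/sha3.cc above select the SHA-3 variant from the expected digest length with inline if-chains. The same dispatch can be captured in one helper; this is a sketch under that assumption, not an API the patch adds:

    // Pick the one-shot scalar SHA-3 variant from the digest length in bytes.
    // Returns false for lengths that are not a SHA-3 digest size.
    static bool sha3_scalar_by_digest_len(uint8_t* digest,
                                          size_t digest_len,
                                          uint8_t* msg,
                                          uint32_t msg_size)
    {
      switch (digest_len) {
        case 224 / 8: Hacl_Hash_SHA3_Scalar_sha3_224(digest, msg, msg_size); return true;
        case 256 / 8: Hacl_Hash_SHA3_Scalar_sha3_256(digest, msg, msg_size); return true;
        case 384 / 8: Hacl_Hash_SHA3_Scalar_sha3_384(digest, msg, msg_size); return true;
        case 512 / 8: Hacl_Hash_SHA3_Scalar_sha3_512(digest, msg, msg_size); return true;
        default: return false;
      }
    }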