diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index c444f69da..7d0fdaae4 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -15,18 +15,16 @@ void Foo(){} try ```cpp /// @brief I like comments +/// @details anything longer than ~80 should go in a details void Foo(){} ``` -After making release or tag please -```bash -cd Doc -doxygen Doxyfile -``` -to produce both html and latex file. To produce book like pdf file: -```bash -cd latex -pdflatex refman.tex +You can +```cpp +/// @param blarb this is parameter needed for function +/// @return returning some fancy number +int Fool(int blarb){} ``` +Doxygen documentation will be updated after each commit to develop. ## Logger MaCh3 is using spdlog logger see [here](https://github.com/gabime/spdlog/tree/master). And wraps it around MaCh3 names [here](https://github.com/mach3-software/MaCh3/blob/develop/manager/MaCh3Logger.h) @@ -63,7 +61,7 @@ bool AsimovFit = false; if(config[AsimovFit]) { - AsimovFit = config[AsimovFit].as; + AsimovFit = config[AsimovFit].as(); } ``` This can be replaced with: @@ -75,17 +73,18 @@ bool AsimovFit = GetFromManager(config[AsimovFit], false); Some fits require a lot of RAM. The easiest and fastest solution to reduce RAM is to use `float` instead of `double`. -MaCh3 has a custom type defined as `_float_`, which is usually a `double` +MaCh3 has a custom type defined as `M3::float_t`, which is usually a `double` unless the `_LOW_MEMORY_STRUCTS_` directive is defined at the compilation -level. When defined, `_float_` will be an actual `float`. +level. When defined, `M3::float_t` will be an actual `float`. -By using `_float_`, one can flexibly change between these types. During +By using `M3::float_t`, one can flexibly change between these types. During development, it is advised to use these data types unless specific data types are necessary due to desired precision, code safety, etc. 
## Error handling MaCh3 uses custom error handling implemented [here](https://github.com/mach3-software/MaCh3/blob/develop/manager/MaCh3Exception.h) -Instead of throw + +Never ever ever bare throw. Always throw an exception, preferably one that subclasses one defined by the standard library in `<stdexcept>`. ```cpp throw; ``` @@ -98,7 +97,66 @@ or ```cpp throw MaCh3Exception(__FILE__ , __LINE__ ); ``` -This way we can ensure error messages are unified and user always get hints where in the code problem occurred. If current MaCh3Exception is not sufficient consider implementing new or expanding current exceptions in MaCh3Exception.h. +This way we can ensure error messages are unified and users always get hints about where in the code the problem occurred. If current `MaCh3Exception` is not sufficient consider implementing new or expanding current exceptions in MaCh3Exception.h. + +## Compiler warning levels getting you down? + +If you are trying to compile some new development and it is failing because of some innocuous warning that has been elevated to an error by the compiler flags, please don't just turn off the flags. A much better approach is to disable the diagnostic locally. This makes it easier to keep most of the code stringently checked, while giving you, the developer, the ability to stay in flow. +It also allows for later 'fixing' of these warnings, if they need to be fixed, to be done systematically by grepping for the relevant directives. + +The way to turn off diagnostics is, as below: + +```c++ +#pragma GCC diagnostic ignored "-Wfloat-conversion" +``` + +N.B. that clang also understands these directives, so don't panic that they have `GCC` in them. + +This will disable that diagnostic for the rest of the compilation unit (usually a .cc file). Note that this means if you include these in header files, they will disable diagnostics more widely; please try and disable the diagnostics over as little code as possible. 
+ +If a specific error is really getting you down and it's showing up everywhere, the serious option is to disable it repo-wide by modifying the `MaCh3Warnings` interface target, defined in the top-level project [CMakeLists.txt](../CMakeLists.txt) like so: + +```cmake +target_compile_options(MaCh3Warnings INTERFACE + # ... + -Wno-conversion + # ... +) +``` + +Please attempt more localised options before reaching for this, but sometimes this represents the best way to proceed with development without 'fixing' innocuous warnings. + +The really serious option is to configure with: `-DMaCh3_WERROR_ENABLED=OFF`, which will disable the `-Werror` flag. + +### An example + +We got this compiler error: + +```shell +/root/software/MaCh3/MaCh3_splitpr/splines/splineFDBase.cpp: In member function ‘virtual void splineFDBase::CalcSplineWeights()’: +/root/software/MaCh3/MaCh3_splitpr/splines/splineFDBase.cpp:349:35: error: useless cast to type ‘double’ [-Werror=useless-cast] + 349 | weightvec_Monolith[iSpline] = double(weight); + | ^~~~~~~~~~~~~~ +cc1plus: all warnings being treated as errors +``` + +for this code: + +```c++ + weightvec_Monolith[iSpline] = double(weight); +``` + +The compiler is right that this is a useless cast, but `weight` can sometimes be a float, in which case we would get a conversion warning/error, so it seems like a no-win situation. We can 'save' the current diagnostics with `#pragma GCC diagnostic push`, disable the relevant one as above, and then revert to the saved set of diagnostics with `#pragma GCC diagnostic pop`. +Putting it all together might look like: + +```c++ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wuseless-cast" + weightvec_Monolith[iSpline] = double(weight); +#pragma GCC diagnostic pop +``` + +This allows us to disable the diagnostic just for the relevant line. ## Formatting To ensure a unified style in MaCh3 software you can use a clang-format file which has instructions about formatting code. 
diff --git a/.github/workflows/CDImage.yml b/.github/workflows/CDImage.yml index 0bbe2034c..215ce5a2a 100644 --- a/.github/workflows/CDImage.yml +++ b/.github/workflows/CDImage.yml @@ -1,3 +1,4 @@ +--- # Update MaCh3 container image registry with newest updates name: Image CD @@ -24,6 +25,10 @@ jobs: file: Doc/MaCh3DockerFiles/Alma9/Dockerfile tag_latest: alma9latest installoptions: -j4 + - os: rocky9cuda + file: Doc/MaCh3DockerFiles/Rocky9/Dockerfile + tag_latest: rocky9cudalatest + installoptions: -j4 # - os: ubuntu22.04 # file: Doc/MaCh3DockerFiles/Ubuntu22.04/Dockerfile # tag_latest: ubuntu22.04latest @@ -36,40 +41,40 @@ jobs: name: Image CD ${{ matrix.os }} steps: - - name: Checkout repository - uses: actions/checkout@v4 + - name: Checkout repository + uses: actions/checkout@v4 - - name: Log in to GitHub Container Registry - run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin + - name: Log in to GitHub Container Registry + run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin - - name: Build Docker image - run: | - if [ "${{ github.ref_type }}" == 'tag' ]; then - docker build . \ - --file ${{ matrix.file }} \ - --tag ghcr.io/${{ github.repository_owner }}/mach3:${{ matrix.os }}${{ github.ref_name }} \ - --build-arg MACH3_VERSION=${{ github.ref_name }} \ - --build-arg INSTALL_OPTIONS="${{ matrix.installoptions }}" - else - docker build . \ - --file ${{ matrix.file }} \ - --tag ghcr.io/${{ github.repository_owner }}/mach3:${{ matrix.tag_latest }} \ - --build-arg MACH3_VERSION=develop \ - --build-arg INSTALL_OPTIONS="${{ matrix.installoptions }}" - fi + - name: Build Docker image + run: | + if [ "${{ github.ref_type }}" == "tag" ]; then + docker build . 
\ + --file "${{ matrix.file }}" \ + --tag "ghcr.io/${{ github.repository_owner }}/mach3:${{ matrix.os }}${{ github.ref_name }}" \ + --build-arg MACH3_VERSION="${{ github.ref_name }}" \ + --build-arg INSTALL_OPTIONS="${{ matrix.installoptions }}" + else + docker build . \ + --file "${{ matrix.file }}" \ + --tag "ghcr.io/${{ github.repository_owner }}/mach3:${{ matrix.tag_latest }}" \ + --build-arg MACH3_VERSION="develop" \ + --build-arg INSTALL_OPTIONS="${{ matrix.installoptions }}" + fi - - name: Push Docker image - run: | - if [ "${{ github.ref_type }}" == 'tag' ]; then - docker push ghcr.io/${{ github.repository_owner }}/mach3:${{ matrix.os }}${{ github.ref_name }} - else - docker push ghcr.io/${{ github.repository_owner }}/mach3:${{ matrix.tag_latest }} - fi + - name: Push Docker image + run: | + if [ "${{ github.ref_type }}" == "tag" ]; then + docker push "ghcr.io/${{ github.repository_owner }}/mach3:${{ matrix.os }}${{ github.ref_name }}" + else + docker push "ghcr.io/${{ github.repository_owner }}/mach3:${{ matrix.tag_latest }}" + fi - - name: Delete old images - uses: actions/delete-package-versions@v5 - with: - package-name: 'mach3' - package-type: 'container' - min-versions-to-keep: 5 - delete-only-untagged-versions: 'true' + - name: Delete old images + uses: actions/delete-package-versions@v5 + with: + package-name: 'mach3' + package-type: 'container' + min-versions-to-keep: 5 + delete-only-untagged-versions: 'true' diff --git a/.github/workflows/CIBuild.yml b/.github/workflows/CIBuild.yml index 6810fc740..81109e679 100644 --- a/.github/workflows/CIBuild.yml +++ b/.github/workflows/CIBuild.yml @@ -30,6 +30,10 @@ jobs: file: Doc/MaCh3DockerFiles/Fedora32/Dockerfile tag: fedora32latest cmakeoptions: -DMaCh3_PYTHON_ENABLED=ON -DMaCh3_LOW_MEMORY_STRUCTS_ENABLED=ON + - os: Rocky9 CUDA + file: Doc/MaCh3DockerFiles/Rocky9/Dockerfile + tag: rocky9latest + cmakeoptions: -DMaCh3_PYTHON_ENABLED=ON name: Build CI ${{ matrix.os }} diff --git 
a/.github/workflows/CIPythonValidations.yml b/.github/workflows/CIPythonValidations.yml new file mode 100644 index 000000000..d5e854dbb --- /dev/null +++ b/.github/workflows/CIPythonValidations.yml @@ -0,0 +1,43 @@ +# Performs unit and integration testing + +name: Validations Python CI + +# The events that trigger the workflow +on: + pull_request: + branches: [ develop ] + +permissions: + contents: read + packages: write + +jobs: + build: + runs-on: ubuntu-latest + + container: + image: ghcr.io/mach3-software/mach3:alma9latest + + steps: + - uses: actions/checkout@v4 + + - name: Get MaCh3 Validations + run: | + cd /opt/ + git clone https://github.com/mach3-software/MaCh3Tutorial.git MaCh3Validations + + - name: Install pytest + working-directory: /opt/MaCh3Validations + run: | + mkdir python-test-modules + pip install -r ./CIValidations/PythonTests/requirements.txt -t python-test-modules/ + + - name: Validations + working-directory: /opt/MaCh3Validations + run: | + source /opt/root/v6-26-10/bin/thisroot.sh + export PYTHONPATH=$PYTHONPATH:$PWD/python-test-modules/ + export MACH3=$PWD + + python3 -m pytest --config Inputs/ManagerTest.yaml CIValidations/PythonTests + diff --git a/.github/workflows/CIValidations.yml b/.github/workflows/CIValidations.yml index 77dc48ca9..e4e7dce3e 100644 --- a/.github/workflows/CIValidations.yml +++ b/.github/workflows/CIValidations.yml @@ -21,10 +21,10 @@ jobs: fail-fast: false # Prevents cancellation of remaining jobs if one fails matrix: include: - - name: Spline Validations + - name: Reweight Validations test_1: ./CIValidations/SplineValidations test_2: ./CIValidations/SamplePDFValidations - test_3: empty + test_3: ./CIValidations/NuOscillatorInterfaceValidations test_4: empty test_5: empty test_6: empty @@ -33,7 +33,7 @@ jobs: - name: Covariance Validations test_1: ./CIValidations/CovarianceValidations test_2: ./CIValidations/MaCh3ModeValidations - test_3: empty + test_3: ./CIValidations/UnitTests/manager_tests test_4: empty 
test_5: empty test_6: empty @@ -75,7 +75,7 @@ jobs: cd MaCh3Validations mkdir build cd build - cmake ../ -DMaCh3_Branch=${{ github.head_ref }} + cmake ../ -DMaCh3_Branch=${{ github.head_ref }} -DMaCh3Tutorial_UNITTESTS_ENABLED=TRUE - name: Build MaCh3 Validations run: | diff --git a/.github/workflows/Doxygen.yml b/.github/workflows/Doxygen.yml index a12da9c65..2f3ce5474 100644 --- a/.github/workflows/Doxygen.yml +++ b/.github/workflows/Doxygen.yml @@ -48,11 +48,12 @@ jobs: - run: sudo apt-get install -y perl # Runs a single command using the runners shell - - name: Doxygen Action - uses: mattnotmitt/doxygen-action@1.9.8 + - uses: DenverCoder1/doxygen-github-pages-action@v2.0.0 with: - doxyfile-path: './Doxyfile' - working-directory: ./Doc + github_token: ${{ secrets.GITHUB_TOKEN }} + folder: Doc/html + branch: gh-pages + config_file: Doc/Doxyfile - name: Upload Doxygen Artifact uses: actions/upload-artifact@v4 @@ -60,7 +61,7 @@ jobs: retention-days: 1 name: DoxygenHTML path: Doc/html - + Sphinx: # The type of runner that the job will run on runs-on: ubuntu-latest @@ -120,7 +121,7 @@ jobs: name: DocumentationHTML path: to-upload - + Deploy: runs-on: ubuntu-latest needs: [Doxygen, Sphinx] diff --git a/.github/workflows/Newsletter.yml b/.github/workflows/Newsletter.yml index 40dd280f7..bb2604072 100644 --- a/.github/workflows/Newsletter.yml +++ b/.github/workflows/Newsletter.yml @@ -27,10 +27,17 @@ jobs: latest_tag=$(git describe --tags --abbrev=0) echo "latest_tag=${latest_tag}" >> "$GITHUB_ENV" - - name: Get the current date + - name: Get the most active author in the last week for MaCh3 run: | - current_date=$(date +"%d/%m/%Y") - echo "current_date=${current_date}" >> "$GITHUB_ENV" + most_active_author=$(git log --since='1 week ago' --pretty=format:"%an" | sort | uniq -c | sort -nr | head -n 1) + echo "most_active_author=${most_active_author}" >> "$GITHUB_ENV" + + - name: Get the author with most commits (count and name) + run: | + COUNT=$(echo 
"$most_active_author" | awk '{print $1}') + NAME=$(echo "$most_active_author" | awk '{$1=""; sub(/^ /, ""); print}') + echo "maCh3_most_active_author_count=${COUNT}" >> "$GITHUB_ENV" + echo "maCh3_most_active_author_name=${NAME}" >> "$GITHUB_ENV" - name: Checkout the MaCh3Tutorial repository uses: actions/checkout@v4 @@ -52,6 +59,18 @@ jobs: tutorial_latest_tag=$(git -C MaCh3Tutorial describe --tags --abbrev=0) echo "tutorial_latest_tag=${tutorial_latest_tag}" >> "$GITHUB_ENV" + - name: Get the most active author in the last week for MaCh3Tutorial + run: | + most_active_author=$(git -C MaCh3Tutorial log --since='1 week ago' --pretty=format:"%an" | sort | uniq -c | sort -nr | head -n 1) + echo "tutorial_most_active_author=${most_active_author}" >> "$GITHUB_ENV" + + - name: Get the author with most commits for MaCh3Tutorial (count and name) + run: | + COUNT=$(echo "$tutorial_most_active_author" | awk '{print $1}') + NAME=$(echo "$tutorial_most_active_author" | awk '{$1=""; sub(/^ /, ""); print}') + echo "maCh3Tutorial_most_active_author_count=${COUNT}" >> "$GITHUB_ENV" + echo "maCh3Tutorial_most_active_author_name=${NAME}" >> "$GITHUB_ENV" + - name: Checkout the MaCh3-PythonUtils repository uses: actions/checkout@v4 with: @@ -72,6 +91,18 @@ jobs: python_utils_latest_tag=$(git -C MaCh3-PythonUtils describe --tags --abbrev=0) echo "python_utils_latest_tag=${python_utils_latest_tag}" >> "$GITHUB_ENV" + - name: Get the most active author in the last week for MaCh3-PythonUtils + run: | + most_active_author=$(git -C MaCh3-PythonUtils log --since='1 week ago' --pretty=format:"%an" | sort | uniq -c | sort -nr | head -n 1) + echo "python_utils_most_active_author=${most_active_author}" >> "$GITHUB_ENV" + + - name: Get the author with most commits for MaCh3-PythonUtils (count and name) + run: | + COUNT=$(echo "$python_utils_most_active_author" | awk '{print $1}') + NAME=$(echo "$python_utils_most_active_author" | awk '{$1=""; sub(/^ /, ""); print}') + echo 
"maCh3PythonUtils_most_active_author_count=${COUNT}" >> "$GITHUB_ENV" + echo "maCh3PythonUtils_most_active_author_name=${NAME}" >> "$GITHUB_ENV" + - name: Create a new GitHub Discussion uses: abirismyname/create-discussion@v1.x env: @@ -87,16 +118,19 @@ jobs: - Total number of commits: ${{ env.commit_count }} - Total number of commits in the last week: ${{ env.commits_last_week }} - Most recent tag: ${{ env.latest_tag }} + - Most active author in the last week: ${{ env.maCh3_most_active_author_name }} (Commits: ${{ env.maCh3_most_active_author_count }}) **MaCh3Tutorial Repository** - Total number of commits: ${{ env.tutorial_commit_count }} - Total number of commits in the last week: ${{ env.tutorial_commits_last_week }} - Most recent tag: ${{ env.tutorial_latest_tag }} + - Most active author in the last week: ${{ env.maCh3Tutorial_most_active_author_name }} (Commits: ${{ env.maCh3Tutorial_most_active_author_count }}) **MaCh3-PythonUtils Repository** - Total number of commits: ${{ env.python_utils_commit_count }} - Total number of commits in the last week: ${{ env.python_utils_commits_last_week }} - Most recent tag: ${{ env.python_utils_latest_tag }} + - Most active author in the last week: ${{ env.maCh3PythonUtils_most_active_author_name }} (Commits: ${{ env.maCh3PythonUtils_most_active_author_count }}) Cheers, MaCh3-bot diff --git a/.gitignore b/.gitignore index 70c72f20d..8179065db 100644 --- a/.gitignore +++ b/.gitignore @@ -2,16 +2,10 @@ *.o *.sif -#ignore some stuff in libconfig -*.lo -*.la -*.Plo -lib/* - -build/ +build*/ install/ -Doc/latex/ -Doc/html/ +Doc/html +Doc/latex # Ignore emacs/vim stuff *#* diff --git a/.mailmap b/.mailmap index 165d945a1..3fecaeb98 100644 --- a/.mailmap +++ b/.mailmap @@ -6,6 +6,7 @@ Edward Atkin EdAtkin <35494466+EdAtkin@users.noreply.githu # Daniel Barrow Daniel Barrow dbarrow257 +Daniel Barrow dbarrow257 <31003009+dbarrow257@users.noreply.github.com> # Kamil Skwarczynski Kamil Skwarczynski Kamil 
<45295406+KSkwarczynski@users.noreply.github.com> @@ -18,6 +19,8 @@ Henry Wallace Henry Wallace <67589487+henry-wallace-ph Henry Wallace Henry Wallace Henry Wallace Henry Wallace Henry Wallace Henry Wallace +Henry Wallace Henry Wallace +Henry Wallace Henry Wallace # Ewan Miller Ewan Miller Ewan diff --git a/CMakeLists.txt b/CMakeLists.txt index 0dffa34e2..f760d90df 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,17 +3,23 @@ set(CMAKE_VERBOSE_MAKEFILE ON) # CMake version check cmake_minimum_required(VERSION 3.14 FATAL_ERROR) -#KS: Enable language, necessary when using CUDA -enable_language(CXX) -set(MaCh3_VERSION 1.2.0) +project(MaCh3 VERSION 1.2.0 LANGUAGES CXX) +set(MaCh3_VERSION ${PROJECT_VERSION}) + +#LP - This option name is confusing, but I won't change it now. +option(USE_CPU "Whether to *only* use the CPU (i.e. no GPU)" OFF) +option(MaCh3_PYTHON_ENABLED "Whether to build MaCh3 python bindings" OFF) +option(MaCh3_WERROR_ENABLED "Whether to build MaCh3 with heightened compiler pedantry" ON) # Try to find CUDA +add_library(MaCh3CompileDefinitions INTERFACE) + find_package(CUDAToolkit QUIET) -add_library(MaCh3CompilerOptions INTERFACE) # Check if CUDA was found if(CUDAToolkit_FOUND AND NOT(USE_CPU)) message(STATUS "CUDA found. Adding CUDA support.") set(MaCh3_GPU_ENABLED TRUE) + enable_language(CUDA) set(CPU_ONLY FALSE) else() message(STATUS "CUDA not found. Proceeding without CUDA support.") @@ -21,12 +27,6 @@ else() set(CPU_ONLY TRUE) endif() -if(CPU_ONLY) - project(MaCh3 VERSION ${MaCh3_VERSION} LANGUAGES CXX) -else() - project(MaCh3 VERSION ${MaCh3_VERSION} LANGUAGES CXX CUDA) -endif() - # Changes default install path to be a subdirectory of the build dir. 
# Can set build dir at configure time with -DCMAKE_INSTALL_PREFIX=/install/path if(CMAKE_INSTALL_PREFIX STREQUAL "" OR CMAKE_INSTALL_PREFIX STREQUAL @@ -40,29 +40,26 @@ endif() find_program(CMAKE_C_COMPILER NAMES $ENV{CC} gcc PATHS ENV PATH NO_DEFAULT_PATH) find_program(CMAKE_CXX_COMPILER NAMES $ENV{CXX} g++ PATHS ENV PATH NO_DEFAULT_PATH) -LIST(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/Modules) +LIST(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/cmake/Modules) #KS: Load cmake function like DefineEnabledRequiredSwitch allowing to write more compact cmake -include(${CMAKE_CURRENT_LIST_DIR}/cmake/Modules/MaCh3Utils.cmake) +include(MaCh3Utils) ################################## Dependencies ################################ -#Loads all dependencies -include(${CMAKE_CURRENT_LIST_DIR}/cmake/Modules/MaCh3Dependencies.cmake) +include(MaCh3Dependencies) ############################ C++ Compiler #################################### if (NOT DEFINED CMAKE_CXX_STANDARD OR "${CMAKE_CXX_STANDARD} " STREQUAL " ") set(CMAKE_CXX_STANDARD 14) + cmessage(STATUS "Set default CXX standard: \"${CMAKE_CXX_STANDARD}\"") endif() -# KS: If C++ standard is lower than C++ standard used for ROOT compilation things will go terribly wrong -if(DEFINED ROOT_CXX_STANDARD AND ROOT_CXX_STANDARD GREATER CMAKE_CXX_STANDARD) +if(DEFINED ROOT_CXX_STANDARD AND NOT ROOT_CXX_STANDARD EQUAL CMAKE_CXX_STANDARD) set(CMAKE_CXX_STANDARD ${ROOT_CXX_STANDARD}) + cmessage(STATUS "Set CXX standard due to ROOT: \"${ROOT_CXX_STANDARD}\"") endif() -cmessage(STATUS "CMAKE CXX Standard: ${CMAKE_CXX_STANDARD}") - -# KS: ROOT changed cmake in 6.32, we should move avay from using Luke's hack, keep it for due to compatibility -if(ROOT_CXX_STANDARD LESS 14 AND ROOT_VERSION VERSION_LESS 6.32.00) - cmessage(WARNING "ROOT CXX STANDARD: ${ROOT_CXX_STANDARD}") -endif() +cmessage(STATUS "CMAKE_CXX_STANDARD: \"${CMAKE_CXX_STANDARD}\"") +set(MACH3_CXX_STANDARD ${CMAKE_CXX_STANDARD}) +set_property(GLOBAL PROPERTY 
MACH3_CXX_STANDARD "${MACH3_CXX_STANDARD}") set(MACH3_CXX_STANDARD ${CMAKE_CXX_STANDARD}) set_property(GLOBAL PROPERTY MACH3_CXX_STANDARD "${MACH3_CXX_STANDARD}") @@ -78,34 +75,48 @@ endif() set( CMAKE_EXPORT_COMPILE_COMMANDS ON ) # KS: Compile and link options for more see https://github.com/cpp-best-practices/cppbestpractices/tree/master -target_compile_options(MaCh3CompilerOptions INTERFACE - -g # Generate debug information - -Wextra # Enable extra warning flags - -Wall # Enable all standard warning flags - -pedantic # Enforce strict ISO compliance (all versions of GCC, Clang >= 3.2) - -Wshadow # Warn when a variable declaration shadows one from an outer scope - -Wuninitialized # Warn about uninitialized variables - -Wnon-virtual-dtor # Warn when a class with virtual functions has a non-virtual destructor - -Woverloaded-virtual # Warn when a function declaration hides a virtual function from a base class - -Wformat=2 # Warn on security issues around functions that format output (ie printf) - -Wunused # Warn on anything being unused - -Wredundant-decls # Warn about multiple declarations of the same entity. Useful for code cleanup. - #-Wstrict-aliasing=2 # Helps detect potential aliasing issues that could lead to undefined behavior. 
- #-Wuseless-cast # Warn if you perform a cast to the same type (only in GCC >= 4.8) - #-Wpadded # Warn when padding is added to a structure or class for alignment - #-Wnull-dereference # Warn if a null dereference is detected (only in GCC >= 6.0) - #-Wold-style-cast # Warn for c-style casts - #-Wconversion # Warn on type conversions that may lose data - #-Werror # Treat Warnings as Errors +add_library(MaCh3Warnings INTERFACE) +target_compile_options(MaCh3Warnings INTERFACE + -Wextra # Enable extra warning flags + -Wall # Enable all standard warning flags + -Wshadow # Warn when a variable declaration shadows one from an outer scope + -Wuninitialized # Warn about uninitialized variables + -Wnon-virtual-dtor # Warn when a class with virtual functions has a non-virtual destructor + -Woverloaded-virtual # Warn when a function declaration hides a virtual function from a base class + -Wformat=2 # Warn on security issues around functions that format output (ie printf) + -Wunused # Warn on anything being unused + -Wredundant-decls # Warn about multiple declarations of the same entity. Useful for code cleanup. + -Wstrict-aliasing=2 # Helps detect potential aliasing issues that could lead to undefined behavior. 
+ -Wuseless-cast # Warn if you perform a cast to the same type (only in GCC >= 4.8) + -Wnull-dereference # Warn if a null dereference is detected (only in GCC >= 6.0) + -Wold-style-cast # Warn for c-style casts + -Wconversion # Warn on type conversions that may lose data + -Wformat-security # Warn on functions that are potentially insecure for formatting + -Walloca # Warn if `alloca` is used, as it can lead to stack overflows + #-Wswitch-enum # Warn if a `switch` statement on an enum does not cover all values + #-Wfloat-equal # Warn if floating-point values are compared directly + #-Wpadded # Warn when padding is added to a structure or class for alignment ) # KS Some compiler options are only available in GCC, in case we move to other compilers we will have to expand this if(CMAKE_CXX_COMPILER_ID MATCHES "GNU") - target_compile_options(MaCh3CompilerOptions INTERFACE + target_compile_options(MaCh3Warnings INTERFACE -Wlogical-op # Warn about logical operations being used where bitwise were probably wanted (only in GCC) -Wduplicated-cond # Warn if if / else chain has duplicated conditions (only in GCC >= 6.0) -Wduplicated-branches # Warn if if / else branches have duplicated code (only in GCC >= 7.0) ) endif() +if(MaCh3_WERROR_ENABLED) +target_compile_options(MaCh3Warnings INTERFACE + -Werror # Treat Warnings as Errors +) +endif() + +add_library(MaCh3CompilerOptions INTERFACE) +target_link_libraries(MaCh3CompilerOptions INTERFACE MaCh3CompileDefinitions) +target_compile_options(MaCh3CompilerOptions INTERFACE + -g # Generate debug information + -pedantic # Enforce strict ISO compliance (all versions of GCC, Clang >= 3.2) +) #KS: If Debug is not defined disable it by default DefineEnabledRequiredSwitch(MaCh3_DEBUG_ENABLED FALSE) @@ -125,7 +136,7 @@ if(MaCh3_DEBUG_ENABLED) target_compile_options(MaCh3CompilerOptions INTERFACE -O0 # Turn off any optimisation to have best debug experience ) - target_compile_definitions(MaCh3CompilerOptions INTERFACE DEBUG=${DEBUG_LEVEL}) 
+ target_compile_definitions(MaCh3CompileDefinitions INTERFACE DEBUG=${DEBUG_LEVEL}) cmessage(STATUS "Enabling DEBUG with Level: \"${DEBUG_LEVEL}\"") else() #KS: Consider in future __attribute__((always_inline)) see https://indico.cern.ch/event/386232/sessions/159923/attachments/771039/1057534/always_inline_performance.pdf @@ -150,16 +161,16 @@ DefineEnabledRequiredSwitch(MaCh3_MULTITHREAD_ENABLED TRUE) if(MaCh3_MULTITHREAD_ENABLED) target_compile_options(MaCh3CompilerOptions INTERFACE -fopenmp) target_link_libraries(MaCh3CompilerOptions INTERFACE gomp) - target_compile_definitions(MaCh3CompilerOptions INTERFACE MULTITHREAD) + target_compile_definitions(MaCh3CompileDefinitions INTERFACE MULTITHREAD) endif() if(CPU_ONLY) - target_compile_definitions(MaCh3CompilerOptions INTERFACE CPU_ONLY) + target_compile_definitions(MaCh3CompileDefinitions INTERFACE CPU_ONLY) endif() if(MaCh3_GPU_ENABLED) - target_compile_definitions(MaCh3CompilerOptions INTERFACE CUDA) - target_compile_definitions(MaCh3CompilerOptions INTERFACE GPU_ON) + target_compile_definitions(MaCh3CompileDefinitions INTERFACE CUDA) + target_compile_definitions(MaCh3CompileDefinitions INTERFACE GPU_ON) endif() if(NOT DEFINED MaCh3_LOW_MEMORY_STRUCTS_ENABLED) @@ -167,11 +178,11 @@ if(NOT DEFINED MaCh3_LOW_MEMORY_STRUCTS_ENABLED) endif() if(MaCh3_LOW_MEMORY_STRUCTS_ENABLED) - target_compile_definitions(MaCh3CompilerOptions INTERFACE _LOW_MEMORY_STRUCTS_) + target_compile_definitions(MaCh3CompileDefinitions INTERFACE _LOW_MEMORY_STRUCTS_) endif() set_target_properties(MaCh3CompilerOptions PROPERTIES EXPORT_NAME CompilerOptions) -install(TARGETS MaCh3CompilerOptions +install(TARGETS MaCh3CompilerOptions MaCh3CompileDefinitions EXPORT MaCh3-targets LIBRARY DESTINATION lib/) @@ -213,11 +224,10 @@ add_subdirectory(mcmc) add_subdirectory(Diagnostics) add_subdirectory(plotting) if (MaCh3_PYTHON_ENABLED) + set(CMAKE_INSTALL_RPATH "$ORIGIN/../lib") add_subdirectory(python) endif() - - #This is to export the target 
properties of MaCh3 #Anything that links to "MaCh3" will get all of these target properties add_library(MaCh3 INTERFACE) @@ -238,6 +248,7 @@ install(EXPORT MaCh3-targets NAMESPACE MaCh3:: DESTINATION ${CMAKE_INSTALL_PREFIX}/ ) +install(DIRECTORY cmake DESTINATION ${CMAKE_BINARY_DIR}) #KS: Options to print dependency graph DefineEnabledRequiredSwitch(MaCh3_DependancyGraph FALSE) diff --git a/Diagnostics/CMakeLists.txt b/Diagnostics/CMakeLists.txt index 556376bfa..63e0ee838 100644 --- a/Diagnostics/CMakeLists.txt +++ b/Diagnostics/CMakeLists.txt @@ -9,7 +9,7 @@ foreach(app ) add_executable(${app} ${app}.cpp) add_dependencies(DiagApps ${app}) - target_link_libraries(${app} MaCh3::All) + target_link_libraries(${app} MaCh3::All MaCh3Warnings) install(TARGETS ${app} DESTINATION bin) endforeach(app) diff --git a/Diagnostics/CombineMaCh3Chains.cpp b/Diagnostics/CombineMaCh3Chains.cpp index 536d1b6f1..eeac91f28 100644 --- a/Diagnostics/CombineMaCh3Chains.cpp +++ b/Diagnostics/CombineMaCh3Chains.cpp @@ -21,8 +21,8 @@ bool checkSoftwareVersions(TFile *file, TFile *prevFile, const std::string& Conf { bool weirdFile = false; - TMacro *versionHeader = (TMacro*)file->Get(ConfigName.c_str()); - TMacro *prevVersionHeader = (TMacro*)prevFile->Get(ConfigName.c_str()); + TMacro *versionHeader = file->Get<TMacro>(ConfigName.c_str()); + TMacro *prevVersionHeader = prevFile->Get<TMacro>(ConfigName.c_str()); // EM: compare the digest of the version header file in this file, with the previous one if((versionHeader == NULL) && (prevVersionHeader == NULL)){ @@ -58,12 +58,12 @@ void CopyDir(TDirectory *source) { //copy all objects and subdirs of directory source as a subdir of the current directory source->ls(); TDirectory *savdir = gDirectory; - TDirectory *adir = (TDirectory*)savdir->Get(source->GetName()); + TDirectory *adir = savdir->Get<TDirectory>(source->GetName()); adir->cd(); //loop on all entries of this directory TKey *key; TIter nextkey(source->GetListOfKeys()); - while ((key = (TKey*)nextkey())) { + while 
((key = static_cast<TKey*>(nextkey()))) { const char *classname = key->GetClassName(); TClass *cl = gROOT->GetClass(classname); if (!cl) continue; @@ -74,7 +74,7 @@ void CopyDir(TDirectory *source) { CopyDir(subdir); adir->cd(); } else if (cl->InheritsFrom("TTree")) { - TTree *T = (TTree*)source->Get(key->GetName()); + TTree *T = source->Get<TTree>(key->GetName()); adir->cd(); TTree *newT = T->CloneTree(); newT->Write(); @@ -165,7 +165,7 @@ void CombineChain() outputFile->cd(); // EM: write out the version and config files to the combined file - TMacro *MaCh3_Config = (TMacro*)prevFile->Get("MaCh3_Config"); + TMacro *MaCh3_Config = prevFile->Get<TMacro>("MaCh3_Config"); if(MaCh3_Config != NULL) MaCh3_Config->Write(); delete MaCh3_Config; @@ -184,8 +184,8 @@ void CombineChain() outputFile = new TFile(OutFileName.c_str(), "UPDATE"); // Get the source directory - TDirectory *MaCh3EngineDir = (TDirectory*)prevFile->Get("MaCh3Engine"); - TDirectory *CovarianceFolderDir = (TDirectory*)prevFile->Get("CovarianceFolder"); + TDirectory *MaCh3EngineDir = prevFile->Get<TDirectory>("MaCh3Engine"); + TDirectory *CovarianceFolderDir = prevFile->Get<TDirectory>("CovarianceFolder"); outputFile->cd(); CopyDir(MaCh3EngineDir); diff --git a/Diagnostics/DiagMCMC.cpp b/Diagnostics/DiagMCMC.cpp index f1245b733..854b36536 100644 --- a/Diagnostics/DiagMCMC.cpp +++ b/Diagnostics/DiagMCMC.cpp @@ -12,8 +12,7 @@ void DiagMCMC(const std::string& inputFile, const std::string& config) YAML::Node Settings = YAML::LoadFile(config); // Make the processor - MCMCProcessor* Processor = new MCMCProcessor(inputFile); - + auto Processor = std::make_unique<MCMCProcessor>(inputFile); Processor->SetOutputSuffix("_MCMC_Diag"); //KS:Turn off plotting detector and some other setting Processor->SetExcludedTypes(GetFromManager<std::vector<std::string>>(Settings["DiagMCMC"]["ExcludedTypes"], {""})); @@ -27,8 +26,6 @@ void DiagMCMC(const std::string& inputFile, const std::string& config) //KS: finally call main method Processor->DiagMCMC(); - - delete Processor; } int main(int argc, char *argv[]) { 
diff --git a/Diagnostics/GetPenaltyTerm.cpp b/Diagnostics/GetPenaltyTerm.cpp index 72f231896..525099421 100644 --- a/Diagnostics/GetPenaltyTerm.cpp +++ b/Diagnostics/GetPenaltyTerm.cpp @@ -31,7 +31,7 @@ /// /// @todo KS: This should really be moved to MCMC Processor -std::vector nominal; +std::vector nominal; std::vector isFlat; std::vector BranchNames; std::vector ParamNames; @@ -45,10 +45,10 @@ void ReadXSecFile(const std::string& inputFile) TFile *TempFile = new TFile(inputFile.c_str(), "open"); // Get the matrix - TMatrixDSym *XSecMatrix = (TMatrixDSym*)(TempFile->Get("CovarianceFolder/xsec_cov")); + TMatrixDSym *XSecMatrix = TempFile->Get<TMatrixDSym>("CovarianceFolder/xsec_cov"); // Get the settings for the MCMC - TMacro *Config = (TMacro*)(TempFile->Get("MaCh3_Config")); + TMacro *Config = TempFile->Get<TMacro>("MaCh3_Config"); if (Config == nullptr) { MACH3LOG_ERROR("Didn't find MaCh3_Config tree in MCMC file! {}", inputFile); TempFile->ls(); @@ -110,7 +110,7 @@ void ReadXSecFile(const std::string& inputFile) } } #ifdef MULTITHREAD - #pragma omp parallel for + #pragma omp parallel for collapse(2) #endif for (int i = 0; i < size; i++) { @@ -126,14 +126,15 @@ void ReadXSecFile(const std::string& inputFile) } void GetPenaltyTerm(const std::string& inputFile, const std::string& configFile) { - TCanvas* canvas = new TCanvas("canvas", "canvas", 0, 0, 1024, 1024); + auto canvas = std::make_unique<TCanvas>("canvas", "canvas", 0, 0, 1024, 1024); canvas->SetGrid(); canvas->SetTickx(); canvas->SetTicky(); - canvas->SetBottomMargin(0.1); - canvas->SetTopMargin(0.02); - canvas->SetRightMargin(0.08); - canvas->SetLeftMargin(0.15); + + canvas->SetBottomMargin(0.1f); + canvas->SetTopMargin(0.02f); + canvas->SetRightMargin(0.08f); + canvas->SetLeftMargin(0.15f); gStyle->SetOptTitle(0); gStyle->SetOptStat(0); @@ -146,7 +147,7 @@ void GetPenaltyTerm(const std::string& inputFile, const std::string& configFile) Chain->Add(inputFile.c_str()); // Get the list of branches - TObjArray* brlis = 
(TObjArray*)(Chain->GetListOfBranches()); + TObjArray* brlis = Chain->GetListOfBranches(); // Get the number of branches int nBranches = brlis->GetEntries(); @@ -154,7 +155,11 @@ void GetPenaltyTerm(const std::string& inputFile, const std::string& configFile) for (int i = 0; i < nBranches; i++) { // Get the TBranch and its name - TBranch* br = (TBranch*)brlis->At(i); + TBranch* br = static_cast(brlis->At(i)); + if(!br){ + MACH3LOG_ERROR("Invalid branch at position {}", i); + throw MaCh3Exception(__FILE__,__LINE__); + } TString bname = br->GetName(); // If we're on beam systematics @@ -168,7 +173,7 @@ void GetPenaltyTerm(const std::string& inputFile, const std::string& configFile) // Set all the branches to off Chain->SetBranchStatus("*", false); - double* fParProp = new double[RelevantBranches]; + std::vector fParProp(RelevantBranches); // Turn on the branches which we want for parameters for (int i = 0; i < RelevantBranches; ++i) { @@ -197,7 +202,7 @@ void GetPenaltyTerm(const std::string& inputFile, const std::string& configFile) FancyTittle.push_back(Set[2].as()); } - const int NSets = SetsNames.size(); + const int NSets = int(SetsNames.size()); isRelevantParam.resize(NSets); //Loop over sets in the config @@ -244,15 +249,14 @@ void GetPenaltyTerm(const std::string& inputFile, const std::string& configFile) MACH3LOG_INFO(" Found {} params for set {}", counter, SetsNames[i]); } - int AllEvents = Chain->GetEntries(); - TH1D **hLogL = new TH1D *[NSets]; - for(int i = 0; i < NSets; i++) - { + int AllEvents = int(Chain->GetEntries()); + std::vector> hLogL(NSets); + for (int i = 0; i < NSets; i++) { std::string NameTemp = "LogL_" + SetsNames[i]; - hLogL[i] = new TH1D(NameTemp.c_str(), NameTemp.c_str(), AllEvents, 0 , AllEvents); + hLogL[i] = std::make_unique(NameTemp.c_str(), NameTemp.c_str(), AllEvents, 0, AllEvents); hLogL[i]->SetLineColor(kBlue); } - double* logL = new double[NSets](); + std::vector logL(NSets, 0.0); for(int n = 0; n < AllEvents; ++n) { if(n%10000 
== 0) MaCh3Utils::PrintProgressBar(n, AllEvents); @@ -332,7 +336,6 @@ void GetPenaltyTerm(const std::string& inputFile, const std::string& configFile) { } }//End loop over steps - delete[] logL; // Directory for posteriors std::string OutputName = inputFile + "_PenaltyTerm" +".root"; TFile* OutputFile = new TFile(OutputName.c_str(), "recreate"); @@ -346,7 +349,7 @@ void GetPenaltyTerm(const std::string& inputFile, const std::string& configFile) hLogL[i]->SetTitle(FancyTittle[i].c_str()); hLogL[i]->GetXaxis()->SetTitle("Step"); hLogL[i]->GetYaxis()->SetTitle(FancyTittle[i].c_str()); - hLogL[i]->GetYaxis()->SetTitleOffset(1.4); + hLogL[i]->GetYaxis()->SetTitleOffset(1.4f); hLogL[i]->Draw(""); @@ -354,13 +357,9 @@ void GetPenaltyTerm(const std::string& inputFile, const std::string& configFile) hLogL[i]->Write(); canvas->Print(Form("%s_PenaltyTerm.pdf",inputFile.c_str()), "pdf"); - delete hLogL[i]; } canvas->Print(Form("%s_PenaltyTerm.pdf]",inputFile.c_str()), "pdf"); - delete[] hLogL; - delete[] fParProp; delete Chain; - delete canvas; for (int i = 0; i < size; i++) { diff --git a/Diagnostics/ProcessMCMC.cpp b/Diagnostics/ProcessMCMC.cpp index ceea27fb6..7abca6cbf 100644 --- a/Diagnostics/ProcessMCMC.cpp +++ b/Diagnostics/ProcessMCMC.cpp @@ -2,21 +2,26 @@ #include "mcmc/MCMCProcessor.h" #include "manager/manager.h" +/// @file ProcessMCMC.cpp +/// @brief Main executable responsible for different types of MCMC processing like drawing posteriors, triangle plots etc. 
Actual implementation of methods is in MCMCProcessor + /// @brief Main function processing MCMC and Producing plots inline void ProcessMCMC(const std::string& inputFile); /// @brief Function producing comparison of posterior and more between a few MCMC chains inline void MultipleProcessMCMC(); -inline void CalcBayesFactor(MCMCProcessor* Processor); -inline void CalcSavageDickey(MCMCProcessor* Processor); -inline void CalcBipolarPlot(MCMCProcessor* Processor); -inline void CalcParameterEvolution(MCMCProcessor* Processor); -inline void GetTrianglePlot(MCMCProcessor* Processor); -inline void DiagnoseCovarianceMatrix(MCMCProcessor* Processor, const std::string& inputFile); -inline void ReweightPrior(MCMCProcessor* Processor); +inline void CalcBayesFactor(const std::unique_ptr& Processor); +inline void CalcSavageDickey(const std::unique_ptr& Processor); +inline void CalcBipolarPlot(const std::unique_ptr& Processor); +inline void CalcParameterEvolution(const std::unique_ptr& Processor); +inline void GetTrianglePlot(const std::unique_ptr& Processor); +inline void DiagnoseCovarianceMatrix(const std::unique_ptr& Processor, const std::string& inputFile); +inline void ReweightPrior(const std::unique_ptr& Processor); /// @brief KS: Convert TMatrix to TH2D, mostly useful for making fancy plots inline TH2D* TMatrixIntoTH2D(TMatrixDSym* Matrix, const std::string& title); /// @brief KS: Perform KS test to check if two posteriors for the same parameter came from the same distribution -inline void KolmogorovSmirnovTest(MCMCProcessor** Processor, TCanvas* Posterior, TString canvasname); +inline void KolmogorovSmirnovTest(const std::vector>& Processor, + const std::unique_ptr& Posterior, + const TString& canvasname); int nFiles; std::vector FileNames; @@ -60,7 +65,6 @@ int main(int argc, char *argv[]) MultipleProcessMCMC(); } - return 0; } @@ -68,12 +72,12 @@ void ProcessMCMC(const std::string& inputFile) { MACH3LOG_INFO("File for study: {} with config {}", inputFile, config); // Make 
the processor) - MCMCProcessor* Processor = new MCMCProcessor(inputFile); + auto Processor = std::make_unique(inputFile); YAML::Node card_yaml = YAML::LoadFile(config.c_str()); YAML::Node Settings = card_yaml["ProcessMCMC"]; - bool PlotCorr = GetFromManager(Settings["PlotCorr"], false); + const bool PlotCorr = GetFromManager(Settings["PlotCorr"], false); Processor->SetExcludedTypes(GetFromManager>(Settings["ExcludedTypes"], {""})); Processor->SetExcludedNames(GetFromManager>(Settings["ExcludedNames"], {""})); @@ -93,7 +97,12 @@ void ProcessMCMC(const std::string& inputFile) Processor->SetPost2DPlotThreshold(GetFromManager(Settings["Post2DPlotThreshold"], 0.2)); Processor->Initialise(); - + if(Settings["Thinning"]) + { + if(Settings["Thinning"][0].as()){ + Processor->ThinMCMC(Settings["Thinning"][1].as()); + } + } // Make the postfit Processor->MakePostfit(); Processor->DrawPostfit(); @@ -139,8 +148,6 @@ void ProcessMCMC(const std::string& inputFile) if(GetFromManager(Settings["DiagnoseCovarianceMatrix"], false)) DiagnoseCovarianceMatrix(Processor, inputFile); } if(GetFromManager(Settings["ReweightPrior"], false)) ReweightPrior(Processor); - - delete Processor; } void MultipleProcessMCMC() @@ -148,16 +155,15 @@ void MultipleProcessMCMC() YAML::Node card_yaml = YAML::LoadFile(config.c_str()); YAML::Node Settings = card_yaml["ProcessMCMC"]; - const Color_t PosteriorColor[] = {kBlue-1, kRed, kGreen+2}; - //const Style_t PosteriorStyle[] = {kSolid, kDashed, kDotted}; - nFiles = FileNames.size(); - MCMCProcessor** Processor; - Processor = new MCMCProcessor*[nFiles]; + constexpr Color_t PosteriorColor[] = {kBlue-1, kRed, kGreen+2}; + //constexpr Style_t PosteriorStyle[] = {kSolid, kDashed, kDotted}; + nFiles = int(FileNames.size()); + std::vector> Processor(nFiles); for (int ik = 0; ik < nFiles; ik++) { MACH3LOG_INFO("File for study: {}", FileNames[ik]); // Make the processor - Processor[ik] = new MCMCProcessor(FileNames[ik]); + Processor[ik] = 
std::make_unique(FileNames[ik]); Processor[ik]->SetOutputSuffix(("_" + std::to_string(ik)).c_str()); Processor[ik]->SetExcludedTypes(GetFromManager>(Settings["ExcludedTypes"], {""})); @@ -179,14 +185,14 @@ void MultipleProcessMCMC() } // Open a TCanvas to write the posterior onto - TCanvas* Posterior = new TCanvas("PosteriorMulti", "PosteriorMulti", 0, 0, 1024, 1024); + auto Posterior = std::make_unique("PosteriorMulti", "PosteriorMulti", 0, 0, 1024, 1024); gStyle->SetOptStat(0); gStyle->SetOptTitle(0); Posterior->SetGrid(); - Posterior->SetBottomMargin(0.1); - Posterior->SetTopMargin(0.05); - Posterior->SetRightMargin(0.03); - Posterior->SetLeftMargin(0.10); + Posterior->SetBottomMargin(0.1f); + Posterior->SetTopMargin(0.05f); + Posterior->SetRightMargin(0.03f); + Posterior->SetLeftMargin(0.10f); FileNames[0] = FileNames[0].substr(0, FileNames[0].find(".root")-1); TString canvasname = FileNames[0]; @@ -208,9 +214,9 @@ void MultipleProcessMCMC() for(int i = 0; i < Processor[0]->GetNParams(); ++i) { // This holds the posterior density - TH1D **hpost = new TH1D*[nFiles]; - TLine **hpd = new TLine*[nFiles]; - hpost[0] = (TH1D *) (Processor[0]->GetHpost(i))->Clone(); + std::vector hpost(nFiles); + std::vector> hpd(nFiles); + hpost[0] = static_cast(Processor[0]->GetHpost(i)->Clone()); bool Skip = false; for (int ik = 1 ; ik < nFiles; ik++) @@ -222,7 +228,7 @@ void MultipleProcessMCMC() Skip = true; break; } - hpost[ik] = (TH1D *)(Processor[ik]->GetHpost(Index))->Clone(); + hpost[ik] = static_cast(Processor[ik]->GetHpost(Index)->Clone()); } // Don't plot if this is a fixed histogram (i.e. 
the peak is the whole integral) @@ -231,8 +237,6 @@ void MultipleProcessMCMC() for (int ik = 0; ik < nFiles; ik++) delete hpost[ik]; - delete[] hpost; - delete[] hpd; continue; } for (int ik = 0; ik < nFiles; ik++) @@ -254,27 +258,28 @@ void MultipleProcessMCMC() Processor[0]->GetNthParameter(i, Prior, PriorError, Title); // Now make the TLine for the Asimov - TLine *Asimov = new TLine(Prior, hpost[0]->GetMinimum(), Prior, hpost[0]->GetMaximum()); + auto Asimov = std::make_unique(Prior, hpost[0]->GetMinimum(), Prior, hpost[0]->GetMaximum()); Asimov->SetLineColor(kRed-3); Asimov->SetLineWidth(2); Asimov->SetLineStyle(kDashed); // Make a nice little TLegend - TLegend *leg = new TLegend(0.12, 0.7, 0.6, 0.97); - leg->SetTextSize(0.03); + auto leg = std::make_unique(0.12, 0.7, 0.6, 0.97); + leg->SetTextSize(0.03f); leg->SetFillColor(0); leg->SetFillStyle(0); leg->SetLineColor(0); leg->SetLineStyle(0); TString asimovLeg = Form("#splitline{Prior}{x = %.2f , #sigma = %.2f}", Prior, PriorError); - leg->AddEntry(Asimov, asimovLeg, "l"); + leg->AddEntry(Asimov.get(), asimovLeg, "l"); for (int ik = 0; ik < nFiles; ik++) { TString rebinLeg = Form("#splitline{%s}{#mu = %.2f, #sigma = %.2f}", TitleNames[ik].c_str(), hpost[ik]->GetMean(), hpost[ik]->GetRMS()); leg->AddEntry(hpost[ik], rebinLeg, "l"); - hpd[ik] = new TLine(hpost[ik]->GetBinCenter(hpost[ik]->GetMaximumBin()), hpost[ik]->GetMinimum(), hpost[ik]->GetBinCenter(hpost[ik]->GetMaximumBin()), hpost[ik]->GetMaximum()); + hpd[ik] = std::make_unique(hpost[ik]->GetBinCenter(hpost[ik]->GetMaximumBin()), hpost[ik]->GetMinimum(), + hpost[ik]->GetBinCenter(hpost[ik]->GetMaximumBin()), hpost[ik]->GetMaximum()); hpd[ik]->SetLineColor(hpost[ik]->GetLineColor()); hpd[ik]->SetLineWidth(2); hpd[ik]->SetLineStyle(kSolid); @@ -292,16 +297,9 @@ void MultipleProcessMCMC() leg->Draw("same"); Posterior->cd(); Posterior->Print(canvasname); - - delete Asimov; - delete leg; - for (int ik = 0; ik < nFiles; ik++) - { + for (int ik = 0; ik < 
nFiles; ik++) { delete hpost[ik]; - delete hpd[ik]; } - delete[] hpost; - delete[] hpd; }//End loop over parameters // Finally draw the parameter plot onto the PDF @@ -315,14 +313,10 @@ void MultipleProcessMCMC() MACH3LOG_INFO("Closing pdf {}", canvasname); canvasname+="]"; Posterior->Print(canvasname); - - delete Posterior; - for (int ik = 0; ik < nFiles; ik++) delete Processor[ik]; - delete[] Processor; } // KS: Calculate Bayes factor for a given hypothesis, most informative are those related to osc params. However, it make relative easy interpretation for switch dials -void CalcBayesFactor(MCMCProcessor* Processor) +void CalcBayesFactor(const std::unique_ptr& Processor) { YAML::Node card_yaml = YAML::LoadFile(config.c_str()); YAML::Node Settings = card_yaml["ProcessMCMC"]; @@ -340,10 +334,9 @@ void CalcBayesFactor(MCMCProcessor* Processor) } Processor->GetBayesFactor(ParNames, Model1Bounds, Model2Bounds, ModelNames); - return; } -void CalcSavageDickey(MCMCProcessor* Processor) +void CalcSavageDickey(const std::unique_ptr& Processor) { YAML::Node card_yaml = YAML::LoadFile(config.c_str()); YAML::Node Settings = card_yaml["ProcessMCMC"]; @@ -358,45 +351,38 @@ void CalcSavageDickey(MCMCProcessor* Processor) EvaluationPoint.push_back(d[1].as()); Bounds.push_back(d[2].as>()); } - Processor->GetSavageDickey(ParNames, EvaluationPoint, Bounds); - return; } -void CalcParameterEvolution(MCMCProcessor* Processor) +void CalcParameterEvolution(const std::unique_ptr& Processor) { YAML::Node card_yaml = YAML::LoadFile(config.c_str()); YAML::Node Settings = card_yaml["ProcessMCMC"]; std::vector ParNames; std::vector Intervals; - for (const auto& d : Settings["ParameterEvolution"]) { ParNames.push_back(d[0].as()); Intervals.push_back(d[1].as()); } Processor->ParameterEvolution(ParNames, Intervals); - return; } -void CalcBipolarPlot(MCMCProcessor* Processor) +void CalcBipolarPlot(const std::unique_ptr& Processor) { YAML::Node card_yaml = YAML::LoadFile(config.c_str()); YAML::Node 
Settings = card_yaml["ProcessMCMC"]; std::vector ParNames; - for (const auto& d : Settings["BipolarPlot"]) { ParNames.push_back(d[0].as()); } Processor->GetPolarPlot(ParNames); - return; } - -void GetTrianglePlot(MCMCProcessor* Processor) { +void GetTrianglePlot(const std::unique_ptr& Processor) { YAML::Node card_yaml = YAML::LoadFile(config.c_str()); YAML::Node Settings = card_yaml["ProcessMCMC"]; @@ -416,21 +402,21 @@ void GetTrianglePlot(MCMCProcessor* Processor) { } /// @brief KS: You validate stability of posterior covariance matrix, you set burn calc cov matrix increase burn calc again and compare. By performing such operation several hundred times we can check when matrix becomes stable -void DiagnoseCovarianceMatrix(MCMCProcessor* Processor, const std::string& inputFile) +void DiagnoseCovarianceMatrix(const std::unique_ptr& Processor, const std::string& inputFile) { //Turn of plots from Processor Processor->SetPrintToPDF(false); // Open a TCanvas to write the posterior onto - TCanvas* Canvas = new TCanvas("Canvas", "Canvas", 0, 0, 1024, 1024); + auto Canvas = std::make_unique("Canvas", "Canvas", 0, 0, 1024, 1024); Canvas->SetGrid(); gStyle->SetOptStat(0); gStyle->SetOptTitle(0); Canvas->SetTickx(); Canvas->SetTicky(); - Canvas->SetBottomMargin(0.1); - Canvas->SetTopMargin(0.05); - Canvas->SetRightMargin(0.15); - Canvas->SetLeftMargin(0.10); + Canvas->SetBottomMargin(0.1f); + Canvas->SetTopMargin(0.05f); + Canvas->SetRightMargin(0.15f); + Canvas->SetLeftMargin(0.10f); //KS: Fancy colours const int NRGBs = 10; @@ -450,7 +436,7 @@ void DiagnoseCovarianceMatrix(MCMCProcessor* Processor, const std::string& input YAML::Node card_yaml = YAML::LoadFile(config.c_str()); YAML::Node Settings = card_yaml["ProcessMCMC"]; - const int entries = Processor->GetnSteps(); + const int entries = int(Processor->GetnSteps()); const int NIntervals = GetFromManager(Settings["NIntervals"], 5); const int IntervalsSize = entries/NIntervals; //We start with burn from 0 (no burn in at 
all) @@ -489,8 +475,8 @@ void DiagnoseCovarianceMatrix(MCMCProcessor* Processor, const std::string& input CovarianceHist = TMatrixIntoTH2D(Covariance, "Covariance"); CorrelationHist = TMatrixIntoTH2D(Correlation, "Correlation"); - TH2D *CovarianceDiff = (TH2D*)CovarianceHist->Clone("Covariance_Ratio"); - TH2D *CorrelationDiff = (TH2D*)CorrelationHist->Clone("Correlation_Ratio"); + TH2D *CovarianceDiff = static_cast(CovarianceHist->Clone("Covariance_Ratio")); + TH2D *CorrelationDiff = static_cast(CorrelationHist->Clone("Correlation_Ratio")); //KS: Bit messy but quite often covariance is 0 is divided by 0 is problematic so #ifdef MULTITHREAD @@ -530,10 +516,10 @@ void DiagnoseCovarianceMatrix(MCMCProcessor* Processor, const std::string& input CorrelationDiff->GetXaxis()->SetBinLabel(j+1, Title); CorrelationDiff->GetYaxis()->SetBinLabel(j+1, Title); } - CovarianceDiff->GetXaxis()->SetLabelSize(0.015); - CovarianceDiff->GetYaxis()->SetLabelSize(0.015); - CorrelationDiff->GetXaxis()->SetLabelSize(0.015); - CorrelationDiff->GetYaxis()->SetLabelSize(0.015); + CovarianceDiff->GetXaxis()->SetLabelSize(0.015f); + CovarianceDiff->GetYaxis()->SetLabelSize(0.015f); + CorrelationDiff->GetXaxis()->SetLabelSize(0.015f); + CorrelationDiff->GetYaxis()->SetLabelSize(0.015f); std::stringstream ss; ss << "BCut_"; @@ -562,9 +548,9 @@ void DiagnoseCovarianceMatrix(MCMCProcessor* Processor, const std::string& input //KS: Current hist become previous as we need it for further comparison delete CovariancePreviousHist; - CovariancePreviousHist = (TH2D*)CovarianceHist->Clone(); + CovariancePreviousHist = static_cast(CovarianceHist->Clone()); delete CorrelationPreviousHist; - CorrelationPreviousHist = (TH2D*)CorrelationHist->Clone();; + CorrelationPreviousHist = static_cast(CorrelationHist->Clone()); delete CovarianceHist; CovarianceHist = nullptr; @@ -589,17 +575,14 @@ void DiagnoseCovarianceMatrix(MCMCProcessor* Processor, const std::string& input if(CorrelationPreviousHist != nullptr) 
delete CorrelationPreviousHist; if(CovarianceHist != nullptr) delete CovarianceHist; if(CorrelationHist != nullptr) delete CorrelationHist; - delete Canvas; } -void ReweightPrior(MCMCProcessor* Processor) +void ReweightPrior(const std::unique_ptr& Processor) { - YAML::Node card_yaml = YAML::LoadFile(config.c_str()); YAML::Node Settings = card_yaml["ProcessMCMC"]; const auto& Prior = Settings["PriorReweighting"]; - std::vector Names = Prior[0].as>(); std::vector NewCentral = Prior[1].as>(); std::vector NewError = Prior[2].as>(); @@ -623,7 +606,9 @@ TH2D* TMatrixIntoTH2D(TMatrixDSym* Matrix, const std::string& title) } //KS: Perform KS test to check if two posteriors for the same parameter came from the same distribution -void KolmogorovSmirnovTest(MCMCProcessor** Processor, TCanvas* Posterior, TString canvasname) +void KolmogorovSmirnovTest(const std::vector>& Processor, + const std::unique_ptr& Posterior, + const TString& canvasname) { const Color_t CumulativeColor[] = {kBlue-1, kRed, kGreen+2}; const Style_t CumulativeStyle[] = {kSolid, kDashed, kDotted}; @@ -631,8 +616,8 @@ void KolmogorovSmirnovTest(MCMCProcessor** Processor, TCanvas* Posterior, TStrin for(int i = 0; i < Processor[0]->GetNParams(); ++i) { // This holds the posterior density - TH1D **hpost = new TH1D*[nFiles]; - TH1D **CumulativeDistribution = new TH1D*[nFiles]; + std::vector hpost(nFiles); + std::vector CumulativeDistribution(nFiles); TString Title; double Prior = 1.0; @@ -654,8 +639,8 @@ void KolmogorovSmirnovTest(MCMCProcessor** Processor, TCanvas* Posterior, TStrin break; } } - hpost[ik] = (TH1D*) (Processor[ik]->GetHpost(Index))->Clone(); - CumulativeDistribution[ik] = (TH1D*) (Processor[ik]->GetHpost(Index))->Clone(); + hpost[ik] = static_cast(Processor[ik]->GetHpost(Index)->Clone()); + CumulativeDistribution[ik] = static_cast(Processor[ik]->GetHpost(Index)->Clone()); CumulativeDistribution[ik]->Fill(0., 0.); CumulativeDistribution[ik]->Reset(); CumulativeDistribution[ik]->SetMaximum(1.); 
@@ -679,8 +664,6 @@ void KolmogorovSmirnovTest(MCMCProcessor** Processor, TCanvas* Posterior, TStrin delete hpost[ik]; delete CumulativeDistribution[ik]; } - delete[] hpost; - delete[] CumulativeDistribution; continue; } @@ -698,20 +681,17 @@ void KolmogorovSmirnovTest(MCMCProcessor** Processor, TCanvas* Posterior, TStrin CumulativeDistribution[ik]->SetBinContent(NumberOfBins+1, 1.); } - int* TestStatBin = new int[nFiles]; - double* TestStatD = new double[nFiles]; - TLine **LineD = new TLine*[nFiles]; - - for (int ik = 0 ; ik < nFiles; ik++) { TestStatBin[ik] = 0; TestStatD[ik] = -999;} - + std::vector TestStatBin(nFiles, 0); + std::vector TestStatD(nFiles, -999); + std::vector> LineD(nFiles); //Find KS statistic for (int ik = 1 ; ik < nFiles; ik++) { const int NumberOfBins = CumulativeDistribution[0]->GetXaxis()->GetNbins(); for (int j = 1; j < NumberOfBins+1; ++j) { - double BinValue = CumulativeDistribution[0]->GetBinCenter(j); - int BinNumber = CumulativeDistribution[ik]->FindBin(BinValue); + const double BinValue = CumulativeDistribution[0]->GetBinCenter(j); + const int BinNumber = CumulativeDistribution[ik]->FindBin(BinValue); //KS: Calculate D statistic for this bin, only save it if it's bigger than previously found value double TempDstat = std::fabs(CumulativeDistribution[0]->GetBinContent(j) - CumulativeDistribution[ik]->GetBinContent(BinNumber)); if(TempDstat > TestStatD[ik]) @@ -724,7 +704,7 @@ void KolmogorovSmirnovTest(MCMCProcessor** Processor, TCanvas* Posterior, TStrin for (int ik = 0 ; ik < nFiles; ik++) { - LineD[ik] = new TLine(CumulativeDistribution[0]->GetBinCenter(TestStatBin[ik]), 0, CumulativeDistribution[0]->GetBinCenter(TestStatBin[ik]), CumulativeDistribution[0]->GetBinContent(TestStatBin[ik])); + LineD[ik] = std::make_unique(CumulativeDistribution[0]->GetBinCenter(TestStatBin[ik]), 0, CumulativeDistribution[0]->GetBinCenter(TestStatBin[ik]), CumulativeDistribution[0]->GetBinContent(TestStatBin[ik])); 
LineD[ik]->SetLineColor(CumulativeColor[ik]); LineD[ik]->SetLineWidth(2.0); } @@ -732,12 +712,12 @@ void KolmogorovSmirnovTest(MCMCProcessor** Processor, TCanvas* Posterior, TStrin for (int ik = 0 ; ik < nFiles; ik++) CumulativeDistribution[ik]->Draw("SAME"); - TLegend *leg = new TLegend(0.15, 0.7, 0.5, 0.90); - leg->SetTextSize(0.04); + auto leg = std::make_unique(0.15, 0.7, 0.5, 0.90); + leg->SetTextSize(0.04f); for (int ik = 0; ik < nFiles; ik++) leg->AddEntry(CumulativeDistribution[ik], TitleNames[ik].c_str(), "l"); for (int ik = 1; ik < nFiles; ik++) - leg->AddEntry(LineD[ik], Form("#Delta D = %.4f", TestStatD[ik]), "l"); + leg->AddEntry(LineD[ik].get(), Form("#Delta D = %.4f", TestStatD[ik]), "l"); leg->SetLineColor(0); leg->SetLineStyle(0); @@ -751,17 +731,10 @@ void KolmogorovSmirnovTest(MCMCProcessor** Processor, TCanvas* Posterior, TStrin Posterior->cd(); Posterior->Print(canvasname); - delete leg; for (int ik = 0; ik < nFiles; ik++) { delete hpost[ik]; delete CumulativeDistribution[ik]; - delete LineD[ik]; } - delete[] hpost; - delete[] CumulativeDistribution; - delete[] LineD; - delete[] TestStatBin; - delete[] TestStatD; } //End loop over parameter } diff --git a/Diagnostics/RHat.cpp b/Diagnostics/RHat.cpp index 364274ae4..86ffc76d0 100644 --- a/Diagnostics/RHat.cpp +++ b/Diagnostics/RHat.cpp @@ -36,18 +36,18 @@ int Nchains; int nDraw; -std::vector BranchNames; -std::vector MCMCFile; +std::vector BranchNames; +std::vector MCMCFile; std::vector ValidPar; double ***Draws; double** Mean; double** StandardDeviation; - + double* MeanGlobal; double* StandardDeviationGlobal; - + double* BetweenChainVariance; double* MarginalPosteriorVariance; double* RHat; @@ -58,10 +58,10 @@ double* MedianArr; double** MeanFolded; double** StandardDeviationFolded; - + double* MeanGlobalFolded; double* StandardDeviationGlobalFolded; - + double* BetweenChainVarianceFolded; double* MarginalPosteriorVarianceFolded; double* RHatFolded; @@ -81,7 +81,7 @@ void CapVariable(double 
var, double cap); // ******************* int main(int argc, char *argv[]) { -// ******************* +// ******************* SetMaCh3LoggerFormat(); MaCh3Utils::MaCh3Welcome(); @@ -188,13 +188,13 @@ void PrepareChains() { TChain* Chain = new TChain("posteriors"); Chain->Add(MCMCFile[m].c_str()); MACH3LOG_INFO("On file: {}", MCMCFile[m].c_str()); - nEntries[m] = Chain->GetEntries(); + nEntries[m] = int(Chain->GetEntries()); // Set the step cut to be 20% BurnIn[m] = nEntries[m]/5; // Get the list of branches - TObjArray* brlis = (TObjArray*)(Chain->GetListOfBranches()); + TObjArray* brlis = Chain->GetListOfBranches(); // Get the number of branches nBranches[m] = brlis->GetEntries(); @@ -209,7 +209,11 @@ void PrepareChains() { for (int i = 0; i < nBranches[m]; i++) { // Get the TBranch and its name - TBranch* br = (TBranch*)brlis->At(i); + TBranch* br = static_cast(brlis->At(i)); + if(!br){ + MACH3LOG_ERROR("Invalid branch at position {}", i); + throw MaCh3Exception(__FILE__,__LINE__); + } TString bname = br->GetName(); // Read in the step @@ -243,7 +247,7 @@ void PrepareChains() { } } - if(m == 0) nDraw = BranchNames.size(); + if(m == 0) nDraw = int(BranchNames.size()); //TN: Qualitatively faster sanity check, with the very same outcome (all chains have the same #branches) if(m > 0) @@ -283,7 +287,7 @@ void PrepareChains() { for (int i = 0; i < Ntoys; i++) { // Get a random entry after burn in - int entry = (int)(nEntries[m]*rnd->Rndm()); + int entry = int(nEntries[m]*rnd->Rndm()); Chain->GetEntry(entry); @@ -297,7 +301,7 @@ void PrepareChains() { // Output some info for the user if (Ntoys > 10 && i % (Ntoys/10) == 0) { - MaCh3Utils::PrintProgressBar(i+m*Ntoys, Ntoys*Nchains); + MaCh3Utils::PrintProgressBar(i+m*Ntoys, static_cast(Ntoys)*Nchains); MACH3LOG_DEBUG("Getting random entry {}", entry); } @@ -323,7 +327,7 @@ void PrepareChains() { for(int j = 0; j < nDraw; j++) { MedianArr[j] = 0.; - std::vector TempDraws(Ntoys * Nchains); + std::vector 
TempDraws(static_cast(Ntoys) * Nchains); for(int m = 0; m < Nchains; m++) { for(int i = 0; i < Ntoys; i++) @@ -416,7 +420,7 @@ void InitialiseArrays() { // ******************* void RunDiagnostic() { -// ******************* +// ******************* CalcRhat(); //In case in future we expand this } @@ -426,17 +430,17 @@ void RunDiagnostic() { // Probably most of it could be moved cleverly to MCMC Processor, keep it separate for now void CalcRhat() { // ******************* - + TStopwatch clock; clock.Start(); -//KS: Start parallel region -// If we would like to do this for thousands of chains we might consider using GPU for this + //KS: Start parallel region + // If we would like to do this for thousands of chains we might consider using GPU for this #ifdef MULTITHREAD #pragma omp parallel { #endif - + #ifdef MULTITHREAD #pragma omp for collapse(2) #endif @@ -475,7 +479,7 @@ void CalcRhat() { #ifdef MULTITHREAD #pragma omp for collapse(2) #endif - //Calculate the standard deviation for each parameter within each considered chain + //Calculate the standard deviation for each parameter within each considered chain for (int m = 0; m < Nchains; ++m) { for (int j = 0; j < nDraw; ++j) @@ -575,7 +579,9 @@ void CalcRhat() { // ******************* void SaveResults() { -// ******************* +// ******************* + #pragma GCC diagnostic ignored "-Wfloat-conversion" + std::string NameTemp = ""; //KS: If we run over many many chains there is danger that name will be so absurdly long we run over system limit and job will be killed :( if(Nchains < 5) @@ -585,7 +591,7 @@ void SaveResults() { std::string temp = MCMCFile[i]; while (temp.find(".root") != std::string::npos) { - temp = temp.substr(0, temp.find(".root")); + temp = temp.substr(0, temp.find(".root")); } NameTemp = NameTemp + temp + "_"; @@ -668,13 +674,13 @@ void SaveResults() { RhatFoldedLogPlot->Write(); //KS: Now we make fancy canvases, consider some function to have less copy pasting - TCanvas *TempCanvas = new 
TCanvas("Canvas", "Canvas", 1024, 1024); + auto TempCanvas = std::make_unique("Canvas", "Canvas", 1024, 1024); gStyle->SetOptStat(0); TempCanvas->SetGridx(); TempCanvas->SetGridy(); // Random line to write useful information to TLegend - TLine *TempLine = new TLine(0 , 0, 0, 0); + auto TempLine = std::make_unique(0, 0, 0, 0); TempLine->SetLineColor(kBlack); RhatPlot->GetXaxis()->SetTitle("R hat"); @@ -690,7 +696,7 @@ void SaveResults() { Legend->SetLineWidth(0); Legend->SetLineColor(0); - Legend->AddEntry(TempLine, Form("Number of throws=%.0i, Number of chains=%.1i", Ntoys, Nchains), ""); + Legend->AddEntry(TempLine.get(), Form("Number of throws=%.0i, Number of chains=%.1i", Ntoys, Nchains), ""); Legend->AddEntry(RhatPlot, "Rhat Gelman 2013", "l"); Legend->AddEntry(RhatFoldedPlot, "Rhat-Folded Gelman 2021", "l"); @@ -715,7 +721,7 @@ void SaveResults() { Legend->SetLineWidth(0); Legend->SetLineColor(0); - Legend->AddEntry(TempLine, Form("Number of throws=%.0i, Number of chains=%.1i", Ntoys, Nchains), ""); + Legend->AddEntry(TempLine.get(), Form("Number of throws=%.0i, Number of chains=%.1i", Ntoys, Nchains), ""); Legend->AddEntry(RhatLogPlot, "Rhat Gelman 2013", "l"); Legend->AddEntry(RhatFoldedLogPlot, "Rhat-Folded Gelman 2021", "l"); @@ -743,7 +749,7 @@ void SaveResults() { const double Mean2 = EffectiveSampleSizeFoldedPlot->GetMean(); const double RMS2 = EffectiveSampleSizeFoldedPlot->GetRMS(); - Legend->AddEntry(TempLine, Form("Number of throws=%.0i, Number of chains=%.1i", Ntoys, Nchains), ""); + Legend->AddEntry(TempLine.get(), Form("Number of throws=%.0i, Number of chains=%.1i", Ntoys, Nchains), ""); Legend->AddEntry(EffectiveSampleSizePlot, Form("S_{eff, BDA2} #mu = %.2f, #sigma = %.2f",Mean1 ,RMS1), "l"); Legend->AddEntry(EffectiveSampleSizeFoldedPlot, Form("S_{eff, BDA2} Folded, #mu = %.2f, #sigma = %.2f",Mean2 ,RMS2), "l"); @@ -766,8 +772,6 @@ void SaveResults() { delete EffectiveSampleSizeFoldedPlot; delete Legend; - delete TempCanvas; - delete TempLine; 
delete RhatLogPlot; delete RhatFoldedLogPlot; @@ -782,7 +786,7 @@ void SaveResults() { //KS: Pseudo destructor void DestroyArrays() { // ******************* - + MACH3LOG_INFO("Killing all arrays"); delete[] MeanGlobal; delete[] StandardDeviationGlobal; @@ -823,22 +827,19 @@ void DestroyArrays() { delete[] StandardDeviationFolded; } - // ******************* //calculate median double CalcMedian(double arr[], const int size) { -// ******************* +// ******************* std::sort(arr, arr+size); if (size % 2 != 0) - return (double)arr[size/2]; - return (double)(arr[(size-1)/2] + arr[size/2])/2.0; + return arr[size/2]; + return (arr[(size-1)/2] + arr[size/2])/2.0; } - // ******************* //calculate median void CapVariable(double var, const double cap) { -// ******************* - +// ******************* if(std::isnan(var) || !std::isfinite(var)) var = cap; } diff --git a/Doc/Doxyfile b/Doc/Doxyfile index bcfdfc610..b102af41c 100644 --- a/Doc/Doxyfile +++ b/Doc/Doxyfile @@ -51,14 +51,14 @@ PROJECT_BRIEF = "Reference Guide" # and the maximum width should not exceed 200 pixels. Doxygen will copy the logo # to the output directory. -PROJECT_LOGO = "../Doc/mach3logo_small.png" +PROJECT_LOGO = "Doc/mach3logo_small.png" # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. -OUTPUT_DIRECTORY = +OUTPUT_DIRECTORY = Doc/ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and @@ -142,7 +142,7 @@ FULL_PATH_NAMES = YES # will be relative from the directory where doxygen is started. # This tag requires that the tag FULL_PATH_NAMES is set to YES. 
-STRIP_FROM_PATH = /github/workspace/ +STRIP_FROM_PATH = /home/runner/work/MaCh3/MaCh3/ # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the # path mentioned in the documentation of a class, which tells the reader which @@ -151,7 +151,7 @@ STRIP_FROM_PATH = /github/workspace/ # specify the list of include paths that are normally passed to the compiler # using the -I flag. -STRIP_FROM_INC_PATH = /github/workspace/ +STRIP_FROM_INC_PATH = /home/runner/work/MaCh3/MaCh3/ # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but # less readable) file names. This can be useful is your file systems doesn't @@ -670,7 +670,7 @@ LAYOUT_FILE = # search path. Do not use file names with spaces, bibtex cannot handle them. See # also \cite for info how to create references. -CITE_BIB_FILES = ../Doc/bibliography.bib +CITE_BIB_FILES = Doc/bibliography.bib #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages @@ -741,7 +741,7 @@ WARN_LOGFILE = # spaces. # Note: If this tag is empty the current directory is searched. -INPUT = mainpage.md ../ ../manager ../splines ../samplePDF ../OscClass ../mcmc ../Diagnostics ../plotting ../plotting/plottingUtils ../Diagnostics/Diagnostics_utils ../covariance Plots/ +INPUT = . Doc/mainpage.md manager splines samplePDF mcmc Diagnostics plotting plotting/plottingUtils Diagnostics/Diagnostics_utils covariance # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses @@ -891,7 +891,7 @@ FILTER_SOURCE_PATTERNS = # (index.html). This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. 
-USE_MDFILE_AS_MAINPAGE = mainpage.md +USE_MDFILE_AS_MAINPAGE = Doc/mainpage.md #--------------------------------------------------------------------------- # Configuration options related to source browsing @@ -1080,7 +1080,7 @@ HTML_STYLESHEET = # see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_EXTRA_STYLESHEET = ../Doc/MaCh3.css +HTML_EXTRA_STYLESHEET = Doc/MaCh3.css # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note diff --git a/Doc/MaCh3DockerFiles/Alma9/Dockerfile b/Doc/MaCh3DockerFiles/Alma9/Dockerfile index f299eac0d..b1d9daeb5 100644 --- a/Doc/MaCh3DockerFiles/Alma9/Dockerfile +++ b/Doc/MaCh3DockerFiles/Alma9/Dockerfile @@ -40,5 +40,9 @@ ENV MaCh3_ROOT=${MACH3_INSTALL_DIR} ENV PATH=${MaCh3_ROOT}/bin:${PATH} \ LD_LIBRARY_PATH=${MaCh3_ROOT}/lib:${LD_LIBRARY_PATH} +# pip install pyMaCh3 +WORKDIR ${MACH3_WORK_DIR} +RUN pip install . + # Start from MaCh3 install dir WORKDIR ${MACH3_INSTALL_DIR} diff --git a/Doc/MaCh3DockerFiles/Fedora32/Dockerfile b/Doc/MaCh3DockerFiles/Fedora32/Dockerfile index dd8cf1f7e..8f51df245 100644 --- a/Doc/MaCh3DockerFiles/Fedora32/Dockerfile +++ b/Doc/MaCh3DockerFiles/Fedora32/Dockerfile @@ -38,5 +38,9 @@ ENV MaCh3_ROOT=${MACH3_INSTALL_DIR} ENV PATH=${MaCh3_ROOT}/bin:${PATH} \ LD_LIBRARY_PATH=${MaCh3_ROOT}/lib:${LD_LIBRARY_PATH} +# pip install pyMaCh3 +WORKDIR ${MACH3_WORK_DIR} +RUN pip install . + # Start from MaCh3 install dir WORKDIR ${MACH3_INSTALL_DIR} diff --git a/Doc/MaCh3DockerFiles/Rocky9/Dockerfile b/Doc/MaCh3DockerFiles/Rocky9/Dockerfile new file mode 100644 index 000000000..ae38be7c7 --- /dev/null +++ b/Doc/MaCh3DockerFiles/Rocky9/Dockerfile @@ -0,0 +1,46 @@ +#To run use: docker build -t mach3 . 
+# KS: Get glorious container from Kamil which will work as a base +FROM kamilskwarczynski/nukamil:latest AS mach3_build + +# Add a label for the author +LABEL maintainer="The MaCh3 Collaboration" +LABEL website="https://mach3-software.github.io/MaCh3/" +LABEL org.opencontainers.image.description="Official MaCh3 container" + +# Declare the build argument +ARG MACH3_VERSION +ENV MACH3_VERSION=${MACH3_VERSION:-develop} + +ARG CMAKE_OPTIONS +ENV CMAKE_OPTIONS=${CMAKE_OPTIONS:-DMaCh3_PYTHON_ENABLED=ON} + +ARG INSTALL_OPTIONS +ENV INSTALL_OPTIONS=${INSTALL_OPTIONS:-"VERBOSE=1"} + +ENV MACH3_WORK_DIR=/opt/MaCh3/ +ENV MACH3_INSTALL_DIR=/opt/MaCh3/build/ + +RUN mkdir -p ${MACH3_WORK_DIR} + +WORKDIR /opt/ +# KS: Let's clone MaCh3 +RUN --mount=type=ssh git clone https://github.com/mach3-software/MaCh3 ${MACH3_WORK_DIR} +WORKDIR ${MACH3_WORK_DIR} +RUN git checkout ${MACH3_VERSION} + +RUN mkdir -p ${MACH3_INSTALL_DIR} +WORKDIR ${MACH3_INSTALL_DIR} +RUN cmake ${CMAKE_OPTIONS} ${MACH3_WORK_DIR} +RUN make ${INSTALL_OPTIONS} && make install + +# KS: Need to set them here, otherwise container using this container will not be able to find MaCh3 +ENV MaCh3_ROOT=${MACH3_INSTALL_DIR} +ENV PATH=${MaCh3_ROOT}/bin:${PATH} \ + LD_LIBRARY_PATH=${MaCh3_ROOT}/lib:${LD_LIBRARY_PATH} + +# pip install pyMaCh3 +WORKDIR ${MACH3_WORK_DIR} +RUN pip install . 
+ +# Start from MaCh3 install dir +WORKDIR ${MACH3_INSTALL_DIR} diff --git a/Doc/MaCh3DockerFiles/Ubuntu22.04/Dockerfile b/Doc/MaCh3DockerFiles/Ubuntu22.04/Dockerfile index ea571ec8c..60522c02b 100644 --- a/Doc/MaCh3DockerFiles/Ubuntu22.04/Dockerfile +++ b/Doc/MaCh3DockerFiles/Ubuntu22.04/Dockerfile @@ -13,7 +13,7 @@ RUN apt update && apt upgrade -y RUN apt-get install -y nlohmann-json3-dev #MISC_SW -RUN apt install -y --no-install-recommends vim less nano gdb csh tcsh ed quota python3 python3-dev cvs procmail ca-certificates +RUN apt install -y --no-install-recommends vim less nano gdb csh tcsh ed quota python3 python3-dev python3-pip cvs procmail ca-certificates # Declare the build argument ARG MACH3_VERSION @@ -44,5 +44,9 @@ ENV MaCh3_ROOT=${MACH3_INSTALL_DIR} ENV PATH=${MaCh3_ROOT}/bin:${PATH} \ LD_LIBRARY_PATH=${MaCh3_ROOT}/lib:${LD_LIBRARY_PATH} +# pip install pyMaCh3 +WORKDIR ${MACH3_WORK_DIR} +RUN pip install . + # Start from MaCh3 install dir WORKDIR ${MACH3_INSTALL_DIR} diff --git a/Doc/bibliography.bib b/Doc/bibliography.bib index 296044540..79fac0844 100755 --- a/Doc/bibliography.bib +++ b/Doc/bibliography.bib @@ -168,3 +168,15 @@ @article{gelman2019 doi = "10.48550/arXiv.1903.08008", url = "https://doi.org/10.48550/arXiv.1903.08008" } + +@article{2011ThinningMCMC, + author = {William A. Link and Mitchell J. 
Eaton}, + title = {On thinning of chains in MCMC}, + journal = {Methods in Ecology and Evolution}, + volume = {2}, + number = {3}, + pages = {305-310}, + year = {2011}, + doi = {10.1111/j.2041-210X.2011.00131.x}, + url = {https://doi.org/10.1111/j.2041-210X.2011.00131.x}, +} diff --git a/Doc/html/nojekyll.txt b/Doc/html/nojekyll.txt new file mode 100755 index 000000000..8b1378917 --- /dev/null +++ b/Doc/html/nojekyll.txt @@ -0,0 +1 @@ + diff --git a/Doc/sphinx/source/covariance.rst b/Doc/sphinx/source/covariance.rst index ff2fceab2..ed0593167 100644 --- a/Doc/sphinx/source/covariance.rst +++ b/Doc/sphinx/source/covariance.rst @@ -2,9 +2,9 @@ Covariance ========== This module contains the covariance objetcs which Mach3 uses to deal with systematic parameters. It also includes -the :py:class:`pyMaCh3.covariance.CovarianceBase` class which you can use to implement your own! +the :py:class:`pyMaCh3._pyMaCh3.covariance.CovarianceBase` class which you can use to implement your own! -.. automodapi:: pyMaCh3.covariance +.. automodapi:: pyMaCh3._pyMaCh3.covariance :members: :undoc-members: :show-inheritance: \ No newline at end of file diff --git a/Doc/sphinx/source/fitter.rst b/Doc/sphinx/source/fitter.rst index c2126795b..fa817ebc2 100644 --- a/Doc/sphinx/source/fitter.rst +++ b/Doc/sphinx/source/fitter.rst @@ -2,9 +2,9 @@ Fitter ====== This module contains the various MaCh3 fitter algorithms which are available, as well as -the :py:class:`pyMaCh3.fitter.FitterBase` class which you can use to implement your own! +the :py:class:`pyMaCh3._pyMaCh3.fitter.FitterBase` class which you can use to implement your own! -.. automodapi:: pyMaCh3.fitter +.. 
automodapi:: pyMaCh3._pyMaCh3.fitter :members: :undoc-members: :show-inheritance: diff --git a/Doc/sphinx/source/manager.rst b/Doc/sphinx/source/manager.rst index 5f62a08ab..5248d3838 100644 --- a/Doc/sphinx/source/manager.rst +++ b/Doc/sphinx/source/manager.rst @@ -1,7 +1,7 @@ Manager ======= -This module handles the high level stuff like config options and YAML stuff. The main class is the :py:class:`pyMaCh3.manager.Manager` class. \ +This module handles the high level stuff like config options and YAML stuff. The main class is the :py:class:`pyMaCh3._pyMaCh3.manager.Manager` class. \ You can read more about the manager and config files on `the wiki page `_. \ YAML stuff works essentially the same as in the c++ version but with some caveats. The main difference is that in the python version, the way that you access the actual data of a yaml node is different due to the way the python binding of the c++ code works. @@ -45,7 +45,7 @@ Parsing arrays can be made a bit less painful using list comprehension :: str_list = [i.data() for i in node['StrArray']] -.. automodapi:: pyMaCh3.manager +.. automodapi:: pyMaCh3._pyMaCh3.manager :members: :undoc-members: :show-inheritance: diff --git a/Doc/sphinx/source/plotting.rst b/Doc/sphinx/source/plotting.rst index c7293bf97..9ccb4cc80 100644 --- a/Doc/sphinx/source/plotting.rst +++ b/Doc/sphinx/source/plotting.rst @@ -3,7 +3,7 @@ plotting The plotting module can be used to make beautiful plots. See the `plotting wiki page `_ for information on how to configure the plotting library to work with your MaCh3 output files and other non-MaCh3 based fitters so you can compare results. -The main class to worry about is :py:class:`pyMaCh3.plotting.PlottingManager` which provides the +The main class to worry about is :py:class:`pyMaCh3._pyMaCh3.plotting.PlottingManager` which provides the high level functionality and gives you access to everything else you should need. 
To use this in your plotting script simply do :: @@ -22,7 +22,7 @@ To use this in your plotting script simply do :: -.. automodapi:: pyMaCh3.plotting +.. automodapi:: pyMaCh3._pyMaCh3.plotting :members: :undoc-members: :show-inheritance: diff --git a/Doc/sphinx/source/sample-pdf.rst b/Doc/sphinx/source/sample-pdf.rst index 079a5b576..23cde3040 100644 --- a/Doc/sphinx/source/sample-pdf.rst +++ b/Doc/sphinx/source/sample-pdf.rst @@ -4,12 +4,12 @@ Sample PDF This module deals with sampling from the posterior density function of your particular experimental model at different points, given your data. -In order to do this, you will generally need to create a SamplePDF object derived from :py:class:`pyMaCh3.fitter.SamplePDFFDBase` +In order to do this, you will generally need to create a SamplePDF object derived from :py:class:`pyMaCh3._pyMaCh3.fitter.SamplePDFFDBase` for each sample of events for your experiment. For some more details on this you can see `the wiki page `_ on this. The code examples there are written using c++ however the general ideas are the same. Happy sampling! -.. automodapi:: pyMaCh3.sample_pdf +.. automodapi:: pyMaCh3._pyMaCh3.sample_pdf :members: :undoc-members: :show-inheritance: diff --git a/Doc/sphinx/source/splines.rst b/Doc/sphinx/source/splines.rst index 35e5b5a32..fa6e1a342 100644 --- a/Doc/sphinx/source/splines.rst +++ b/Doc/sphinx/source/splines.rst @@ -3,9 +3,9 @@ Splines This module provides the utility for dealing with spline parameters. For some background reading se the `Splines wiki page `_. -The main class which represents individual spline functions is :py:class:`pyMaCh3.splines.ResponseFunction`. +The main class which represents individual spline functions is :py:class:`pyMaCh3._pyMaCh3.splines.ResponseFunction`. This is an abstract representation which covers multiple different types of interpolation, where the type of interpolation is specified at the time of construction. 
-The available interpolation types are defined by :py:class:`pyMaCh3.splines.InterpolationType`. Here are some examples +The available interpolation types are defined by :py:class:`pyMaCh3._pyMaCh3.splines.InterpolationType`. Here are some examples .. image:: spline-examples.png :width: 400 @@ -21,7 +21,7 @@ To construct a ResponseFunction you must specify the x and y positions of the kn TSpline3_response_2 = splines.ResponseFunction([0.0, 1.0, 2.0], [2.0, 3.0, 0.0], splines.InterpolationType.Cubic_TSpline3) linear_response_2 = splines.ResponseFunction([10.0, 11.0, 12.0], [3.0, 0.0, 4.5], splines.InterpolationType.Linear) -Another important part of this module is the :py:class:`pyMaCh3.splines.EventSplineMonolith` class which allows you to easily and speedily deal with event-by-event splines in your analysis. +Another important part of this module is the :py:class:`pyMaCh3._pyMaCh3.splines.EventSplineMonolith` class which allows you to easily and speedily deal with event-by-event splines in your analysis. To build this you first need to construct a response function for each event-by-event spline parameter for each of your events as in the example above. Let's take those example responses and build a simple EventSplineMonolith:: @@ -30,7 +30,7 @@ Let's take those example responses and build a simple EventSplineMonolith:: This will create an EventSplineMonolith which can deal with the reweighting of two events with two spline parameters. We now need to be able to set the values of the parameters so that we can calculate event weights. -This is done using the :py:func:`pyMaCh3.splines.EventSplineMonolith.set_param_value_array` function. +This is done using the :py:func:`pyMaCh3._pyMaCh3.splines.EventSplineMonolith.set_param_value_array` function. This allows us to bind a numpy array to our EventSplineMonolith, whose values we can change, and this will set the values of the parameters inside of the monolith. 
This works as follows:: @@ -62,7 +62,7 @@ This works as follows:: Happy splining! -.. automodapi:: pyMaCh3.splines +.. automodapi:: pyMaCh3._pyMaCh3.splines :members: :undoc-members: :show-inheritance: \ No newline at end of file diff --git a/README.md b/README.md index 6f7d2bd09..43b01b5f0 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ Example of plots made using MaCh3 apparent in scientific publications, for more MaCh3 ## Cite -When citing MaCh3, please use [on Zenodo](https://zenodo.org/records/7608367) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.13642670.svg)](https://doi.org/10.5281/zenodo.13642670). +When citing MaCh3, please use [on Zenodo](https://zenodo.org/records/7608367) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7608367.svg)](https://doi.org/10.5281/zenodo.7608367). ## How to Compile MaCh3 follows standard cmake pattern. By default you should get most optimal build setting although below we list many configurable options: @@ -63,14 +63,16 @@ Some functionalities rely on setting `Env{MACH3}` which should point to path exp ## Python -MaCh3 can be compiled with a python interface by specifying the cmake option +MaCh3 has an optional python interface (pyMaCh3) which provides much of the same functionality as the c++ interface (see [here](https://mach3-software.github.io/MaCh3/pyMaCh3/mainpage.html) for documentation). + +You can tell the build system to set up the pyMaCh3 interface by specifying + ```bash cmake ../ -DMaCh3_PYTHON_ENABLED=ON make && make install ``` -Currently the python module only contains an interface to the plotting library (see [here](https://github.com/mach3-software/MaCh3/blob/develop/plotting/README.md#python) for more information on how to use it) - +when building ### Building with Pip @@ -79,7 +81,7 @@ Additionally, you can build just the Python module by doing: ```bash pip install -t . 
``` -The -t option specifies an install location which can be useful if you are on a computing cluster and don't have write access to the default install location. If you specify a non-standard location you will need to add it to your `PYTHONPATH` as above so that python can find the module. +The (optional) -t option specifies an install location which can be useful if you are on a computing cluster and don't have write access to the default install location. If you specify a non-standard location you will need to add it to your `PYTHONPATH` as above so that python can find the module. ## Multithreading MaCh3 quite heavily relies on Multithreading, it is turned on by default. If for debugging purposes you would like to turn it off please use @@ -107,7 +109,7 @@ Following neutrino oscillation calculators are available: | Prob3++Linear | CPU | Beam | | | NuFastLinear | CPU | Beam | [Ref](https://doi.org/10.48550/arXiv.2405.02400) | -If nothing is specified in cmake build then CUDAProb3Linear will be used. To control which oscillation calculators you want to use here is syntax: +If nothing is specified in cmake build then NuFastLinear_ENABLED will be used. 
To control which oscillation calculators you want to use here is syntax: ```bash cmake ../ -DCUDAProb3Linear_ENABLED=ON -DCUDAProb3_ENABLED=ON -DProbGPULinear_ENABLED=ON -DProb3ppLinear_ENABLED=ON -DNuFastLinear_ENABLED=ON @@ -150,6 +152,7 @@ Based on several test here are recommended version: | Name | Status | |-------------|--------| | Alma9 | ✅ | +| Rocky9 | ✅ | | Ubuntu22.04 | ✅ | | Fedora32 | ✅ | | CentOS7 | ❔ | diff --git a/cmake/Modules/CUDASamples.cmake b/cmake/Modules/CUDASamples.cmake index d25772339..363b09afd 100644 --- a/cmake/Modules/CUDASamples.cmake +++ b/cmake/Modules/CUDASamples.cmake @@ -30,7 +30,7 @@ if(NOT CUDASAMPLES_FOUND) endif() cmessage(STATUS "Using the following CUDA samples paths: ${CMAKE_CUDA_SAMPLES_PATH}") -target_include_directories(MaCh3CompilerOptions INTERFACE +target_include_directories(MaCh3CompileDefinitions INTERFACE "$" "$" ) diff --git a/cmake/Modules/CUDASetup.cmake b/cmake/Modules/CUDASetup.cmake index 7fe7ad7f5..d78786271 100644 --- a/cmake/Modules/CUDASetup.cmake +++ b/cmake/Modules/CUDASetup.cmake @@ -47,7 +47,7 @@ string(REPLACE ";" " " CUDA_ARCHITECTURES_STR "${CMAKE_CUDA_ARCHITECTURES}") cmessage(STATUS "Using following CUDA architectures: ${CUDA_ARCHITECTURES_STR}") if(NOT MaCh3_DEBUG_ENABLED) - target_compile_options(MaCh3CompilerOptions INTERFACE + target_compile_options(MaCh3CompileDefinitions INTERFACE "$<$:-prec-sqrt=false;-use_fast_math;-O3;-Werror;cross-execution-space-call;-w>" "$<$:-Xptxas=-allow-expensive-optimizations=true;-Xptxas=-fmad=true;-Xptxas=-O3;>" "$<$:-Xcompiler=-fpic;-Xcompiler=-O3;-Xcompiler=-Wall;-Xcompiler=-Wextra;-Xcompiler=-Werror;-Xcompiler=-Wno-error=unused-parameter>" @@ -55,15 +55,15 @@ if(NOT MaCh3_DEBUG_ENABLED) else() #CW: -g and -G for debug flags to use cuda-gdb; slows stuff A LOT #-pxtas-options=-v, -maxregcount=N - target_compile_options(MaCh3CompilerOptions INTERFACE + target_compile_options(MaCh3CompileDefinitions INTERFACE 
"$<$:-prec-sqrt=false;-use_fast_math;-Werror;cross-execution-space-call;-w>" "$<$:-Xcompiler=-g;>" "$<$:-Xptxas=-dlcm=ca;-Xptxas=-warn-lmem-usage;-Xptxas=-warn-spills;-Xptxas=-v;-Xcompiler=-Wall;-Xcompiler=-Wextra;-Xcompiler=-Werror;-Xcompiler=-Wno-error=unused-parameter>" ) - target_compile_definitions(MaCh3CompilerOptions INTERFACE "$<$:CUDA_ERROR_CHECK>") + target_compile_definitions(MaCh3CompileDefinitions INTERFACE "$<$:CUDA_ERROR_CHECK>") endif() -target_include_directories(MaCh3CompilerOptions INTERFACE ${CUDAToolkit_INCLUDE_DIRS}) +target_include_directories(MaCh3CompileDefinitions INTERFACE ${CUDAToolkit_INCLUDE_DIRS}) if(MaCh3_DEBUG_ENABLED) include(${CMAKE_CURRENT_LIST_DIR}/CUDASamples.cmake) @@ -76,7 +76,7 @@ if(NOT DEFINED NSplines_GPU) endif() # Pass NSplines_GPU as a preprocessor definition to the compiler -target_compile_definitions(MaCh3CompilerOptions INTERFACE NSplines_GPU=${NSplines_GPU}) +target_compile_definitions(MaCh3CompileDefinitions INTERFACE NSplines_GPU=${NSplines_GPU}) cmessage(STATUS "Using \"${NSplines_GPU}\" for GPU EventByEvent Splines") diff --git a/cmake/Modules/MaCh3Dependencies.cmake b/cmake/Modules/MaCh3Dependencies.cmake index 1469b679d..2a6fb0976 100755 --- a/cmake/Modules/MaCh3Dependencies.cmake +++ b/cmake/Modules/MaCh3Dependencies.cmake @@ -2,50 +2,67 @@ # download CPM.cmake file( DOWNLOAD - https://github.com/cpm-cmake/CPM.cmake/releases/download/v0.38.8/CPM.cmake + https://github.com/cpm-cmake/CPM.cmake/releases/download/v0.40.2/CPM.cmake ${CMAKE_CURRENT_BINARY_DIR}/cmake/CPM.cmake ) include(${CMAKE_CURRENT_BINARY_DIR}/cmake/CPM.cmake) -#Luke's handing cmake modules which Neutrino hep experiments might want -CPMFindPackage( - NAME CMakeModules - GIT_TAG stable - GITHUB_REPOSITORY NuHepMC/CMakeModules - DOWNLOAD_ONLY - ) -include(${CMakeModules_SOURCE_DIR}/NuHepMCModules.cmake) -include(NuHepMCUtils) - # Check if CUDA was found if(MaCh3_GPU_ENABLED) include(${CMAKE_CURRENT_LIST_DIR}/CUDASetup.cmake) endif() 
-include(ROOT) -if(NOT TARGET ROOT::ROOT) - cmessage(FATAL_ERROR "MaCh3 Expected dependency target: ROOT::ROOT") -endif() +### Begin ROOT setup +find_package(ROOT 6.18 REQUIRED) + +STRING(STRIP "${ROOT_CXX_FLAGS}" ROOT_CXX_FLAGS_LIST) +STRING(REPLACE " " ";" ROOT_CXX_FLAGS_LIST ${ROOT_CXX_FLAGS_LIST}) + +list (FIND ROOT_CXX_FLAGS_LIST "-std=c++14" CPP14_INDEX) +list (FIND ROOT_CXX_FLAGS_LIST "-std=c++1y" CPP1Y_INDEX) +list (FIND ROOT_CXX_FLAGS_LIST "-std=c++17" CPP17_INDEX) +list (FIND ROOT_CXX_FLAGS_LIST "-std=c++1z" CPP1Z_INDEX) +list (FIND ROOT_CXX_FLAGS_LIST "-std=c++20" CPP20_INDEX) -if(ROOT_VERSION VERSION_LESS 6.18.00) - cmessage(FATAL_ERROR "Using ROOT version smaller than 6.18.0, this may lead to unexpected results") +if (CPP14_INDEX GREATER -1) + SET(ROOT_CXX_STANDARD 14) +elseif (CPP1Y_INDEX GREATER -1) + SET(ROOT_CXX_STANDARD 14) +elseif (CPP17_INDEX GREATER -1) + SET(ROOT_CXX_STANDARD 17) +elseif (CPP1Z_INDEX GREATER -1) + SET(ROOT_CXX_STANDARD 17) +elseif (CPP20_INDEX GREATER -1) + SET(ROOT_CXX_STANDARD 20) endif() +cmessage(STATUS "ROOT_CXX_FLAGS: \"${ROOT_CXX_FLAGS}\" -> ROOT_CXX_STANDARD: ${ROOT_CXX_STANDARD}") + +execute_process(COMMAND root-config --features + OUTPUT_VARIABLE ROOT_CONFIG_FEATURES OUTPUT_STRIP_TRAILING_WHITESPACE) +string(REPLACE " " ";" ROOT_FEATURES_LIST "${ROOT_CONFIG_FEATURES}") +# Check if "minuit2" is in the list of ROOT features +list(FIND ROOT_FEATURES_LIST "minuit2" ROOT_CONFIG_MINUIT2) + # KS: Since ROOT 6.32.0 Minuit is turned on by default set(MaCh3_MINUIT2_ENABLED FALSE) -if(ROOT_VERSION GREATER_EQUAL 6.32.00 OR ROOT_CXX_FLAGS MATCHES "-DMINUIT2_ENABLED") +if(ROOT_VERSION GREATER_EQUAL 6.32.00 OR ROOT_CONFIG_MINUIT2 GREATER -1) set(MaCh3_MINUIT2_ENABLED TRUE) endif() +### End ROOT setup #YAML for reading in config files -set(YAML_CPP_VERSION 0.7.0) #KS: We need it for version.h file also define this number olny once -set(YAML_CPP_GIT_TAG "yaml-cpp-${YAML_CPP_VERSION}") +set(YAML_CPP_VERSION 0.8.0) #KS: We need it for 
version.h file also define this number only once CPMAddPackage( NAME yaml-cpp VERSION ${YAML_CPP_VERSION} GITHUB_REPOSITORY "jbeder/yaml-cpp" - GIT_TAG "${YAML_CPP_GIT_TAG}" + GIT_TAG "${YAML_CPP_VERSION}" + GIT_SHALLOW YES OPTIONS + "YAML_CPP_INSTALL ON" + "YAML_CPP_BUILD_TESTS OFF" + "YAML_CPP_BUILD_CONTRIB OFF" "YAML_BUILD_SHARED_LIBS ON" ) @@ -72,7 +89,7 @@ set(MaCh3_Fitter_ENABLED "MR2T2") LIST(APPEND MaCh3_Fitter_ENABLED " PSO") if(MaCh3_MINUIT2_ENABLED) LIST(APPEND MaCh3_Fitter_ENABLED " Minuit2") - target_compile_definitions(MaCh3CompilerOptions INTERFACE MaCh3_MINUIT2) + target_compile_definitions(MaCh3CompileDefinitions INTERFACE MaCh3_MINUIT2) endif() @@ -82,11 +99,11 @@ DefineEnabledRequiredSwitch(MaCh3_PYTHON_ENABLED FALSE) if( MaCh3_PYTHON_ENABLED ) set(PYBIND11_FINDPYTHON ON) - CPMFindPackage( NAME pybind11 VERSION 2.13.5 GITHUB_REPOSITORY "pybind/pybind11" + GIT_SHALLOW YES GIT_TAG v2.13.5 ) endif() diff --git a/cmake/Modules/MaCh3Utils.cmake b/cmake/Modules/MaCh3Utils.cmake index 02213380d..546fd285b 100755 --- a/cmake/Modules/MaCh3Utils.cmake +++ b/cmake/Modules/MaCh3Utils.cmake @@ -36,3 +36,47 @@ if(NOT COMMAND SwitchLogic) endif() endfunction() endif() + +if(NOT COMMAND cmessage) + function(cmessage) + if(NOT WIN32) + string(ASCII 27 Esc) + set(CM_ColourReset "${Esc}[m") + set(CM_ColourBold "${Esc}[1m") + set(CM_Red "${Esc}[31m") + set(CM_Green "${Esc}[32m") + set(CM_Yellow "${Esc}[33m") + set(CM_Blue "${Esc}[34m") + set(CM_Magenta "${Esc}[35m") + set(CM_Cyan "${Esc}[36m") + set(CM_White "${Esc}[37m") + set(CM_BoldRed "${Esc}[1;31m") + set(CM_BoldGreen "${Esc}[1;32m") + set(CM_BoldYellow "${Esc}[1;33m") + set(CM_BoldBlue "${Esc}[1;34m") + set(CM_BoldMagenta "${Esc}[1;35m") + set(CM_BoldCyan "${Esc}[1;36m") + set(CM_BoldWhite "${Esc}[1;37m") + endif() + + list(GET ARGV 0 MessageType) + list(REMOVE_AT ARGV 0) + if(MessageType STREQUAL FATAL_ERROR OR MessageType STREQUAL SEND_ERROR) + message(${MessageType} "${CM_BoldRed}${ARGV}${CM_ColourReset}") 
+ elseif(MessageType STREQUAL WARNING) + message(${MessageType} "${CM_BoldYellow}${ARGV}${CM_ColourReset}") + elseif(MessageType STREQUAL AUTHOR_WARNING) + message(${MessageType} "${CM_BoldCyan}${ARGV}${CM_ColourReset}") + elseif(MessageType STREQUAL STATUS) + message(${MessageType} "${CM_Green}[INFO]:${CM_ColourReset} ${ARGV}") + elseif(MessageType STREQUAL CACHE) + message(-- "${CM_Blue}[CACHE]:${CM_ColourReset} ${ARGV}") + elseif(MessageType STREQUAL DEBUG) + if(DEFINED DEBUG_BUILDSYSTEMGENERATOR AND DEBUG_BUILDSYSTEMGENERATOR) + message("${CM_Magenta}[DEBUG]:${CM_ColourReset} ${ARGV}") + endif() + else() + message(${MessageType} "${CM_Green}[${MessageType}]:${CM_ColourReset} ${ARGV}") + endif() + endfunction() +endif() \ No newline at end of file diff --git a/cmake/Modules/NuOscillatorSetup.cmake b/cmake/Modules/NuOscillatorSetup.cmake index 2e0143fc4..a8785fc28 100644 --- a/cmake/Modules/NuOscillatorSetup.cmake +++ b/cmake/Modules/NuOscillatorSetup.cmake @@ -15,7 +15,7 @@ if (NOT CUDAProb3Linear_ENABLED AND NOT Prob3ppLinear_ENABLED AND NOT NuFastLinear_ENABLED AND NOT OscProb_ENABLED) - set(CUDAProb3Linear_ENABLED TRUE) + set(NuFastLinear_ENABLED TRUE) endif() #KS: Save which oscillators are being used @@ -69,6 +69,7 @@ CPMAddPackage( VERSION 1.0.3 GITHUB_REPOSITORY "dbarrow257/NuOscillator" GIT_TAG "v1.0.3" + GIT_SHALLOW YES OPTIONS "UseGPU ${DAN_USE_GPU}" "UseMultithreading ${DAN_USE_MULTITHREAD}" @@ -83,6 +84,7 @@ CPMAddPackage( "NuOscillator_Compiler_Flags ${compile_options_string}" "CMAKE_CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES_STRING}" + "CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD}" ) if(NOT TARGET NuOscillator) diff --git a/cmake/Templates/Doxyfile.in b/cmake/Templates/Doxyfile.in index d5ac8285b..7377d2b6b 100644 --- a/cmake/Templates/Doxyfile.in +++ b/cmake/Templates/Doxyfile.in @@ -51,14 +51,14 @@ PROJECT_BRIEF = "Reference Guide" # and the maximum width should not exceed 200 pixels. Doxygen will copy the logo # to the output directory. 
-PROJECT_LOGO = "../Doc/mach3logo_small.png" +PROJECT_LOGO = "Doc/mach3logo_small.png" # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. -OUTPUT_DIRECTORY = +OUTPUT_DIRECTORY = Doc/ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and @@ -142,7 +142,7 @@ FULL_PATH_NAMES = YES # will be relative from the directory where doxygen is started. # This tag requires that the tag FULL_PATH_NAMES is set to YES. -STRIP_FROM_PATH = /github/workspace/ +STRIP_FROM_PATH = /home/runner/work/MaCh3/MaCh3/ # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the # path mentioned in the documentation of a class, which tells the reader which @@ -151,7 +151,7 @@ STRIP_FROM_PATH = /github/workspace/ # specify the list of include paths that are normally passed to the compiler # using the -I flag. -STRIP_FROM_INC_PATH = /github/workspace/ +STRIP_FROM_INC_PATH = /home/runner/work/MaCh3/MaCh3/ # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but # less readable) file names. This can be useful is your file systems doesn't @@ -670,7 +670,7 @@ LAYOUT_FILE = # search path. Do not use file names with spaces, bibtex cannot handle them. See # also \cite for info how to create references. -CITE_BIB_FILES = ../Doc/bibliography.bib +CITE_BIB_FILES = Doc/bibliography.bib #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages @@ -741,7 +741,7 @@ WARN_LOGFILE = # spaces. # Note: If this tag is empty the current directory is searched. 
-INPUT = mainpage.md ../ ../manager ../splines ../samplePDF ../OscClass ../mcmc ../Diagnostics ../plotting ../plotting/plottingUtils ../Diagnostics/Diagnostics_utils ../covariance Plots/ +INPUT = . Doc/mainpage.md manager splines samplePDF mcmc Diagnostics plotting plotting/plottingUtils Diagnostics/Diagnostics_utils covariance # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses @@ -891,7 +891,7 @@ FILTER_SOURCE_PATTERNS = # (index.html). This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. -USE_MDFILE_AS_MAINPAGE = mainpage.md +USE_MDFILE_AS_MAINPAGE = Doc/mainpage.md #--------------------------------------------------------------------------- # Configuration options related to source browsing @@ -1080,7 +1080,7 @@ HTML_STYLESHEET = # see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_EXTRA_STYLESHEET = ../Doc/MaCh3.css +HTML_EXTRA_STYLESHEET = Doc/MaCh3.css # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. 
Note diff --git a/cmake/Templates/MaCh3Config.cmake.in b/cmake/Templates/MaCh3Config.cmake.in index 01b67485c..e42477dbb 100644 --- a/cmake/Templates/MaCh3Config.cmake.in +++ b/cmake/Templates/MaCh3Config.cmake.in @@ -17,18 +17,15 @@ set_property(GLOBAL PROPERTY MACH3_CXX_STANDARD "${MACH3_CXX_STANDARD}") get_filename_component(MaCh3_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) list(APPEND CMAKE_MODULE_PATH "${MaCh3_CMAKE_DIR}") list(APPEND CMAKE_MODULE_PATH "${MaCh3_CMAKE_DIR}/cmake") +list(APPEND CMAKE_MODULE_PATH "${MaCh3_CMAKE_DIR}/cmake/Modules") enable_language(CXX) set(MaCh3_FOUND TRUE) -include(ROOT) +include(MaCh3Utils) -if(NOT TARGET ROOT::ROOT) - cmessage(WARNING "Expected MaCh3 to set up dependency target: ROOT::ROOT") - set(MaCh3_FOUND FALSE) - return() -endif() +find_package(ROOT 6.18 REQUIRED) #Fancy NuOscillator find_package(NuOscillator @NUOSCILLATOR_VERSION@ REQUIRED) diff --git a/cmake/Templates/setup.MaCh3.sh.in b/cmake/Templates/setup.MaCh3.sh.in index 1fa5efd37..48a8cf9c0 100644 --- a/cmake/Templates/setup.MaCh3.sh.in +++ b/cmake/Templates/setup.MaCh3.sh.in @@ -85,7 +85,7 @@ add_to_PATH ${MaCh3_ROOT}/bin add_to_LD_LIBRARY_PATH ${MaCh3_ROOT}/lib if test -d ${MaCh3_ROOT}/pyMaCh3; then - add_to_PYTHONPATH ${MaCh3_ROOT}/pyMaCh3 + add_to_PYTHONPATH ${MaCh3_ROOT} fi unset SETUPDIR diff --git a/covariance/AdaptiveMCMCHandler.cpp b/covariance/AdaptiveMCMCHandler.cpp index db8743765..4ea519502 100755 --- a/covariance/AdaptiveMCMCHandler.cpp +++ b/covariance/AdaptiveMCMCHandler.cpp @@ -92,15 +92,15 @@ void AdaptiveMCMCHandler::SetAdaptiveBlocks(std::vector> block_ adapt_block_matrix_indices = std::vector(Npars, 0); // Should also make a matrix of block sizes - adapt_block_sizes = std::vector((int)block_indices.size()+1, 0); + adapt_block_sizes = std::vector(block_indices.size()+1, 0); adapt_block_sizes[0] = Npars; if(block_indices.size()==0 || block_indices[0].size()==0) return; // Now we loop over our blocks - for(int iblock=0; 
iblock<(int)block_indices.size(); iblock++){ + for(int iblock=0; iblockcd(); diff --git a/covariance/CMakeLists.txt b/covariance/CMakeLists.txt index 3239a17f6..2a410068d 100644 --- a/covariance/CMakeLists.txt +++ b/covariance/CMakeLists.txt @@ -26,7 +26,9 @@ if(NOT CPU_ONLY) set_property(TARGET Covariance PROPERTY CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES}) endif() -target_link_libraries(Covariance Manager MaCh3CompilerOptions) +target_link_libraries(Covariance PUBLIC Manager) +target_link_libraries(Covariance PRIVATE MaCh3Warnings) + target_include_directories(Covariance PUBLIC $ $) diff --git a/covariance/PCAHandler.cpp b/covariance/PCAHandler.cpp index 5d4b51316..e35d598d5 100755 --- a/covariance/PCAHandler.cpp +++ b/covariance/PCAHandler.cpp @@ -121,9 +121,13 @@ void PCAHandler::ConstructPCA(TMatrixDSym * covMatrix, const int firstPCAd, cons } #ifdef DEBUG_PCA + +#pragma GCC diagnostic ignored "-Wfloat-conversion" + // ******************************************** //KS: Let's dump all useful matrices to properly validate PCA void PCAHandler::DebugPCA(const double sum, TMatrixD temp, TMatrixDSym submat, int NumPar) { + // ******************************************** (void)submat;//This is used if DEBUG_PCA==2, this hack is to avoid compiler warnings TFile *PCA_Debug = new TFile("Debug_PCA.root", "RECREATE"); @@ -133,9 +137,9 @@ void PCAHandler::DebugPCA(const double sum, TMatrixD temp, TMatrixDSym submat, i //KS: If we have more than 200 plot becomes unreadable :( if(NumPar > 200) PlotText = false; - TH1D* heigen_values = new TH1D("eigen_values", "Eigen Values", (int)eigen_values.GetNrows(), 0.0, (int)eigen_values.GetNrows()); - TH1D* heigen_cumulative = new TH1D("heigen_cumulative", "heigen_cumulative", (int)eigen_values.GetNrows(), 0.0, (int)eigen_values.GetNrows()); - TH1D* heigen_frac = new TH1D("heigen_fractional", "heigen_fractional", (int)eigen_values.GetNrows(), 0.0, (int)eigen_values.GetNrows()); + TH1D* heigen_values = new TH1D("eigen_values", 
"Eigen Values", eigen_values.GetNrows(), 0.0, eigen_values.GetNrows()); + TH1D* heigen_cumulative = new TH1D("heigen_cumulative", "heigen_cumulative", eigen_values.GetNrows(), 0.0, eigen_values.GetNrows()); + TH1D* heigen_frac = new TH1D("heigen_fractional", "heigen_fractional", eigen_values.GetNrows(), 0.0, eigen_values.GetNrows()); heigen_values->GetXaxis()->SetTitle("Eigen Vector"); heigen_values->GetYaxis()->SetTitle("Eigen Value"); diff --git a/covariance/covarianceBase.cpp b/covariance/covarianceBase.cpp index c86f4a153..911dff22e 100644 --- a/covariance/covarianceBase.cpp +++ b/covariance/covarianceBase.cpp @@ -1,7 +1,7 @@ #include "covariance/covarianceBase.h" // ******************************************** -covarianceBase::covarianceBase(std::string name, std::string file) : inputFile(std::string(file)), pca(false) { +covarianceBase::covarianceBase(std::string name, std::string file) : inputFile(file), pca(false) { // ******************************************** MACH3LOG_INFO("Constructing instance of covarianceBase"); init(name, file); @@ -60,9 +60,6 @@ covarianceBase::~covarianceBase(){ delete[] InvertCovMatrix; delete[] throwMatrixCholDecomp; - const int nThreads = MaCh3Utils::GetNThreads(); - for (int iThread = 0;iThread < nThreads; iThread++) delete random_number[iThread]; - delete[] random_number; if (throwMatrix != nullptr) delete throwMatrix; } @@ -116,8 +113,9 @@ void covarianceBase::init(std::string name, std::string file) { throw MaCh3Exception(__FILE__ , __LINE__ ); } - // Should put in a - TMatrixDSym *CovMat = static_cast(infile->Get(name.c_str())); + + TMatrixDSym *CovMat = infile->Get(name.c_str()); + if (CovMat == nullptr) { MACH3LOG_ERROR("Could not find covariance matrix name {} in file {}", name, file); MACH3LOG_ERROR("Are you really sure {} exists in the file?", name); @@ -129,9 +127,9 @@ void covarianceBase::init(std::string name, std::string file) { const int nThreads = MaCh3Utils::GetNThreads(); //KS: set Random numbers for each 
thread so each thread has different seed //or for one thread if without MULTITHREAD - random_number = new TRandom3*[nThreads](); + random_number.reserve(nThreads); for (int iThread = 0; iThread < nThreads; iThread++) { - random_number[iThread] = new TRandom3(0); + random_number.emplace_back(std::make_unique(0)); } // Not using adaptive by default use_adaptive = false; @@ -190,14 +188,15 @@ void covarianceBase::init(const std::vector& YAMLFile) { const int nThreads = MaCh3Utils::GetNThreads(); //KS: set Random numbers for each thread so each thread has different seed //or for one thread if without MULTITHREAD - random_number = new TRandom3*[nThreads](); + random_number.reserve(nThreads); for (int iThread = 0; iThread < nThreads; iThread++) { - random_number[iThread] = new TRandom3(0); + random_number.emplace_back(std::make_unique(0)); } PrintLength = 35; // Set the covariance matrix - _fNumPar = _fYAMLDoc["Systematics"].size(); + _fNumPar = int(_fYAMLDoc["Systematics"].size()); + use_adaptive = false; InvertCovMatrix = new double*[_fNumPar](); @@ -309,8 +308,6 @@ void covarianceBase::init(const std::vector& YAMLFile) { MACH3LOG_INFO("----------------"); MACH3LOG_INFO("Found {} systematics parameters in total", _fNumPar); MACH3LOG_INFO("----------------"); - - return; } // ******************************************** @@ -348,7 +345,8 @@ void covarianceBase::setCovMatrix(TMatrixDSym *cov) { throw MaCh3Exception(__FILE__ , __LINE__ ); } covMatrix = cov; - invCovMatrix = static_cast(cov->Clone()); + + invCovMatrix = static_cast(cov->Clone()); invCovMatrix->Invert(); //KS: ROOT has bad memory management, using standard double means we can decrease most operation by factor 2 simply due to cache hits for (int i = 0; i < _fNumPar; i++) @@ -589,7 +587,7 @@ void covarianceBase::proposeStep() { // "Randomize" the parameters in the covariance class for the proposed step // Used the proposal kernel and the current parameter value to set proposed step // Also get a new random 
number for the randParams -void covarianceBase::randomize() { +void covarianceBase::randomize() _noexcept_ { // ************************************************ if (!pca) { //KS: By multithreading here we gain at least factor 2 with 8 threads with ND only fit @@ -633,7 +631,7 @@ void covarianceBase::randomize() { // ************************************************ // Correlate the steps by setting the proposed step of a parameter to its current value + some correlated throw -void covarianceBase::CorrelateSteps() { +void covarianceBase::CorrelateSteps() _noexcept_ { // ************************************************ //KS: Using custom function compared to ROOT one with 8 threads we have almost factor 2 performance increase, by replacing TMatrix with just double we increase it even more MatrixVectorMulti(corr_throw, throwMatrixCholDecomp, randParams, _fNumPar); @@ -681,7 +679,7 @@ void covarianceBase::CorrelateSteps() { } // ******************************************** // Update so that current step becomes the previously proposed step -void covarianceBase::acceptStep() { +void covarianceBase::acceptStep() _noexcept_ { // ******************************************** if (!pca) { #ifdef MULTITHREAD @@ -794,7 +792,7 @@ void covarianceBase::printNominalCurrProp() { // fParEvalLikelihood stores if we want to evaluate the likelihood for the given parameter // true = evaluate likelihood (so run with a prior) // false = don't evaluate likelihood (so run without a prior) -double covarianceBase::CalcLikelihood() { +double covarianceBase::CalcLikelihood() _noexcept_ { // ******************************************** double logL = 0.0; #ifdef MULTITHREAD @@ -851,7 +849,6 @@ void covarianceBase::printPars() { for(int i = 0; i < _fNumPar; i++) { MACH3LOG_INFO("{:s} current: \t{:.5f} \tproposed: \t{:.5f}", _fNames[i], _fCurrVal[i], _fPropVal[i]); } - return; } // ******************************************** @@ -870,8 +867,8 @@ void covarianceBase::setParameters(const std::vector& 
pars) { MACH3LOG_ERROR("Warning: parameter arrays of incompatible size! Not changing parameters! {} has size {} but was expecting {}", matrixName, pars.size(), _fNumPar); throw MaCh3Exception(__FILE__ , __LINE__ ); } - unsigned int parsSize = pars.size(); - for (unsigned int i = 0; i < parsSize; i++) { + int parsSize = int(pars.size()); + for (int i = 0; i < parsSize; i++) { //Make sure that you are actually passing a number to set the parameter to if(std::isnan(pars[i])) { MACH3LOG_ERROR("Error: trying to set parameter value to a nan for parameter {} in matrix {}. This will not go well!", GetParName(i), matrixName); @@ -886,7 +883,6 @@ void covarianceBase::setParameters(const std::vector& pars) { TransferToPCA(); TransferToParam(); } - return; } // ******************************************** @@ -899,7 +895,8 @@ void covarianceBase::SetBranches(TTree &tree, bool SaveProposal) { // When running PCA, also save PCA parameters if (pca) { for (int i = 0; i < _fNumParPCA; ++i) { - tree.Branch(Form("%s_PCA", _fNames[i].c_str()), static_cast(&fParCurr_PCA.GetMatrixArray()[i]), Form("%s_PCA/D", _fNames[i].c_str())); + + tree.Branch(Form("%s_PCA", _fNames[i].c_str()), &fParCurr_PCA.GetMatrixArray()[i], Form("%s_PCA/D", _fNames[i].c_str())); } } @@ -912,7 +909,8 @@ void covarianceBase::SetBranches(TTree &tree, bool SaveProposal) { // When running PCA, also save PCA parameters if (pca) { for (int i = 0; i < _fNumParPCA; ++i) { - tree.Branch(Form("%s_PCA_Prop", _fNames[i].c_str()), static_cast(&fParProp_PCA.GetMatrixArray()[i]), Form("%s_PCA_Prop/D", _fNames[i].c_str())); + + tree.Branch(Form("%s_PCA_Prop", _fNames[i].c_str()), &fParProp_PCA.GetMatrixArray()[i], Form("%s_PCA_Prop/D", _fNames[i].c_str())); } } } @@ -940,7 +938,6 @@ void covarianceBase::toggleFixAllParameters() { } else{ for (int i = 0; i < _fNumParPCA; i++) fParSigma_PCA[i] *= -1.0; } - return; } // ******************************************** @@ -968,7 +965,6 @@ void covarianceBase::toggleFixParameter(const 
int i) { MACH3LOG_INFO("Setting un-decomposed {}(parameter {}/{} in PCA base) to fixed at {}", GetParName(i), i, isDecom, _fCurrVal[i]); } } - return; } // ******************************************** @@ -1016,7 +1012,7 @@ void covarianceBase::setFlatPrior(const int i, const bool eL) { // ******************************************** //KS: Custom function to perform multiplication of matrix and vector with multithreading -void covarianceBase::MatrixVectorMulti(double* _restrict_ VecMulti, double** _restrict_ matrix, const double* _restrict_ vector, const int n) { +void covarianceBase::MatrixVectorMulti(double* _restrict_ VecMulti, double** _restrict_ matrix, const double* _restrict_ vector, const int n) const { // ******************************************** #ifdef MULTITHREAD #pragma omp parallel for @@ -1036,9 +1032,8 @@ void covarianceBase::MatrixVectorMulti(double* _restrict_ VecMulti, double** _re } // ******************************************** -double covarianceBase::MatrixVectorMultiSingle(double** _restrict_ matrix, const double* _restrict_ vector, const int Length, const int i) { +double covarianceBase::MatrixVectorMultiSingle(double** _restrict_ matrix, const double* _restrict_ vector, const int Length, const int i) const { // ******************************************** - double Element = 0.0; #ifdef MULTITHREAD #pragma omp simd @@ -1052,7 +1047,7 @@ double covarianceBase::MatrixVectorMultiSingle(double** _restrict_ matrix, const // ******************************************** void covarianceBase::setIndivStepScale(const std::vector& stepscale) { // ******************************************** - if ((int)stepscale.size() != _fNumPar) + if (int(stepscale.size()) != _fNumPar) { MACH3LOG_WARN("Stepscale vector not equal to number of parameters. 
Quitting.."); MACH3LOG_WARN("Size of argument vector: {}", stepscale.size()); @@ -1063,10 +1058,7 @@ void covarianceBase::setIndivStepScale(const std::vector& stepscale) { for (int iParam = 0 ; iParam < _fNumPar; iParam++) { _fIndivStepScale[iParam] = stepscale[iParam]; } - printIndivStepScale(); - - return; } // ******************************************** @@ -1124,8 +1116,6 @@ void covarianceBase::MakePosDef(TMatrixDSym *cov) { } //DB Resetting warning level gErrorIgnoreLevel = originalErrorWarning; - - return; } // ******************************************** @@ -1274,11 +1264,13 @@ void covarianceBase::makeClosestPosDef(TMatrixDSym *cov) { throw MaCh3Exception(__FILE__ , __LINE__ ); } - TMatrixD cov_sym_v = static_cast(cov_sym_svd.GetV()); + + TMatrixD cov_sym_v = cov_sym_svd.GetV(); TMatrixD cov_sym_vt = cov_sym_v; cov_sym_vt.T(); //SVD returns as vector (grrr) so need to get into matrix form for multiplying! - TVectorD cov_sym_sigvect = static_cast(cov_sym_svd.GetSig()); + TVectorD cov_sym_sigvect = cov_sym_svd.GetSig(); + const Int_t nCols = cov_sym_v.GetNcols(); //square so only need rows hence lack of cols TMatrixDSym cov_sym_sig(nCols); TMatrixDDiag cov_sym_sig_diag(cov_sym_sig); @@ -1306,7 +1298,7 @@ std::vector covarianceBase::getNominalArray() { { nominal[i] = _fPreFitValue[i]; } - return nominal; + return nominal; } // ******************************************** diff --git a/covariance/covarianceBase.h b/covariance/covarianceBase.h index 09634ea84..62a6d2197 100644 --- a/covariance/covarianceBase.h +++ b/covariance/covarianceBase.h @@ -7,10 +7,8 @@ #include "covariance/AdaptiveMCMCHandler.h" #include "covariance/PCAHandler.h" -#ifndef _LARGE_LOGL_ /// Large Likelihood is used it parameter go out of physical boundary, this indicates in MCMC that such step should eb removed -#define _LARGE_LOGL_ 1234567890.0 -#endif +constexpr static const double _LARGE_LOGL_ = 1234567890.0; /// @brief Base class responsible for handling of systematic error parameters. 
Capable of using PCA or using adaptive throw matrix /// @see For more details, visit the [Wiki](https://github.com/mach3-software/MaCh3/wiki/02.-Implementation-of-Systematic). @@ -98,7 +96,7 @@ class covarianceBase { /// @brief Check if parameters were proposed outside physical boundary virtual int CheckBounds(); /// @brief Calc penalty term based on inverted covariance matrix - double CalcLikelihood(); + double CalcLikelihood() _noexcept_; /// @brief Return CalcLikelihood if some params were thrown out of boundary return _LARGE_LOGL_ virtual double GetLikelihood(); @@ -274,17 +272,18 @@ class covarianceBase { /// @param pars vector with new values of PCA params inline void setParameters_PCA(const std::vector &pars) { if (!pca) { MACH3LOG_ERROR("Am not running in PCA mode"); throw MaCh3Exception(__FILE__ , __LINE__ ); } - if (pars.size() != size_t(_fNumParPCA)) { + if (int(pars.size()) != _fNumParPCA) { MACH3LOG_ERROR("Warning: parameter arrays of incompatible size! Not changing parameters! 
{} has size {} but was expecting {}", matrixName, pars.size(), _fNumPar); throw MaCh3Exception(__FILE__ , __LINE__ ); } - unsigned int parsSize = pars.size(); - for (unsigned int i = 0; i < parsSize; i++) { + int parsSize = int(pars.size()); + for (int i = 0; i < parsSize; i++) { fParProp_PCA(i) = pars[i]; } //KS: Transfer to normal base TransferToParam(); } + /// @brief Get number of params which will be different depending if using Eigen decomposition or not inline int getNpars() { if (pca) return _fNumParPCA; @@ -302,7 +301,7 @@ class covarianceBase { /// @brief Generate a new proposed state virtual void proposeStep(); /// @brief Accepted this step - void acceptStep(); + void acceptStep() _noexcept_; /// @brief fix parameters at prior values void toggleFixAllParameters(); @@ -335,13 +334,13 @@ class covarianceBase { /// @param matrix This matrix is used for multiplication VecMulti = matrix x vector /// @param VecMulti This vector is used for multiplication VecMulti = matrix x vector /// @param n this is size of matrix and vector, we assume matrix is symmetric - inline void MatrixVectorMulti(double* _restrict_ VecMulti, double** _restrict_ matrix, const double* _restrict_ vector, const int n); + inline void MatrixVectorMulti(double* _restrict_ VecMulti, double** _restrict_ matrix, const double* _restrict_ vector, const int n) const; /// @brief KS: Custom function to perform multiplication of matrix and single element which is thread safe - inline double MatrixVectorMultiSingle(double** _restrict_ matrix, const double* _restrict_ vector, const int Length, const int i); + inline double MatrixVectorMultiSingle(double** _restrict_ matrix, const double* _restrict_ vector, const int Length, const int i) const; /// @brief Getter to return a copy of the YAML node YAML::Node GetConfig() const { return _fYAMLDoc; } - protected: +protected: /// @brief Initialisation of the class using matrix from root file void init(std::string name, std::string file); /// @brief 
Initialisation of the class using config @@ -354,9 +353,9 @@ class covarianceBase { void ReserveMemory(const int size); /// @brief "Randomize" the parameters in the covariance class for the proposed step. Used the proposal kernel and the current parameter value to set proposed step - void randomize(); + void randomize() _noexcept_; /// @brief Use Cholesky throw matrix for better step proposal - void CorrelateSteps(); + void CorrelateSteps() _noexcept_; /// @brief Make matrix positive definite by adding small values to diagonal, necessary for inverting matrix /// @param cov Matrix which we evaluate Positive Definitiveness @@ -395,7 +394,7 @@ class covarianceBase { double **InvertCovMatrix; /// KS: Set Random numbers for each thread so each thread has different seed - TRandom3 **random_number; + std::vector> random_number; /// Random number taken from gaussian around prior error used for corr_throw double* randParams; @@ -405,7 +404,7 @@ class covarianceBase { double _fGlobalStepScale; /// KS: This is used when printing parameters, sometimes we have super long parameters name, we want to flexibly adjust couts - unsigned int PrintLength; + int PrintLength; /// ETA _fNames is set automatically in the covariance class to be something like xsec_i, this is currently to make things compatible with the Diagnostic tools std::vector _fNames; diff --git a/covariance/covarianceXsec.cpp b/covariance/covarianceXsec.cpp index fefae18ca..602d925e6 100644 --- a/covariance/covarianceXsec.cpp +++ b/covariance/covarianceXsec.cpp @@ -13,7 +13,7 @@ covarianceXsec::covarianceXsec(const std::vector& YAMLFile, std::st for (int i = 0; i < _fNumPar; i++) { // Sort out the print length - if(_fNames[i].length() > PrintLength) PrintLength = _fNames[i].length(); + if(int(_fNames[i].length()) > PrintLength) PrintLength = int(_fNames[i].length()); } // end the for loop MACH3LOG_DEBUG("Constructing instance of covarianceXsec"); @@ -160,7 +160,7 @@ XsecNorms4 covarianceXsec::GetXsecNorm(const 
YAML::Node& param, const int Index) int NumKinematicCuts = 0; if(param["KinematicCuts"]){ - NumKinematicCuts = param["KinematicCuts"].size(); + NumKinematicCuts = int(param["KinematicCuts"].size()); std::vector TempKinematicStrings; std::vector> TempKinematicBounds; @@ -171,9 +171,12 @@ XsecNorms4 covarianceXsec::GetXsecNorm(const YAML::Node& param, const int Index) for (YAML::const_iterator it = param["KinematicCuts"][KinVar_i].begin();it!=param["KinematicCuts"][KinVar_i].end();++it) { TempKinematicStrings.push_back(it->first.as()); TempKinematicBounds.push_back(it->second.as>()); - std::vector bounds = it->second.as>(); } - } + if(TempKinematicStrings.size() == 0) { + MACH3LOG_ERROR("Recived a KinematicCuts node but couldn't read the contents (it's a list of single-element dictionaries (python) = map of pairs (C++))"); + throw MaCh3Exception(__FILE__, __LINE__); + } + }//KinVar_i norm.KinematicVarStr = TempKinematicStrings; norm.Selection = TempKinematicBounds; } @@ -201,7 +204,7 @@ const std::vector covarianceXsec::GetGlobalSystIndexFromDetID(const int Det std::vector returnVec; for (auto &pair : _fSystToGlobalSystIndexMap[Type]) { auto &SystIndex = pair.second; - if ((GetParDetID(SystIndex) & DetID)) { //If parameter applies to required DetID + if (AppliesToDetID(SystIndex, DetID)) { //If parameter applies to required DetID returnVec.push_back(SystIndex); } } @@ -216,8 +219,8 @@ const std::vector covarianceXsec::GetSystIndexFromDetID(int DetID, const S std::vector returnVec; for (auto &pair : _fSystToGlobalSystIndexMap[Type]) { auto &SplineIndex = pair.first; - auto &SystIndex = pair.second; - if ((GetParDetID(SystIndex) & DetID)) { //If parameter applies to required DetID + auto &systIndex = pair.second; + if (AppliesToDetID(systIndex, DetID)) { //If parameter applies to required DetID returnVec.push_back(SplineIndex); } } @@ -237,7 +240,7 @@ XsecSplines1 covarianceXsec::GetXsecSpline(const YAML::Node& param) { Spline._SplineInterpolationType = 
SplineInterpolation(InterpType); } } else { //KS: By default use TSpline3 - Spline._SplineInterpolationType = SplineInterpolation(kTSpline3); + Spline._SplineInterpolationType = kTSpline3; } Spline._SplineKnotUpBound = GetFromManager(param["SplineInformation"]["SplineKnotUpBound"], 9999); Spline._SplineKnotLowBound = GetFromManager(param["SplineInformation"]["SplineKnotLowBound"], -9999); @@ -306,7 +309,7 @@ template void covarianceXsec::IterateOverParams(const int DetID, FilterFunc filter, ActionFunc action) { // ******************************************** for (int i = 0; i < _fNumPar; ++i) { - if ((GetParDetID(i) & DetID) && filter(i)) { // Common filter logic + if ((AppliesToDetID(i, DetID)) && filter(i)) { // Common filter logic action(i); // Specific action for each function } } @@ -385,6 +388,8 @@ void covarianceXsec::PrintNormParams() { MACH3LOG_INFO("Normalisation parameters: {}", NormParams.size()); if(_fSystToGlobalSystIndexMap[SystType::kNorm].size() == 0) return; + bool have_parameter_with_kin_bounds = false; + //KS: Consider making some class producing table.. MACH3LOG_INFO("┌────┬──────────┬────────────────────────────────────────┬────────────────────┬────────────────────┬────────────────────┐"); MACH3LOG_INFO("│{0:4}│{1:10}│{2:40}│{3:20}│{4:20}│{5:20}│", "#", "Global #", "Name", "Int. 
mode", "Target", "pdg"); @@ -414,8 +419,39 @@ void covarianceXsec::PrintNormParams() { if (NormParams[i].pdgs.empty()) pdgString += "all"; MACH3LOG_INFO("│{: <4}│{: <10}│{: <40}│{: <20}│{: <20}│{: <20}│", i, NormParams[i].index, NormParams[i].name, intModeString, targetString, pdgString); + + if(NormParams[i].hasKinBounds) have_parameter_with_kin_bounds = true; } MACH3LOG_INFO("└────┴──────────┴────────────────────────────────────────┴────────────────────┴────────────────────┴────────────────────┘"); + + if(have_parameter_with_kin_bounds) { + MACH3LOG_INFO("Normalisation parameters KinematicCuts information"); + MACH3LOG_INFO("┌────┬──────────┬────────────────────────────────────────┬────────────────────┬────────────────────────────────────────┐"); + MACH3LOG_INFO("│{0:4}│{1:10}│{2:40}│{3:20}│{4:40}│", "#", "Global #", "Name", "KinematicCut", "Value"); + MACH3LOG_INFO("├────┼──────────┼────────────────────────────────────────┼────────────────────┼────────────────────────────────────────┤"); + for (unsigned int i = 0; i < NormParams.size(); ++i) + { + //skip parameters with no KinematicCuts + if(!NormParams[i].hasKinBounds) continue; + + const long unsigned int ncuts = NormParams[i].KinematicVarStr.size(); + for(long unsigned int icut = 0; icut < ncuts; icut++) { + std::string kinematicCutValueString; + for(const auto & value : NormParams[i].Selection[icut]) { + kinematicCutValueString += std::to_string(value); + kinematicCutValueString += " "; + } + + if(icut == 0) + MACH3LOG_INFO("│{: <4}│{: <10}│{: <40}│{: <20}│{: <40}│", i, NormParams[i].index, NormParams[i].name, NormParams[i].KinematicVarStr[icut], kinematicCutValueString); + else + MACH3LOG_INFO("│{: <4}│{: <10}│{: <40}│{: <20}│{: <40}│", "", "", "", NormParams[i].KinematicVarStr[icut], kinematicCutValueString); + }//icut + }//i + MACH3LOG_INFO("└────┴──────────┴────────────────────────────────────────┴────────────────────┴────────────────────────────────────────┘"); + } + else + MACH3LOG_INFO("No 
normalisation parameters have KinematicCuts defined"); } // ******************************************** @@ -480,11 +516,11 @@ void covarianceXsec::CheckCorrectInitialisation() { // ******************************************** // KS: Lambda Function which simply checks if there are no duplicates in std::vector auto CheckForDuplicates = [](const std::vector& names, const std::string& nameType) { - std::unordered_map seenStrings; + std::unordered_map seenStrings; for (size_t i = 0; i < names.size(); ++i) { const auto& name = names[i]; if (seenStrings.find(name) != seenStrings.end()) { - int firstIndex = seenStrings[name]; + size_t firstIndex = seenStrings[name]; MACH3LOG_CRITICAL("There are two systematics with the same {} '{}', first at index {}, and again at index {}", nameType, name, firstIndex, i); throw MaCh3Exception(__FILE__, __LINE__); } @@ -515,9 +551,14 @@ void covarianceXsec::SetGroupOnlyParameters(const std::string& Group) { // Checks if parameter belongs to a given group bool covarianceXsec::IsParFromGroup(const int i, const std::string& Group) { // ******************************************** + std::string groupLower = Group; + std::string paramGroupLower = _ParameterGroup[i]; + + // KS: Convert both strings to lowercase, this way comparison will be case insensitive + std::transform(groupLower.begin(), groupLower.end(), groupLower.begin(), ::tolower); + std::transform(paramGroupLower.begin(), paramGroupLower.end(), paramGroupLower.begin(), ::tolower); - if(Group == _ParameterGroup[i]) return true; - else return false; + return groupLower == paramGroupLower; } // ******************************************** diff --git a/covariance/covarianceXsec.h b/covariance/covarianceXsec.h index 0936cc362..1dd61affa 100644 --- a/covariance/covarianceXsec.h +++ b/covariance/covarianceXsec.h @@ -127,7 +127,12 @@ class covarianceXsec : public covarianceBase { /// @param DetID The Detector ID used to filter parameters. 
template void IterateOverParams(const int DetID, FilterFunc filter, ActionFunc action); - + /// @brief Check if parameter is affecting given det ID + /// @param SystIndex number of parameter + /// @param DetID The Detector ID used to filter parameters. + bool AppliesToDetID(const int SystIndex, const int DetID) const { + return (GetParDetID(SystIndex) & DetID) != 0; + } /// @brief Initializes the systematic parameters from the configuration file. /// This function loads parameters like normalizations and splines from the provided YAML file. /// @note This is used internally during the object's initialization process. diff --git a/manager/CMakeLists.txt b/manager/CMakeLists.txt index b612c3dec..a67c36198 100644 --- a/manager/CMakeLists.txt +++ b/manager/CMakeLists.txt @@ -25,7 +25,9 @@ if(NOT CPU_ONLY) set_property(TARGET Manager PROPERTY CUDA_ARCHITECTURES ${CMAKE_CUDA_ARCHITECTURES}) endif() -target_link_libraries(Manager yaml-cpp spdlog ROOT::ROOT MaCh3CompilerOptions) +target_link_libraries(Manager PUBLIC MaCh3CompilerOptions ROOT::Tree ROOT::Hist yaml-cpp spdlog) +target_link_libraries(Manager PRIVATE MaCh3Warnings) + target_include_directories(Manager PUBLIC $ $) diff --git a/manager/MaCh3Modes.cpp b/manager/MaCh3Modes.cpp index 7dc802a25..fef23a63b 100644 --- a/manager/MaCh3Modes.cpp +++ b/manager/MaCh3Modes.cpp @@ -41,7 +41,7 @@ void MaCh3Modes::Print() { MACH3LOG_INFO("Printing MaCh3 Modes Called: {}", Title); MACH3LOG_INFO("========================================================================"); - MACH3LOG_INFO("{:<5} {:2} {:<20} {:2} {:<20} {:2} {:<30}", "#", "|", "Name", "|", "FancyName", "|", std::string(Generator+" Modes")); + MACH3LOG_INFO("{:<5} {:2} {:<20} {:2} {:<20} {:2} {:<30}", "#", "|", "Name", "|", "FancyName", "|", Generator+" Modes"); MACH3LOG_INFO("------------------------------------------------------------------------"); for(int i = 0; i < NModes; ++i) { auto Name = fMode[i].Name; @@ -57,7 +57,7 @@ void MaCh3Modes::Print() { 
MACH3LOG_INFO("========================================================================"); MACH3LOG_INFO("=========================="); - MACH3LOG_INFO("{:<10} {:2} {:<30}", std::string(Generator + " Modes"), "|", "Name"); + MACH3LOG_INFO("{:<10} {:2} {:<30}", Generator + " Modes", "|", "Name"); MACH3LOG_INFO("--------------------------"); for (size_t i = 0; i < ModeMap.size(); ++i) { MACH3LOG_INFO("{:<10} {:2} {:<30}", i, "|", fMode[ModeMap[i]].Name); @@ -71,7 +71,7 @@ MaCh3Modes_t MaCh3Modes::EnsureModeNameRegistered(std::string const &name) { if (Mode.count(name)) { return Mode[name]; } - MaCh3Modes_t index = Mode.size(); + MaCh3Modes_t index = MaCh3Modes_t(Mode.size()); Mode[name] = index; return index; } diff --git a/manager/Monitor.cpp b/manager/Monitor.cpp index 90a7f0432..02c9e7c52 100644 --- a/manager/Monitor.cpp +++ b/manager/Monitor.cpp @@ -168,7 +168,7 @@ std::string TerminalToString(std::string cmd) { // ************************ //KS: Simple to retrieve speed of get entry inspired by -void EstimateDataTransferRate(TChain* chain, const int entry){ +void EstimateDataTransferRate(TChain* chain, const Long64_t entry){ // ************************ TStopwatch timer; @@ -186,15 +186,15 @@ void EstimateDataTransferRate(TChain* chain, const int entry){ // ************************ //KS: Simply print progress bar -void PrintProgressBar(const int Done, const int All){ +void PrintProgressBar(const Long64_t Done, const Long64_t All){ // ************************ - double progress = double((double(Done)/double(All))); + double progress = double(Done)/double(All); const int barWidth = 20; std::ostringstream progressBar; progressBar << "["; - int pos = barWidth * progress; + int pos = int(barWidth * progress); for (int i = 0; i < barWidth; ++i) { if (i < pos) progressBar << "="; diff --git a/manager/Monitor.h b/manager/Monitor.h index dae7a72bd..dba876f5e 100644 --- a/manager/Monitor.h +++ b/manager/Monitor.h @@ -20,6 +20,9 @@ #include "manager/MaCh3Exception.h" #include 
"manager/YamlHelper.h" +/// @file Monitor.h +/// @brief System and monitoring utilities for printing system information and status updates. + namespace MaCh3Utils { /// @brief KS: Prints welcome message with MaCh3 logo void MaCh3Welcome(); @@ -34,14 +37,14 @@ namespace MaCh3Utils { /// @return The output of the terminal command as a string. std::string TerminalToString(std::string cmd); /// @brief KS: Check what CPU you are using - void EstimateDataTransferRate(TChain* chain, const int entry); + void EstimateDataTransferRate(TChain* chain, const Long64_t entry); /// @brief KS: Find out about Disk usage void GetDiskUsage(); /// @brief KS: Simply print progress bar /// @param Done The number of tasks completed. /// @param All The total number of tasks. /// @details This function prints a progress bar to the terminal, indicating the percentage of tasks completed. - void PrintProgressBar(const int Done, const int All); + void PrintProgressBar(const Long64_t Done, const Long64_t All); /// @brief KS: Get version of MaCh3 /// @return The current MaCh3 version as a string. /// @details This function fetches and returns the version of the MaCh3 software being used. diff --git a/manager/YamlHelper.h b/manager/YamlHelper.h index a1ce159d7..428087a40 100644 --- a/manager/YamlHelper.h +++ b/manager/YamlHelper.h @@ -13,6 +13,9 @@ // yaml Includes #include "yaml-cpp/yaml.h" +/// @file YamlHelper.h +/// @brief Utility functions for handling YAML nodes + // ********************** /// @brief Get content of config file if node is not found take default value specified /// @param node Yaml node @@ -225,48 +228,22 @@ inline bool compareYAMLNodes(const YAML::Node& node1, const YAML::Node& node2) { /// @brief Overrides the configuration settings based on provided arguments. /// /// This function allows you to set configuration options in a nested YAML node. 
-/// It accepts two, three, or four string arguments: -/// - For two arguments, the first argument is a key, and the second is the value. -/// - For three arguments, the first two arguments are keys (for nested configuration), and the third is the value. -/// - For four arguments, the first three arguments are keys (for deeper nested configuration), and the fourth is the value. -/// /// @param node YAML node that will be modified -/// @param args The arguments to override the configuration. -/// - When two arguments are provided, they represent the key and value, respectively. -/// - When three arguments are provided, they represent two keys and a value. -/// - When four arguments are provided, they represent three keys and a value. +/// @param args The arguments to override the configuration. The last argument +/// will be used as the value /// /// @note Example usage: /// @code /// OverrideConfig(config, "General", "OutputFile", "Wooimbouttamakeanameformyselfere.root"); +/// OverrideConfig(config, "General", "MyDouble", 5.3); /// @endcode +template +void OverrideConfig(YAML::Node node, std::string const &key, TValue val) { +// ********************** + node[key] = val; +} template -void OverrideConfig(YAML::Node& node, Args... args) { +void OverrideConfig(YAML::Node node, std::string const &key, Args... 
args) { // ********************** - static_assert(sizeof...(args) == 2 || sizeof...(args) == 3 || sizeof...(args) == 4, - "OverrideConfig accepts either 2, 3, or 4 arguments."); - - auto args_tuple = std::make_tuple(args...); // Create a tuple from the parameter pack - - if constexpr (sizeof...(args) == 2) { - std::string blarb1 = std::get<0>(args_tuple); // First argument - std::string result = std::get<1>(args_tuple); // Second argument - - node[blarb1] = result; - } - else if constexpr (sizeof...(args) == 3) { - std::string blarb1 = std::get<0>(args_tuple); // First argument - std::string blarb2 = std::get<1>(args_tuple); // Second argument - std::string result = std::get<2>(args_tuple); // Third argument - - node[blarb1][blarb2] = result; - } - else if constexpr (sizeof...(args) == 4) { - std::string blarb1 = std::get<0>(args_tuple); // First argument - std::string blarb2 = std::get<1>(args_tuple); // Second argument - std::string blarb3 = std::get<2>(args_tuple); // Third argument - std::string result = std::get<3>(args_tuple); // Fourth argument - - node[blarb1][blarb2][blarb3] = result; - } + OverrideConfig(node[key], args...); } diff --git a/manager/gpuUtils.cuh b/manager/gpuUtils.cuh index 6a34d9a85..c7ff02d85 100644 --- a/manager/gpuUtils.cuh +++ b/manager/gpuUtils.cuh @@ -1,12 +1,16 @@ #pragma once + // C i/o for printf and others #include #include // CUDA specifics - +// Because CUDA is cuda, need to make sure we don't check C-style floats... +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wold-style-cast" #include +#pragma GCC diagnostic pop #ifdef CUDA_ERROR_CHECK #include @@ -20,11 +24,13 @@ /// KS: Need it for shared memory, there is way to use dynamic shared memory but I am lazy right now #define _BlockSize_ 1024 + +/// @file gpuUtils.cuh +/// @brief Common CUDA utilities and definitions for shared GPU functionality. 
+ /// @todo KS: There is plenty of useful stuff here https://github.com/NVIDIA/cuda-samples/blob/master/Samples/1_Utilities/deviceQuery/deviceQuery.cpp /// @todo KS: We might want to port some of these utilities, for example having bool if there is unified memory etc. -// CUDA_ERROR_CHECK is now defined in the makefile instead -//#define CUDA_ERROR_CHECK // ************************************************** // ERROR CHECKING ROUTINES diff --git a/manager/manager.cpp b/manager/manager.cpp index 6844dffc4..2007787c8 100644 --- a/manager/manager.cpp +++ b/manager/manager.cpp @@ -19,11 +19,11 @@ manager::manager(std::string const &filename) if (config["LikelihoodOptions"]) { auto likelihood = GetFromManager(config["LikelihoodOptions"]["TestStatistic"], "Barlow-Beeston"); - if (likelihood == "Barlow-Beeston") mc_stat_llh = TestStatistic(kBarlowBeeston); - else if (likelihood == "IceCube") mc_stat_llh = TestStatistic(kIceCube); - else if (likelihood == "Poisson") mc_stat_llh = TestStatistic(kPoisson); - else if (likelihood == "Pearson") mc_stat_llh = TestStatistic(kPearson); - else if (likelihood == "Dembinski-Abdelmotteleb") mc_stat_llh = TestStatistic(kDembinskiAbdelmottele); + if (likelihood == "Barlow-Beeston") mc_stat_llh = kBarlowBeeston; + else if (likelihood == "IceCube") mc_stat_llh = kIceCube; + else if (likelihood == "Poisson") mc_stat_llh = kPoisson; + else if (likelihood == "Pearson") mc_stat_llh = kPearson; + else if (likelihood == "Dembinski-Abdelmotteleb") mc_stat_llh = kDembinskiAbdelmottele; else { MACH3LOG_ERROR("Wrong form of test-statistic specified!"); MACH3LOG_ERROR("You gave {} and I only support:", likelihood); diff --git a/mcmc/CMakeLists.txt b/mcmc/CMakeLists.txt index 493b9042e..997e74a8c 100644 --- a/mcmc/CMakeLists.txt +++ b/mcmc/CMakeLists.txt @@ -33,7 +33,9 @@ set_target_properties(MCMC PROPERTIES PUBLIC_HEADER "${HEADERS}" EXPORT_NAME MCMC) -target_link_libraries(MCMC SamplePDF MaCh3CompilerOptions) +target_link_libraries(MCMC PUBLIC 
SamplePDF) +target_link_libraries(MCMC PRIVATE MaCh3Warnings) + target_include_directories(MCMC PUBLIC $ $ diff --git a/mcmc/FitterBase.cpp b/mcmc/FitterBase.cpp index 7875505c5..a76544c6a 100644 --- a/mcmc/FitterBase.cpp +++ b/mcmc/FitterBase.cpp @@ -5,6 +5,8 @@ #include "TTree.h" #include "TGraphAsymmErrors.h" +#pragma GCC diagnostic ignored "-Wuseless-cast" + // ************************* // Initialise the manager and make it an object of FitterBase class // Now we can dump manager settings to the output file @@ -158,8 +160,8 @@ void FitterBase::PrepareOutput() { } // Prepare the output trees - for (std::vector::iterator it = systematics.begin(); it != systematics.end(); ++it) { - (*it)->SetBranches(*outTree, SaveProposal); + for (covarianceBase *cov : systematics) { + cov->SetBranches(*outTree, SaveProposal); } outTree->Branch("LogL", &logLCurr, "LogL/D"); @@ -257,7 +259,7 @@ void FitterBase::addSystObj(covarianceBase * const cov) { n_vec[i] = cov->getParInit(i); TVectorT t_vec(cov->GetNumParams(), n_vec.data()); - t_vec.Write((std::string(cov->getName()) + "_prior").c_str()); + t_vec.Write((cov->getName() + "_prior").c_str()); cov->getCovMatrix()->Write(cov->getName().c_str()); @@ -285,7 +287,7 @@ void FitterBase::StartFromPreviousFit(const std::string& FitName) { MACH3LOG_INFO("Getting starting position from {}", FitName); TFile *infile = new TFile(FitName.c_str(), "READ"); - TTree *posts = (TTree*)infile->Get("posteriors"); + TTree *posts = infile->Get("posteriors"); int step_val = 0; double log_val = _LARGE_LOGL_; posts->SetBranchAddress("step",&step_val); @@ -293,10 +295,10 @@ void FitterBase::StartFromPreviousFit(const std::string& FitName) { for (size_t s = 0; s < systematics.size(); ++s) { - TDirectory* CovarianceFolder = (TDirectory*)infile->Get("CovarianceFolder"); + TDirectory* CovarianceFolder = infile->Get("CovarianceFolder"); - std::string ConfigName = "Config_" + std::string(systematics[s]->getName()); - TMacro *ConfigCov = 
(TMacro*)(CovarianceFolder->Get(ConfigName.c_str())); + std::string ConfigName = "Config_" + systematics[s]->getName(); + TMacro *ConfigCov = CovarianceFolder->Get(ConfigName.c_str()); // KS: Not every covariance uses yaml, if it uses yaml make sure they are identical if (ConfigCov != nullptr) { // Config which was in MCMC from which we are starting @@ -507,7 +509,7 @@ void FitterBase::RunLLHScan() { int SampleIterator = 0; for(unsigned int ivs = 0; ivs < samples.size(); ivs++ ) { - for(_int_ is = 0; is < samples[ivs]->GetNsamples(); is++ ) + for(int is = 0; is < samples[ivs]->GetNsamples(); is++ ) { SampleSplit_LLH[SampleIterator] = outputFile->mkdir((samples[ivs]->GetSampleName(is)+ "_LLH").c_str()); SampleIterator++; @@ -518,13 +520,13 @@ void FitterBase::RunLLHScan() { const int n_points = GetFromManager(fitMan->raw()["General"]["LLHScanPoints"], 100); // We print 5 reweights - const int countwidth = double(n_points)/double(5); + const int countwidth = int(double(n_points)/double(5)); bool isxsec = false; // Loop over the covariance classes - for (std::vector::iterator it = systematics.begin(); it != systematics.end(); ++it) + for (covarianceBase *cov : systematics) { - if (std::string((*it)->getName()) == "xsec_cov") + if (cov->getName() == "xsec_cov") { isxsec = true; } else { @@ -533,16 +535,16 @@ void FitterBase::RunLLHScan() { // Scan over all the parameters // Get the number of parameters - int npars = (*it)->GetNumParams(); - bool IsPCA = (*it)->IsPCA(); - if (IsPCA) npars = (*it)->getNpars(); + int npars = cov->GetNumParams(); + bool IsPCA = cov->IsPCA(); + if (IsPCA) npars = cov->getNpars(); for (int i = 0; i < npars; ++i) { // Get the parameter name - std::string name = (*it)->GetParName(i); + std::string name = cov->GetParName(i); if (IsPCA) name += "_PCA"; // For xsec we can get the actual name, hurray for being informative - if (isxsec) name = (*it)->GetParFancyName(i); + if (isxsec) name = cov->GetParFancyName(i); bool skip = false; for(unsigned 
int is = 0; is < SkipVector.size(); is++) { @@ -555,20 +557,20 @@ void FitterBase::RunLLHScan() { if(skip) continue; // Get the parameter priors and bounds - double prior = (*it)->getParInit(i); - if (IsPCA) prior = (*it)->getParCurr_PCA(i); + double prior = cov->getParInit(i); + if (IsPCA) prior = cov->getParCurr_PCA(i); // Get the covariance matrix and do the +/- nSigma double nSigma = 1; if (IsPCA) nSigma = 0.5; // Set lower and upper bounds relative the prior - double lower = prior - nSigma*(*it)->getDiagonalError(i); - double upper = prior + nSigma*(*it)->getDiagonalError(i); + double lower = prior - nSigma*cov->getDiagonalError(i); + double upper = prior + nSigma*cov->getDiagonalError(i); // If PCA, transform these parameter values to the PCA basis if (IsPCA) { - lower = prior - nSigma*sqrt(((*it)->getEigenValues())(i)); - upper = prior + nSigma*sqrt(((*it)->getEigenValues())(i)); - MACH3LOG_INFO("eval {} = {:.2f}", i, (*it)->getEigenValues()(i)); + lower = prior - nSigma*std::sqrt((cov->getEigenValues())(i)); + upper = prior + nSigma*std::sqrt((cov->getEigenValues())(i)); + MACH3LOG_INFO("eval {} = {:.2f}", i, cov->getEigenValues()(i)); MACH3LOG_INFO("prior {} = {:.2f}", i, prior); MACH3LOG_INFO("lower {} = {:.2f}", i, lower); MACH3LOG_INFO("upper {} = {:.2f}", i, upper); @@ -576,20 +578,20 @@ void FitterBase::RunLLHScan() { } // Cross-section and flux parameters have boundaries that we scan between, check that these are respected in setting lower and upper variables - if (lower < (*it)->GetLowerBound(i)) { - lower = (*it)->GetLowerBound(i); + if (lower < cov->GetLowerBound(i)) { + lower = cov->GetLowerBound(i); } - if (upper > (*it)->GetUpperBound(i)) { - upper = (*it)->GetUpperBound(i); + if (upper > cov->GetUpperBound(i)) { + upper = cov->GetUpperBound(i); } MACH3LOG_INFO("Scanning {} with {} steps, from {:.2f} - {:.2f}, prior = {:.2f}", name, n_points, lower, upper, prior); // Make the TH1D auto hScan = std::make_unique((name + "_full").c_str(), (name + 
"_full").c_str(), n_points, lower, upper); - hScan->SetTitle(std::string(std::string("2LLH_full, ") + name + ";" + name + "; -2(ln L_{sample} + ln L_{xsec+flux} + ln L_{det})").c_str()); + hScan->SetTitle((std::string("2LLH_full, ") + name + ";" + name + "; -2(ln L_{sample} + ln L_{xsec+flux} + ln L_{det})").c_str()); auto hScanSam = std::make_unique((name + "_sam").c_str(), (name + "_sam").c_str(), n_points, lower, upper); - hScanSam->SetTitle(std::string(std::string("2LLH_sam, ") + name + ";" + name + "; -2(ln L_{sample})").c_str()); + hScanSam->SetTitle((std::string("2LLH_sam, ") + name + ";" + name + "; -2(ln L_{sample})").c_str()); std::vector hScanSample(samples.size()); std::vector nSamLLH(samples.size()); @@ -597,7 +599,7 @@ void FitterBase::RunLLHScan() { { std::string NameTemp = samples[ivs]->GetName(); hScanSample[ivs] = new TH1D((name+"_"+NameTemp).c_str(), (name+"_" + NameTemp).c_str(), n_points, lower, upper); - hScanSample[ivs]->SetTitle(std::string(std::string("2LLH_" + NameTemp + ", ") + name + ";" + name + "; -2(ln L_{" + NameTemp +"})").c_str()); + hScanSample[ivs]->SetTitle(("2LLH_" + NameTemp + ", " + name + ";" + name + "; -2(ln L_{" + NameTemp +"})").c_str()); nSamLLH[ivs] = 0.; } @@ -609,7 +611,7 @@ void FitterBase::RunLLHScan() { NameTemp = NameTemp.substr(0, NameTemp.find("_cov")); hScanCov[ivc] = new TH1D((name+"_"+NameTemp).c_str(), (name+"_" + NameTemp).c_str(), n_points, lower, upper); - hScanCov[ivc]->SetTitle(std::string(std::string("2LLH_" + NameTemp + ", ") + name + ";" + name + "; -2(ln L_{" + NameTemp +"})").c_str()); + hScanCov[ivc]->SetTitle(("2LLH_" + NameTemp + ", " + name + ";" + name + "; -2(ln L_{" + NameTemp +"})").c_str()); nCovLLH[ivc] = 0.; } @@ -622,10 +624,10 @@ void FitterBase::RunLLHScan() { { hScanSamSplit.resize(TotalNSamples); sampleSplitllh.resize(TotalNSamples); - for(_int_ is = 0; is < samples[ivs]->GetNsamples(); is++ ) + for(int is = 0; is < samples[ivs]->GetNsamples(); is++ ) { 
hScanSamSplit[SampleIterator] = new TH1D((name+samples[ivs]->GetSampleName(is)).c_str(), (name+samples[ivs]->GetSampleName(is)).c_str(), n_points, lower, upper); - hScanSamSplit[SampleIterator]->SetTitle(std::string(std::string("2LLH_sam, ") + name + ";" + name + "; -2(ln L_{sample})").c_str()); + hScanSamSplit[SampleIterator]->SetTitle((std::string("2LLH_sam, ") + name + ";" + name + "; -2(ln L_{sample})").c_str()); SampleIterator++; } } @@ -639,10 +641,10 @@ void FitterBase::RunLLHScan() { // For PCA we have to do it differently if (IsPCA) { - (*it)->setParProp_PCA(i, hScan->GetBinCenter(j+1)); + cov->setParProp_PCA(i, hScan->GetBinCenter(j+1)); } else { // Set the parameter - (*it)->setParProp(i, hScan->GetBinCenter(j+1)); + cov->setParProp(i, hScan->GetBinCenter(j+1)); } // Reweight the MC @@ -675,7 +677,7 @@ void FitterBase::RunLLHScan() { int SampleIterator = 0; for(unsigned int ivs = 0; ivs < samples.size(); ivs++ ) { - for(_int_ is = 0; is < samples[ivs]->GetNsamples(); is++) + for(int is = 0; is < samples[ivs]->GetNsamples(); is++) { sampleSplitllh[SampleIterator] = samples[ivs]->getSampleLikelihood(is); SampleIterator++; @@ -700,7 +702,7 @@ void FitterBase::RunLLHScan() { int SampleIterator = 0; for(unsigned int ivs = 0; ivs < samples.size(); ivs++ ) { - for(_int_ is = 0; is < samples[ivs]->GetNsamples(); is++) + for(int is = 0; is < samples[ivs]->GetNsamples(); is++) { hScanSamSplit[is]->SetBinContent(j+1, 2*sampleSplitllh[is]); SampleIterator++; @@ -731,7 +733,7 @@ void FitterBase::RunLLHScan() { int SampleIterator = 0; for(unsigned int ivs = 0; ivs < samples.size(); ivs++ ) { - for(_int_ is = 0; is < samples[ivs]->GetNsamples(); is++) + for(int is = 0; is < samples[ivs]->GetNsamples(); is++) { SampleSplit_LLH[SampleIterator]->cd(); hScanSamSplit[SampleIterator]->Write(); @@ -743,9 +745,9 @@ void FitterBase::RunLLHScan() { // Reset the parameters to their prior central values if (IsPCA) { - (*it)->setParProp_PCA(i, prior); + cov->setParProp_PCA(i, 
prior); } else { - (*it)->setParProp(i, prior); + cov->setParProp(i, prior); } }//end loop over systematics }//end loop covariance classes @@ -773,7 +775,7 @@ void FitterBase::RunLLHScan() { int SampleIterator = 0; for(unsigned int ivs = 0; ivs < samples.size(); ivs++ ) { - for(_int_ is = 0; is < samples[ivs]->GetNsamples(); is++ ) + for(int is = 0; is < samples[ivs]->GetNsamples(); is++ ) { SampleSplit_LLH[SampleIterator]->Write(); delete SampleSplit_LLH[SampleIterator]; @@ -796,20 +798,21 @@ void FitterBase::GetStepScaleBasedOnLLHScan() { RunLLHScan(); } - for (std::vector::iterator it = systematics.begin(); it != systematics.end(); ++it) + for (covarianceBase *cov : systematics) { - bool isxsec = (std::string((*it)->getName()) == "xsec_cov"); + bool isxsec = (cov->getName() == "xsec_cov"); - const int npars = (*it)->GetNumParams(); + const int npars = cov->GetNumParams(); std::vector StepScale(npars); for (int i = 0; i < npars; ++i) { - std::string name = (*it)->GetParName(i); + std::string name = cov->GetParName(i); // For xsec we can get the actual name, hurray for being informative - if (isxsec) name = (*it)->GetParFancyName(i); + if (isxsec) name = cov->GetParFancyName(i); + - StepScale[i] = (*it)->GetIndivStepScale(i); - TH1D* LLHScan = static_cast(Sample_LLH->Get((name + "_sam").c_str())); + StepScale[i] = cov->GetIndivStepScale(i); + TH1D* LLHScan = Sample_LLH->Get((name+"_sam").c_str()); if(LLHScan == nullptr) { MACH3LOG_WARN("Couldn't find LLH scan, for {}, skipping", name); @@ -831,8 +834,8 @@ void FitterBase::GetStepScaleBasedOnLLHScan() { MACH3LOG_DEBUG("Sigma: {}", approxSigma); MACH3LOG_DEBUG("optimal Step Size: {}", NewStepScale); } - (*it)->setIndivStepScale(StepScale); - (*it)->SaveUpdatedMatrixConfig(); + cov->setIndivStepScale(StepScale); + cov->SaveUpdatedMatrixConfig(); } } @@ -861,9 +864,9 @@ void FitterBase::Run2DLLHScan() { bool isxsec = false; // Loop over the covariance classes - for (std::vector::iterator it = systematics.begin(); it 
!= systematics.end(); ++it) + for (covarianceBase *cov : systematics) { - if (std::string((*it)->getName()) == "xsec_cov") + if (cov->getName() == "xsec_cov") { isxsec = true; } else { @@ -871,32 +874,32 @@ void FitterBase::Run2DLLHScan() { } // Scan over all the parameters // Get the number of parameters - int npars = (*it)->GetNumParams(); - bool IsPCA = (*it)->IsPCA(); - if (IsPCA) npars = (*it)->getNpars(); + int npars = cov->GetNumParams(); + bool IsPCA = cov->IsPCA(); + if (IsPCA) npars = cov->getNpars(); for (int i = 0; i < npars; ++i) { - std::string name_x = (*it)->GetParName(i); + std::string name_x = cov->GetParName(i); if (IsPCA) name_x += "_PCA"; // For xsec we can get the actual name, hurray for being informative - if (isxsec) name_x = (*it)->GetParFancyName(i); + if (isxsec) name_x = cov->GetParFancyName(i); // Get the parameter priors and bounds - double prior_x = (*it)->getParInit(i); - if (IsPCA) prior_x = (*it)->getParCurr_PCA(i); + double prior_x = cov->getParInit(i); + if (IsPCA) prior_x = cov->getParCurr_PCA(i); // Get the covariance matrix and do the +/- nSigma double nSigma = 1; if (IsPCA) nSigma = 0.5; // Set lower and upper bounds relative the prior - double lower_x = prior_x - nSigma*(*it)->getDiagonalError(i); - double upper_x = prior_x + nSigma*(*it)->getDiagonalError(i); + double lower_x = prior_x - nSigma*cov->getDiagonalError(i); + double upper_x = prior_x + nSigma*cov->getDiagonalError(i); // If PCA, transform these parameter values to the PCA basis if (IsPCA) { - lower_x = prior_x - nSigma*std::sqrt(((*it)->getEigenValues())(i)); - upper_x = prior_x + nSigma*std::sqrt(((*it)->getEigenValues())(i)); - MACH3LOG_INFO("eval {} = {:.2f}", i, (*it)->getEigenValues()(i)); + lower_x = prior_x - nSigma*std::sqrt((cov->getEigenValues())(i)); + upper_x = prior_x + nSigma*std::sqrt((cov->getEigenValues())(i)); + MACH3LOG_INFO("eval {} = {:.2f}", i, cov->getEigenValues()(i)); MACH3LOG_INFO("prior {} = {:.2f}", i, prior_x); MACH3LOG_INFO("lower 
{} = {:.2f}", i, lower_x); MACH3LOG_INFO("upper {} = {:.2f}", i, upper_x); @@ -904,11 +907,11 @@ void FitterBase::Run2DLLHScan() { } // Cross-section and flux parameters have boundaries that we scan between, check that these are respected in setting lower and upper variables - if (lower_x < (*it)->GetLowerBound(i)) { - lower_x = (*it)->GetLowerBound(i); + if (lower_x < cov->GetLowerBound(i)) { + lower_x = cov->GetLowerBound(i); } - if (upper_x > (*it)->GetUpperBound(i)) { - upper_x = (*it)->GetUpperBound(i); + if (upper_x > cov->GetUpperBound(i)) { + upper_x = cov->GetUpperBound(i); } bool skip = false; @@ -924,10 +927,10 @@ void FitterBase::Run2DLLHScan() { for (int j = 0; j < i; ++j) { - std::string name_y = (*it)->GetParName(j); + std::string name_y = cov->GetParName(j); if (IsPCA) name_y += "_PCA"; // For xsec we can get the actual name, hurray for being informative - if (isxsec) name_y = (*it)->GetParFancyName(j); + if (isxsec) name_y = cov->GetParFancyName(j); skip = false; for(unsigned int is = 0; is < SkipVector.size(); is++) @@ -941,29 +944,30 @@ void FitterBase::Run2DLLHScan() { if(skip) continue; // Get the parameter priors and bounds - double prior_y = (*it)->getParInit(j); - if (IsPCA) prior_y = (*it)->getParCurr_PCA(j); + double prior_y = cov->getParInit(j); + if (IsPCA) prior_y = cov->getParCurr_PCA(j); // Set lower and upper bounds relative the prior - double lower_y = prior_y - nSigma*(*it)->getDiagonalError(j); - double upper_y = prior_y + nSigma*(*it)->getDiagonalError(j); + double lower_y = prior_y - nSigma*cov->getDiagonalError(j); + double upper_y = prior_y + nSigma*cov->getDiagonalError(j); // If PCA, transform these parameter values to the PCA basis if (IsPCA) { - lower_y = prior_y - nSigma*sqrt(((*it)->getEigenValues())(j)); - upper_y = prior_y + nSigma*sqrt(((*it)->getEigenValues())(j)); - MACH3LOG_INFO("eval {} = {:.2f}", i, (*it)->getEigenValues()(j)); + lower_y = prior_y - nSigma*std::sqrt((cov->getEigenValues())(j)); + upper_y = 
prior_y + nSigma*std::sqrt((cov->getEigenValues())(j)); + MACH3LOG_INFO("eval {} = {:.2f}", i, cov->getEigenValues()(j)); MACH3LOG_INFO("prior {} = {:.2f}", i, prior_y); MACH3LOG_INFO("lower {} = {:.2f}", i, lower_y); MACH3LOG_INFO("upper {} = {:.2f}", i, upper_y); MACH3LOG_INFO("nSigma = {:.2f}", nSigma); + } // Cross-section and flux parameters have boundaries that we scan between, check that these are respected in setting lower and upper variables - if (lower_y < (*it)->GetLowerBound(j)) { - lower_y = (*it)->GetLowerBound(j); + if (lower_y < cov->GetLowerBound(j)) { + lower_y = cov->GetLowerBound(j); } - if (upper_y > (*it)->GetUpperBound(j)) { - upper_y = (*it)->GetUpperBound(j); + if (upper_y > cov->GetUpperBound(j)) { + upper_y = cov->GetUpperBound(j); } MACH3LOG_INFO("Scanning X {} with {} steps, from {} - {}, prior = {}", name_x, n_points, lower_x, upper_x, prior_x); MACH3LOG_INFO("Scanning Y {} with {} steps, from {} - {}, prior = {}", name_y, n_points, lower_y, upper_y, prior_y); @@ -984,12 +988,12 @@ void FitterBase::Run2DLLHScan() { { // For PCA we have to do it differently if (IsPCA) { - (*it)->setParProp_PCA(i, hScanSam->GetXaxis()->GetBinCenter(x+1)); - (*it)->setParProp_PCA(j, hScanSam->GetYaxis()->GetBinCenter(y+1)); + cov->setParProp_PCA(i, hScanSam->GetXaxis()->GetBinCenter(x+1)); + cov->setParProp_PCA(j, hScanSam->GetYaxis()->GetBinCenter(y+1)); } else { // Set the parameter - (*it)->setParProp(i, hScanSam->GetXaxis()->GetBinCenter(x+1)); - (*it)->setParProp(j, hScanSam->GetYaxis()->GetBinCenter(y+1)); + cov->setParProp(i, hScanSam->GetXaxis()->GetBinCenter(x+1)); + cov->setParProp(j, hScanSam->GetYaxis()->GetBinCenter(y+1)); } // Reweight the MC @@ -1011,11 +1015,11 @@ void FitterBase::Run2DLLHScan() { hScanSam->Write(); // Reset the parameters to their prior central values if (IsPCA) { - (*it)->setParProp_PCA(i, prior_x); - (*it)->setParProp_PCA(j, prior_y); + cov->setParProp_PCA(i, prior_x); + cov->setParProp_PCA(j, prior_y); } else { - 
(*it)->setParProp(i, prior_x); - (*it)->setParProp(j, prior_y); + cov->setParProp(i, prior_x); + cov->setParProp(j, prior_y); } } //end loop over systematics y }//end loop over systematics X @@ -1056,17 +1060,17 @@ void FitterBase::RunSigmaVar() { } bool isxsec = false; - for (std::vector::iterator it = systematics.begin(); it != systematics.end(); ++it) + for (covarianceBase *cov : systematics) { - TMatrixDSym *Cov = (*it)->getCovMatrix(); + TMatrixDSym *Cov = cov->getCovMatrix(); - if((*it)->IsPCA()) + if(cov->IsPCA()) { MACH3LOG_ERROR("Using PCAed matrix not implemented within sigma var code, I am sorry :("); throw MaCh3Exception(__FILE__ , __LINE__ ); } - if (std::string((*it)->getName()) == "xsec_cov") + if (cov->getName() == "xsec_cov") { isxsec = true; } else { @@ -1074,12 +1078,12 @@ void FitterBase::RunSigmaVar() { } // Loop over xsec parameters - for (int i = 0; i < (*it)->GetNumParams(); ++i) + for (int i = 0; i < cov->GetNumParams(); ++i) { // Get the parameter name - std::string name = (*it)->GetParName(i); + std::string name = cov->GetParName(i); // For xsec we can get the actual name, hurray for being informative - if (isxsec) name = (*it)->GetParFancyName(i); + if (isxsec) name = cov->GetParFancyName(i); bool skip = false; for(unsigned int is = 0; is < SkipVector.size(); is++) { @@ -1099,7 +1103,7 @@ void FitterBase::RunSigmaVar() { // Get each sample and how it's responded to our reweighted parameter for(unsigned int ivs = 0; ivs < samples.size(); ivs++ ) { - for(_int_ k = 0; k < samples[ivs]->GetNsamples(); k++ ) + for(int k = 0; k < samples[ivs]->GetNsamples(); k++ ) { std::string title = std::string(samples[ivs]->getPDF(k)->GetName()); dirArryDial->cd(); @@ -1109,7 +1113,7 @@ void FitterBase::RunSigmaVar() { } // Get the initial value of ith parameter - double init = (*it)->getParInit(i); + double init = cov->getParInit(i); TH1D ***sigmaArray_x = new TH1D**[numVar](); TH1D ***sigmaArray_y = new TH1D**[numVar](); @@ -1131,17 +1135,17 @@ void 
FitterBase::RunSigmaVar() { for (int j = 0; j < numVar; ++j) { // New value = prior + variation*1sigma uncertainty - double paramVal = (*it)->getParInit(i)+sigmaArray[j]*std::sqrt((*Cov)(i,i)); + double paramVal = cov->getParInit(i)+sigmaArray[j]*std::sqrt((*Cov)(i,i)); // Check the bounds on the parameter - if (paramVal > (*it)->GetUpperBound(i)) { - paramVal = (*it)->GetUpperBound(i); - } else if (paramVal < (*it)->GetLowerBound(i)) { - paramVal = (*it)->GetLowerBound(i); + if (paramVal > cov->GetUpperBound(i)) { + paramVal = cov->GetUpperBound(i); + } else if (paramVal < cov->GetLowerBound(i)) { + paramVal = cov->GetLowerBound(i); } // Set the parameter - (*it)->setParProp(i, paramVal); + cov->setParProp(i, paramVal); // And reweight the sample for(unsigned int ivs = 0; ivs < samples.size(); ivs++) { samples[ivs]->reweight(); @@ -1162,16 +1166,17 @@ void FitterBase::RunSigmaVar() { // Get each sample and how it's responded to our reweighted parameter for(unsigned int ivs = 0; ivs < samples.size(); ivs++ ) { - for (_int_ k = 0; k < samples[ivs]->GetNsamples(); ++k) + for (int k = 0; k < samples[ivs]->GetNsamples(); ++k) { // Make a string of the double std::ostringstream ss; ss << paramVal; - std::string parVarTitle = std::string(name) + "_" + ss.str(); + std::string parVarTitle = name + "_" + ss.str(); // This is a TH2D - TH2Poly* currSamp = static_cast(samples[ivs]->getPDF(k)->Clone()); - currSamp->SetDirectory(0); + + std::unique_ptr currSamp(static_cast(samples[ivs]->getPDF(k)->Clone())); + currSamp->SetDirectory(nullptr); // Set a descriptiv-ish title std::string title_long = std::string(currSamp->GetName())+"_"+parVarTitle; @@ -1179,7 +1184,7 @@ void FitterBase::RunSigmaVar() { //Get the 1d binning we want. 
Let's just use SetupBinning to get this as it already exists std::vector xbins; std::vector ybins; - samples[ivs]->SetupBinning(k, xbins, ybins); + samples[ivs]->SetupBinning(M3::int_t(k), xbins, ybins); //KS:here we loop over all reaction modes defined in "RelevantModes[nRelevantModes]" if (DoByMode) @@ -1187,38 +1192,36 @@ void FitterBase::RunSigmaVar() { sigmaArray_mode_x[j][SampleIterator] = new TH1D*[nRelevantModes](); sigmaArray_mode_y[j][SampleIterator] = new TH1D*[nRelevantModes](); // Now get the TH2D mode variations - TH2Poly** currSampMode = new TH2Poly*[nRelevantModes](); std::string mode_title_long; for(int ir = 0; ir < nRelevantModes; ir++) { - currSampMode[ir] = static_cast(samples[ivs]->getPDFMode(k, RelevantModes[ir])->Clone()); - currSampMode[ir]->SetDirectory(0); + std::unique_ptr currSampMode(static_cast(samples[ivs]->getPDFMode(k, RelevantModes[ir])->Clone())); + currSampMode->SetDirectory(nullptr); mode_title_long = title_long + "_" + Modes->GetMaCh3ModeName(RelevantModes[ir]); - currSampMode[ir]->SetNameTitle(mode_title_long.c_str(), mode_title_long.c_str()); + currSampMode->SetNameTitle(mode_title_long.c_str(), mode_title_long.c_str()); dirArrySample[SampleIterator]->cd(); - currSampMode[ir]->Write(); + currSampMode->Write(); - sigmaArray_mode_x[j][SampleIterator][ir] = PolyProjectionX(currSampMode[ir], (mode_title_long+"_xProj").c_str(), xbins); - sigmaArray_mode_x[j][SampleIterator][ir]->SetDirectory(0); - sigmaArray_mode_y[j][SampleIterator][ir] = PolyProjectionY(currSampMode[ir], (mode_title_long+"_yProj").c_str(), ybins); - sigmaArray_mode_y[j][SampleIterator][ir]->SetDirectory(0); - delete currSampMode[ir]; + sigmaArray_mode_x[j][SampleIterator][ir] = PolyProjectionX(currSampMode.get(), (mode_title_long+"_xProj").c_str(), xbins); + sigmaArray_mode_x[j][SampleIterator][ir]->SetDirectory(nullptr); + sigmaArray_mode_y[j][SampleIterator][ir] = PolyProjectionY(currSampMode.get(), (mode_title_long+"_yProj").c_str(), ybins); + 
sigmaArray_mode_y[j][SampleIterator][ir]->SetDirectory(nullptr); } - delete[] currSampMode; } //KS: This will give different results depending if data or Asimov, both have their uses if (PlotLLHperBin) { - TH2Poly* currLLHSamp = static_cast(samples[ivs]->getPDF(k)->Clone()); - currLLHSamp->SetDirectory(0); + std::unique_ptr currLLHSamp(static_cast(samples[ivs]->getPDF(k)->Clone())); + currLLHSamp->SetDirectory(nullptr); currLLHSamp->Reset(""); currLLHSamp->Fill(0.0, 0.0, 0.0); TH2Poly* MCpdf = static_cast(samples[ivs]->getPDF(k)); TH2Poly* Datapdf = static_cast(samples[ivs]->getData(k)); - TH2Poly* W2pdf = static_cast(samples[ivs]->getW2(k)); + TH2Poly* W2pdf = samples[ivs]->getW2(k); + for(int bin = 1; bin < currLLHSamp->GetNumberOfBins()+1; bin++) { const double mc = MCpdf->GetBinContent(bin); @@ -1229,22 +1232,21 @@ void FitterBase::RunSigmaVar() { currLLHSamp->SetNameTitle((title_long+"_LLH").c_str() ,(title_long+"_LLH").c_str()); dirArrySample[SampleIterator]->cd(); currLLHSamp->Write(); - delete currLLHSamp; } // Project down onto x axis - sigmaArray_x[j][SampleIterator] = PolyProjectionX(currSamp, (title_long+"_xProj").c_str(), xbins); - sigmaArray_x[j][SampleIterator]->SetDirectory(0); + sigmaArray_x[j][SampleIterator] = PolyProjectionX(currSamp.get(), (title_long+"_xProj").c_str(), xbins); + sigmaArray_x[j][SampleIterator]->SetDirectory(nullptr); sigmaArray_x[j][SampleIterator]->GetXaxis()->SetTitle(currSamp->GetXaxis()->GetTitle()); - sigmaArray_y[j][SampleIterator] = PolyProjectionY(currSamp, (title_long+"_yProj").c_str(), ybins); - sigmaArray_y[j][SampleIterator]->SetDirectory(0); + sigmaArray_y[j][SampleIterator] = PolyProjectionY(currSamp.get(), (title_long+"_yProj").c_str(), ybins); + sigmaArray_y[j][SampleIterator]->SetDirectory(nullptr); sigmaArray_y[j][SampleIterator]->GetXaxis()->SetTitle(currSamp->GetYaxis()->GetTitle()); sigmaArray_x_norm[j][SampleIterator] = static_cast(sigmaArray_x[j][SampleIterator]->Clone()); - 
sigmaArray_x_norm[j][SampleIterator]->SetDirectory(0); + sigmaArray_x_norm[j][SampleIterator]->SetDirectory(nullptr); sigmaArray_x_norm[j][SampleIterator]->Scale(1., "width"); sigmaArray_y_norm[j][SampleIterator] = static_cast(sigmaArray_y[j][SampleIterator]->Clone()); - sigmaArray_y_norm[j][SampleIterator]->SetDirectory(0); + sigmaArray_y_norm[j][SampleIterator]->SetDirectory(nullptr); sigmaArray_y_norm[j][SampleIterator]->Scale(1., "width"); currSamp->SetNameTitle(title_long.c_str(), title_long.c_str()); @@ -1253,20 +1255,19 @@ void FitterBase::RunSigmaVar() { sigmaArray_x[j][k]->Write(); sigmaArray_y[j][k]->Write(); - delete currSamp; SampleIterator++; }//End loop over samples } } // End looping over variation // Restore the parameter to prior value - (*it)->setParProp(i, init); + cov->setParProp(i, init); SampleIterator = 0; // Get each sample and how it's responded to our reweighted parameter for(unsigned int ivs = 0; ivs < samples.size(); ivs++ ) { - for (_int_ k = 0; k < samples[ivs]->GetNsamples(); ++k) + for (int k = 0; k < samples[ivs]->GetNsamples(); ++k) { std::string title = std::string(samples[ivs]->getPDF(k)->GetName()) + "_" + name; TGraphAsymmErrors *var_x = MakeAsymGraph(sigmaArray_x[1][SampleIterator], sigmaArray_x[2][SampleIterator], sigmaArray_x[3][SampleIterator], (title+"_X").c_str()); @@ -1308,7 +1309,7 @@ void FitterBase::RunSigmaVar() { SampleIterator = 0; for(unsigned int ivs = 0; ivs < samples.size(); ivs++ ) { - for (_int_ k = 0; k < samples[ivs]->GetNsamples(); ++k) + for (int k = 0; k < samples[ivs]->GetNsamples(); ++k) { dirArrySample[SampleIterator]->Close(); delete dirArrySample[SampleIterator]; @@ -1322,7 +1323,7 @@ void FitterBase::RunSigmaVar() { SampleIterator = 0; for(unsigned int ivs = 0; ivs < samples.size(); ivs++ ) { - for (_int_ k = 0; k < samples[ivs]->GetNsamples(); ++k) + for (int k = 0; k < samples[ivs]->GetNsamples(); ++k) { delete sigmaArray_x[j][SampleIterator]; delete sigmaArray_y[j][SampleIterator]; @@ -1351,7 
+1352,7 @@ void FitterBase::RunSigmaVar() { SampleIterator = 0; for(unsigned int ivs = 0; ivs < samples.size(); ivs++ ) { - for (_int_ k = 0; k < samples[ivs]->GetNsamples(); ++k) + for (int k = 0; k < samples[ivs]->GetNsamples(); ++k) { for(int ir = 0; ir < nRelevantModes;ir++) { diff --git a/mcmc/MCMCProcessor.cpp b/mcmc/MCMCProcessor.cpp index badf70c4b..4dad4e36d 100644 --- a/mcmc/MCMCProcessor.cpp +++ b/mcmc/MCMCProcessor.cpp @@ -8,6 +8,9 @@ #include "mcmc/gpuMCMCProcessorUtils.cuh" #endif +//this file has lots of usage of the ROOT plotting interface that only takes floats, turn this warning off for this CU for now +#pragma GCC diagnostic ignored "-Wfloat-conversion" + // **************************** MCMCProcessor::MCMCProcessor(const std::string &InputFile) : Chain(nullptr), StepCut(""), MadePostfit(false) { @@ -22,7 +25,6 @@ MCMCProcessor::MCMCProcessor(const std::string &InputFile) : StepNumber = nullptr; Posterior = nullptr; - hpost = nullptr; hpost2D = nullptr; hviolin = nullptr; hviolin_prior = nullptr; @@ -118,13 +120,10 @@ MCMCProcessor::~MCMCProcessor() { delete Errors_HPD_Positive; delete Errors_HPD_Negative; - if(hpost != nullptr) + + for (int i = 0; i < nDraw; ++i) { - for (int i = 0; i < nDraw; ++i) - { - delete hpost[i]; - } - delete[] hpost; + if(hpost[i] != nullptr) delete hpost[i]; } if(CacheMCMC) { @@ -181,7 +180,7 @@ void MCMCProcessor::GetPostfit_Ind(TVectorD *&PDF_Central, TVectorD *&PDF_Errors MakePostfit(); // Loop over the loaded param types - const int ParamTypeSize = ParamType.size(); + const int ParamTypeSize = int(ParamType.size()); int ParamNumber = 0; for (int i = 0; i < ParamTypeSize; ++i) { if (ParamType[i] != kParam) continue; @@ -217,11 +216,12 @@ void MCMCProcessor::MakeOutputFile() { gStyle->SetOptTitle(0); Posterior->SetTickx(); Posterior->SetTicky(); + Posterior->SetBottomMargin(0.1); Posterior->SetTopMargin(0.05); Posterior->SetRightMargin(0.03); Posterior->SetLeftMargin(0.15); - + //To avoid TCanvas::Print> messages 
gErrorIgnoreLevel = kWarning; @@ -259,8 +259,7 @@ void MCMCProcessor::MakePostfit() { // nDraw is number of draws we want to do for (int i = 0; i < nDraw; ++i) { - if (i % (nDraw/5) == 0) - { + if (i % (nDraw/5) == 0) { MaCh3Utils::PrintProgressBar(i, nDraw); } OutputFile->cd(); @@ -463,7 +462,7 @@ void MCMCProcessor::DrawPostfit() { paramPlot->SetMarkerSize(prefit->GetMarkerSize()); // Same but with Gaussian output - TH1D *paramPlot_Gauss = (TH1D*)(paramPlot->Clone()); + TH1D *paramPlot_Gauss = static_cast(paramPlot->Clone()); paramPlot_Gauss->SetMarkerColor(kOrange-5); paramPlot_Gauss->SetMarkerStyle(23); paramPlot_Gauss->SetLineWidth(2); @@ -473,7 +472,7 @@ void MCMCProcessor::DrawPostfit() { paramPlot_Gauss->SetLineColor(paramPlot_Gauss->GetMarkerColor()); // Same but with Gaussian output - TH1D *paramPlot_HPD = (TH1D*)(paramPlot->Clone()); + TH1D *paramPlot_HPD = static_cast(paramPlot->Clone()); paramPlot_HPD->SetMarkerColor(kBlack); paramPlot_HPD->SetMarkerStyle(25); paramPlot_HPD->SetLineWidth(2); @@ -719,9 +718,10 @@ void MCMCProcessor::MakeCredibleIntervals(const std::vector& CredibleInt } } - const int nCredible = CredibleIntervals.size(); + const int nCredible = int(CredibleIntervals.size()); std::vector hpost_copy(nDraw); std::vector> hpost_cl(nDraw); + //KS: Copy all histograms to be thread safe for (int i = 0; i < nDraw; ++i) { @@ -753,7 +753,7 @@ void MCMCProcessor::MakeCredibleIntervals(const std::vector& CredibleInt if(CredibleInSigmas) { //KS: Convert sigmas into percentage - const double CredInter = GetSigmaValue((int)std::round(CredibleIntervals[j])); + const double CredInter = GetSigmaValue(int(std::round(CredibleIntervals[j]))); GetCredibleInterval(hpost_copy[i], hpost_cl[i][j], CredInter); } else @@ -1106,7 +1106,7 @@ void MCMCProcessor::CacheSteps() { } MACH3LOG_INFO("Caching input tree..."); - MACH3LOG_INFO("Allocating {:.2f} MB", (sizeof(double)*nDraw*nEntries)/1.E6); + MACH3LOG_INFO("Allocating {:.2f} MB", 
double(sizeof(double)*nDraw*nEntries)/1.E6); TStopwatch clock; clock.Start(); @@ -1138,10 +1138,10 @@ void MCMCProcessor::CacheSteps() { } Chain->SetBranchStatus("step", true); - const int countwidth = nEntries/10; + const Long64_t countwidth = nEntries/10; // Loop over the entries //KS: This is really a bottleneck right now, thus revisit with ROOT6 https://pep-root6.github.io/docs/analysis/parallell/root.html - for (int j = 0; j < nEntries; ++j) + for (Long64_t j = 0; j < nEntries; ++j) { if (j % countwidth == 0) MaCh3Utils::PrintProgressBar(j, nEntries); @@ -1198,7 +1198,6 @@ void MCMCProcessor::CacheSteps() { MACH3LOG_INFO("Caching steps took {:.2f}s to finish for {} steps", clock.RealTime(), nEntries ); } - // ********************* // Make the post-fit covariance matrix in all dimensions void MCMCProcessor::MakeCovariance_MP(bool Mute) { @@ -1305,7 +1304,6 @@ void MCMCProcessor::MakeCovariance_MP(bool Mute) { } } - // ********************* // Based on @cite roberts2009adaptive // all credits for finding and studying it goes to Henry @@ -1580,7 +1578,7 @@ void MCMCProcessor::DrawCorrelations1D() { //KS: Plot only meaningful correlations for(int i = 0; i < nDraw; i++) { - const int size = CorrOfInterest[i].size(); + const int size = int(CorrOfInterest[i].size()); if(size == 0) continue; TH1D* Corr1DHist_Reduced = new TH1D("Corr1DHist_Reduced", "Corr1DHist_Reduced", size, 0, size); @@ -1633,7 +1631,8 @@ void MCMCProcessor::MakeCredibleRegions(const std::vector& CredibleRegio MACH3LOG_ERROR("Size of CredibleRegions is not equat to size of CredibleRegionStyle"); throw MaCh3Exception(__FILE__ , __LINE__ ); } - const int nCredible = CredibleRegions.size(); + const int nCredible = int(CredibleRegions.size()); + std::vector> hpost_2D_copy(nDraw); std::vector>> hpost_2D_cl(nDraw); @@ -1666,7 +1665,7 @@ void MCMCProcessor::MakeCredibleRegions(const std::vector& CredibleRegio if(CredibleInSigmas) { //KS: Convert sigmas into percentage - double CredReg = 
GetSigmaValue((int)std::round(CredibleRegions[k])); + double CredReg = GetSigmaValue(int(std::round(CredibleRegions[k]))); GetCredibleRegion(hpost_2D_cl[i][j][k], CredReg); } else @@ -1772,7 +1771,7 @@ void MCMCProcessor::MakeTrianglePlot(const std::vector& ParNames, if(hpost2D == nullptr) MakeCovariance_MP(); MACH3LOG_INFO("Making Triangle Plot"); - const int nParamPlot = ParNames.size(); + const int nParamPlot = int(ParNames.size()); std::vector ParamNumber; for(int j = 0; j < nParamPlot; ++j) { @@ -1851,8 +1850,8 @@ void MCMCProcessor::MakeTrianglePlot(const std::vector& ParNames, throw MaCh3Exception(__FILE__ , __LINE__ ); } } - const int nCredibleIntervals = CredibleIntervals.size(); - const int nCredibleRegions = CredibleRegions.size(); + const int nCredibleIntervals = int(CredibleIntervals.size()); + const int nCredibleRegions = int(CredibleRegions.size()); //KS: Initialise Tpad histograms etc we will need TPad** TrianglePad = new TPad*[Npad]; @@ -1944,7 +1943,7 @@ void MCMCProcessor::MakeTrianglePlot(const std::vector& ParNames, if(CredibleInSigmas) { //KS: Convert sigmas into percentage - const double CredReg = GetSigmaValue((int)std::round(CredibleIntervals[j])); + const double CredReg = GetSigmaValue(int(std::round(CredibleIntervals[j]))); GetCredibleInterval(hpost_copy[counterPost], hpost_cl[counterPost][j], CredReg); } else @@ -1987,11 +1986,10 @@ void MCMCProcessor::MakeTrianglePlot(const std::vector& ParNames, { hpost_2D_cl[counter2DPost][k] = static_cast(hpost2D[ParamNumber[x]][ParamNumber[y]]->Clone( Form("hpost_copy_%i_%i_CL_%f", ParamNumber[x], ParamNumber[y], CredibleRegions[k]))); - if(CredibleInSigmas) { //KS: Convert sigmas into percentage - const double CredReg = GetSigmaValue((int)std::round(CredibleRegions[k])); + const double CredReg = GetSigmaValue(int(std::round(CredibleRegions[k]))); GetCredibleRegion(hpost_2D_cl[counter2DPost][k], CredReg); } else @@ -2133,13 +2131,14 @@ void MCMCProcessor::ScanInput() { Chain = new 
TChain("posteriors","posteriors"); Chain->Add(MCMCFile.c_str()); - nEntries = Chain->GetEntries(); + nEntries = int(Chain->GetEntries()); //Only is suboptimality we might want to change it, therefore set it high enough so it doesn't affect other functionality UpperCut = nEntries+1; // Get the list of branches - TObjArray* brlis = static_cast(Chain->GetListOfBranches()); + + TObjArray* brlis = Chain->GetListOfBranches(); // Get the number of branches nBranches = brlis->GetEntries(); @@ -2157,6 +2156,10 @@ void MCMCProcessor::ScanInput() { { // Get the TBranch and its name TBranch* br = static_cast(brlis->At(i)); + if(!br){ + MACH3LOG_ERROR("Invalid branch at position {}", i); + throw MaCh3Exception(__FILE__,__LINE__); + } TString bname = br->GetName(); //KS: Exclude parameter types @@ -2214,7 +2217,7 @@ void MCMCProcessor::ScanInput() { nSysts++; } } - nDraw = BranchNames.size(); + nDraw = int(BranchNames.size()); // Read the input Covariances ReadInputCov(); @@ -2289,8 +2292,7 @@ void MCMCProcessor::SetupOutput() { (*Correlation)(i, j) = _UNDEF_; } } - - hpost = new TH1D*[nDraw](); + hpost.resize(nDraw); } // **************************** @@ -2362,7 +2364,7 @@ TH1D* MCMCProcessor::MakePrefit() { PreFitPlot->SetBinError(i+1, Error); PreFitPlot->GetXaxis()->SetBinLabel(i+1, ParamNames[ParamEnum][ParamNo]); } - PreFitPlot->SetDirectory(0); + PreFitPlot->SetDirectory(nullptr); PreFitPlot->SetFillStyle(1001); PreFitPlot->SetFillColor(kRed-3); @@ -2395,11 +2397,11 @@ void MCMCProcessor::FindInputFiles() { // ************************** // Now read the MCMC file TFile *TempFile = new TFile(MCMCFile.c_str(), "open"); - - TDirectory* CovarianceFolder = static_cast(TempFile->Get("CovarianceFolder")); + TDirectory* CovarianceFolder = TempFile->Get("CovarianceFolder"); // Get the settings for the MCMC - TMacro* Config = static_cast(TempFile->Get("MaCh3_Config")); + TMacro *Config = TempFile->Get("MaCh3_Config"); + if (Config == nullptr) { MACH3LOG_ERROR("Didn't find MaCh3_Config 
tree in MCMC file! {}", MCMCFile); TempFile->ls(); @@ -2423,7 +2425,8 @@ void MCMCProcessor::FindInputFiles() { InputNotFound = true; } - TMacro *XsecConfig = static_cast(CovarianceFolder->Get("Config_xsec_cov")); + + TMacro *XsecConfig = CovarianceFolder->Get("Config_xsec_cov"); if (XsecConfig == nullptr) { MACH3LOG_WARN("Didn't find Config_xsec_cov tree in MCMC file! {}", MCMCFile); } else { @@ -2449,7 +2452,7 @@ void MCMCProcessor::FindInputFiles() { MACH3LOG_WARN("Couldn't find OscCov branch in output"); InputNotFound = true; } - TMacro *OscConfig = (TMacro*)(CovarianceFolder->Get("Config_osc_cov")); + TMacro *OscConfig = CovarianceFolder->Get("Config_osc_cov"); if (OscConfig == nullptr) { MACH3LOG_WARN("Didn't find Config_osc_cov tree in MCMC file! {}", MCMCFile); } else { @@ -2458,19 +2461,19 @@ void MCMCProcessor::FindInputFiles() { if(InputNotFound) MaCh3Utils::PrintConfig(Settings); - if (std::getenv("MACH3") != nullptr) + if (const char * mach3_env = std::getenv("MACH3")) { - for(unsigned int i = 0; i < CovPos[kXSecPar].size(); i++) - CovPos[kXSecPar][i].insert(0, std::string(std::getenv("MACH3"))+"/"); + for(size_t i = 0; i < CovPos[kXSecPar].size(); i++) + CovPos[kXSecPar][i].insert(0, std::string(mach3_env)+"/"); - for(unsigned int i = 0; i < CovPos[kNDPar].size(); i++) - CovPos[kNDPar][i].insert(0, std::string(std::getenv("MACH3"))+"/"); + for(size_t i = 0; i < CovPos[kNDPar].size(); i++) + CovPos[kNDPar][i].insert(0, std::string(mach3_env)+"/"); - for(unsigned int i = 0; i < CovPos[kFDDetPar].size(); i++) - CovPos[kFDDetPar][i].insert(0, std::string(std::getenv("MACH3"))+"/"); + for(size_t i = 0; i < CovPos[kFDDetPar].size(); i++) + CovPos[kFDDetPar][i].insert(0, std::string(mach3_env)+"/"); - for(unsigned int i = 0; i < CovPos[kOSCPar].size(); i++) - CovPos[kOSCPar][i].insert(0, std::string(std::getenv("MACH3"))+"/"); + for(size_t i = 0; i < CovPos[kOSCPar].size(); i++) + CovPos[kOSCPar][i].insert(0, std::string(mach3_env)+"/"); } // Delete the 
TTrees and the input file handle since we've now got the settings we need @@ -2544,9 +2547,10 @@ void MCMCProcessor::ReadNDFile() { } NDdetFile->cd(); - TMatrixDSym *NDdetMatrix = static_cast(NDdetFile->Get("nddet_cov")); - TVectorD *NDdetNominal = static_cast(NDdetFile->Get("det_weights")); - TDirectory *BinningDirectory = static_cast(NDdetFile->Get("Binning")->Clone()); + TMatrixDSym *NDdetMatrix = NDdetFile->Get("nddet_cov"); + TVectorD *NDdetNominal = NDdetFile->Get("det_weights"); + TDirectory *BinningDirectory = NDdetFile->Get("Binning"); + for (int i = 0; i < NDdetNominal->GetNrows(); ++i) { ParamNom[kNDPar].push_back( (*NDdetNominal)(i) ); @@ -2561,10 +2565,10 @@ void MCMCProcessor::ReadNDFile() { TIter next(BinningDirectory->GetListOfKeys()); TKey *key = nullptr; // Loop through all entries - while ((key = (TKey*)next())) + while ((key = static_cast(next()))) { std::string name = std::string(key->GetName()); - TH2Poly* RefPoly = (TH2Poly*)BinningDirectory->Get((name).c_str())->Clone(); + TH2Poly* RefPoly = BinningDirectory->Get((name).c_str()); int size = RefPoly->GetNumberOfBins(); NDSamplesBins.push_back(size); NDSamplesNames.push_back(RefPoly->GetTitle()); @@ -2586,7 +2590,7 @@ void MCMCProcessor::ReadFDFile() { } FDdetFile->cd(); - TMatrixDSym *FDdetMatrix = (TMatrixDSym*)(FDdetFile->Get("SKJointError_Erec_Total")); + TMatrixDSym *FDdetMatrix = FDdetFile->Get("SKJointError_Erec_Total"); for (int i = 0; i < FDdetMatrix->GetNrows(); ++i) { @@ -2612,7 +2616,6 @@ void MCMCProcessor::ReadFDFile() { // Read the Osc cov file and get the input central values and errors void MCMCProcessor::ReadOSCFile() { // *************** - YAML::Node OscFile = CovConfig[kOSCPar];; auto systematics = OscFile["Systematics"]; @@ -2697,7 +2700,6 @@ void MCMCProcessor::GetNthParameter(const int param, double &Prior, double &Prio Prior = ParamCentral[ParType][ParamNo]; PriorError = ParamErrors[ParType][ParamNo]; Title = ParamNames[ParType][ParamNo]; - return; } // *************** 
@@ -2875,7 +2877,6 @@ void MCMCProcessor::GetBayesFactor(const std::vector& ParNames, MACH3LOG_INFO("Following Dunne-Kaboth Scale = ", DunneKabothScale); std::cout<& ParNames, const std::vector& EvaluationPoint, const std::vector>& Bounds){ // ************************** - if((ParNames.size() != EvaluationPoint.size()) || (Bounds.size() != EvaluationPoint.size())) { MACH3LOG_ERROR("Size doesn't match"); @@ -2919,7 +2919,7 @@ void MCMCProcessor::GetSavageDickey(const std::vector& ParNames, int ParamTemp = ParamNo - ParamTypeStartPos[ParType]; FlatPrior = ParamFlat[ParType][ParamTemp]; - TH1D* PosteriorHist = (TH1D*) hpost[ParamNo]->Clone(Title); + TH1D* PosteriorHist = static_cast(hpost[ParamNo]->Clone(Title)); RemoveFitter(PosteriorHist, "Gauss"); TH1D* PriorHist = nullptr; @@ -2942,7 +2942,7 @@ void MCMCProcessor::GetSavageDickey(const std::vector& ParNames, } else //KS: Otherwise throw from Gaussian { - PriorHist = (TH1D*) PosteriorHist->Clone("Prior"); + PriorHist = static_cast(PosteriorHist->Clone("Prior")); PriorHist->Reset(""); PriorHist->Fill(0.0, 0.0); @@ -3028,7 +3028,6 @@ void MCMCProcessor::ReweightPrior(const std::vector& Names, const std::vector& NewCentral, const std::vector& NewError) { // ************************** - MACH3LOG_INFO("Reweighting Prior"); if( (Names.size() != NewCentral.size()) || (NewCentral.size() != NewError.size())) @@ -3078,7 +3077,7 @@ void MCMCProcessor::ReweightPrior(const std::vector& Names, TFile *OutputChain = new TFile(OutputFilename.c_str(), "UPDATE"); OutputChain->cd(); - TTree *post = (TTree *)OutputChain->Get("posteriors"); + TTree *post = OutputChain->Get("posteriors"); double Weight = 1.; @@ -3131,6 +3130,8 @@ void MCMCProcessor::ReweightPrior(const std::vector& Names, void MCMCProcessor::ParameterEvolution(const std::vector& Names, const std::vector& NIntervals) { // ************************** + Int_t oldLevel = gErrorIgnoreLevel; + gErrorIgnoreLevel = kError; // Suppress warnings MACH3LOG_INFO("Parameter Evolution 
gif"); //KS: First we need to find parameter number based on name @@ -3146,8 +3147,10 @@ void MCMCProcessor::ParameterEvolution(const std::vector& Names, const int IntervalsSize = nSteps/NIntervals[k]; // ROOT won't overwrite gifs so we need to delete the file if it's there already - std::remove(std::string(Names[k]+".gif").c_str()); - + int ret = system(fmt::format("rm {}.gif",Names[k]).c_str()); + if (ret != 0){ + MACH3LOG_WARN("Error: system call to delete {} failed with code {}", Names[k], ret); + } // This holds the posterior density const double maxi = Chain->GetMaximum(BranchNames[ParamNo]); const double mini = Chain->GetMinimum(BranchNames[ParamNo]); @@ -3156,7 +3159,8 @@ void MCMCProcessor::ParameterEvolution(const std::vector& Names, for(int i = NIntervals[k]-1; i >= 0; --i) { // This holds the posterior density - TH1D* EvePlot = new TH1D(BranchNames[ParamNo], BranchNames[ParamNo], nBins, mini, maxi); + auto EvePlot = std::make_unique(BranchNames[ParamNo], BranchNames[ParamNo], nBins, mini, maxi); + EvePlot->SetDirectory(nullptr); EvePlot->SetMinimum(0); EvePlot->GetYaxis()->SetTitle("PDF"); EvePlot->GetYaxis()->SetNoExponent(false); @@ -3179,20 +3183,19 @@ void MCMCProcessor::ParameterEvolution(const std::vector& Names, EvePlot->Draw("HIST"); - TText *text = new TText(0.3, 0.8, TextTitle.c_str()); - text->SetTextFont (43); - text->SetTextSize (40); - text->SetNDC(true); - text->Draw("SAME"); + TText text(0.3, 0.8, TextTitle.c_str()); + text.SetTextFont (43); + text.SetTextSize (40); + text.SetNDC(true); + text.Draw("SAME"); - if(i == 0) Posterior->Print((std::string(Names[k] + ".gif++20").c_str())); // produces infinite loop animated GIF - else Posterior->Print((std::string(Names[k]+".gif+20").c_str())); // add picture to .gif - - delete EvePlot; - delete text; + if(i == 0) Posterior->Print(((Names[k] + ".gif++20").c_str())); // produces infinite loop animated GIF + else Posterior->Print(((Names[k]+".gif+20").c_str())); // add picture to .gif Counter++; 
} } + // Restore the original level + gErrorIgnoreLevel = oldLevel; } // ************************** @@ -3378,7 +3381,7 @@ void MCMCProcessor::PrepareDiagMCMC() { #pragma omp parallel for #endif for (int i = 0; i < nDraw; ++i) { - ParamSums[i] /= nEntries; + ParamSums[i] /= double(nEntries); for (int j = 0; j < nBatches; ++j) { // Divide by the total number of events in the batch BatchedAverages[j][i] /= BatchLength; @@ -3394,13 +3397,12 @@ void MCMCProcessor::PrepareDiagMCMC() { //CW: Draw trace plots of the parameters i.e. parameter vs step void MCMCProcessor::ParamTraces() { // ***************** - if (ParStep == nullptr) PrepareDiagMCMC(); MACH3LOG_INFO("Making trace plots..."); // Make the TH1Ds - TH1D** TraceParamPlots = new TH1D*[nDraw]; - TH1D** TraceSamplePlots = new TH1D*[nSamples]; - TH1D** TraceSystsPlots = new TH1D*[nSysts]; + std::vector TraceParamPlots(nDraw); + std::vector TraceSamplePlots(nSamples); + std::vector TraceSystsPlots(nSysts); // Set the titles and limits for TH2Ds for (int j = 0; j < nDraw; ++j) { @@ -3411,7 +3413,6 @@ void MCMCProcessor::ParamTraces() { GetNthParameter(j, Prior, PriorError, Title); std::string HistName = Form("%s_%s_Trace", Title.Data(), BranchNames[j].Data()); - TraceParamPlots[j] = new TH1D(HistName.c_str(), HistName.c_str(), nEntries, 0, nEntries); TraceParamPlots[j]->GetXaxis()->SetTitle("Step"); TraceParamPlots[j]->GetYaxis()->SetTitle("Parameter Variation"); @@ -3432,7 +3433,6 @@ void MCMCProcessor::ParamTraces() { } // Have now made the empty TH1Ds, now for writing content to them! 
- // Loop over the number of parameters to draw their traces // Each histogram #ifdef MULTITHREAD @@ -3444,11 +3444,9 @@ void MCMCProcessor::ParamTraces() { for (int j = 0; j < nDraw; ++j) { TraceParamPlots[j]->SetBinContent(i, ParStep[j][i]); } - for (int j = 0; j < nSamples; ++j) { TraceSamplePlots[j]->SetBinContent(i, SampleValues[i][j]); } - for (int j = 0; j < nSysts; ++j) { TraceSystsPlots[j]->SetBinContent(i, SystValues[i][j]); } @@ -3459,14 +3457,13 @@ void MCMCProcessor::ParamTraces() { TraceDir->cd(); for (int j = 0; j < nDraw; ++j) { // Fit a linear function to the traces - TF1 *Fitter = new TF1("Fitter","[0]", int(nEntries/2), nEntries); + TF1 *Fitter = new TF1("Fitter","[0]", nEntries/2, nEntries); Fitter->SetLineColor(kRed); TraceParamPlots[j]->Fit("Fitter","Rq"); TraceParamPlots[j]->Write(); delete Fitter; delete TraceParamPlots[j]; } - delete[] TraceParamPlots; TDirectory *LLDir = OutputFile->mkdir("LogL"); LLDir->cd(); @@ -3475,7 +3472,6 @@ void MCMCProcessor::ParamTraces() { delete TraceSamplePlots[j]; delete[] SampleValues[j]; } - delete[] TraceSamplePlots; delete[] SampleValues; for (int j = 0; j < nSysts; ++j) { @@ -3483,7 +3479,6 @@ void MCMCProcessor::ParamTraces() { delete TraceSystsPlots[j]; delete SystValues[j]; } - delete[] TraceSystsPlots; delete[] SystValues; TraceDir->Close(); @@ -3496,7 +3491,6 @@ void MCMCProcessor::ParamTraces() { //KS: Calculate autocorrelations supports both OpenMP and CUDA :) void MCMCProcessor::AutoCorrelation() { // ********************************* - if (ParStep == nullptr) PrepareDiagMCMC(); TStopwatch clock; @@ -3505,15 +3499,15 @@ void MCMCProcessor::AutoCorrelation() { MACH3LOG_INFO("Making auto-correlations for nLags = {}", nLags); // The sum of (Y-Ymean)^2 over all steps for each parameter - double **DenomSum = new double*[nDraw](); - double **NumeratorSum = new double*[nDraw](); - double **LagL = new double*[nDraw]; + std::vector> DenomSum(nDraw); + std::vector> NumeratorSum(nDraw); + std::vector> 
LagL(nDraw); for (int j = 0; j < nDraw; ++j) { - DenomSum[j] = new double[nLags]; - NumeratorSum[j] = new double[nLags]; - LagL[j] = new double[nLags]; + DenomSum[j].resize(nLags); + NumeratorSum[j].resize(nLags); + LagL[j].resize(nLags); } - TH1D** LagKPlots = new TH1D*[nDraw]; + std::vector LagKPlots(nDraw); // Loop over the parameters of interest for (int j = 0; j < nDraw; ++j) { @@ -3555,7 +3549,6 @@ void MCMCProcessor::AutoCorrelation() { const double Product = Diff*LagTerm; NumeratorSum[j][k] += Product; } - // Square the difference to form the denominator const double Denom = Diff*Diff; DenomSum[j][k] += Denom; @@ -3617,19 +3610,10 @@ void MCMCProcessor::AutoCorrelation() { LagKPlots[j]->Write(); delete LagKPlots[j]; } - delete[] LagKPlots; //KS: This is different diagnostic however it relies on calculated Lag, thus we call it before we delete LagKPlots CalculateESS(nLags, LagL); - for (int j = 0; j < nDraw; ++j) { - delete[] NumeratorSum[j]; - delete[] DenomSum[j]; - delete[] LagL[j]; - } - delete[] NumeratorSum; - delete[] DenomSum; - delete[] LagL; delete[] ParamSums; AutoCorrDir->Close(); @@ -3726,11 +3710,11 @@ void MCMCProcessor::PrepareGPU_AutoCorr(const int nLags) { // KS: calc Effective Sample Size Following @cite StanManual // Furthermore we calculate Sampling efficiency following @cite hanson2008mcmc // Rule of thumb is to have efficiency above 25% -void MCMCProcessor::CalculateESS(const int nLags, double** LagL) { +void MCMCProcessor::CalculateESS(const int nLags, const std::vector>& LagL) { // ************************** - if(LagL == nullptr) + if(LagL.size() == 0) { - MACH3LOG_ERROR("LagL is nullptr"); + MACH3LOG_ERROR("Size of LagL is 0"); throw MaCh3Exception(__FILE__ , __LINE__ ); } MACH3LOG_INFO("Making ESS plots..."); @@ -3777,7 +3761,7 @@ void MCMCProcessor::CalculateESS(const int nLags, double** LagL) { TempDenominator[j] += LagL[j][k]; } TempDenominator[j] = 1+2*TempDenominator[j]; - (*EffectiveSampleSize)(j) = 
nEntries/TempDenominator[j]; + (*EffectiveSampleSize)(j) = double(nEntries)/TempDenominator[j]; // 100 because we convert to percentage (*SamplingEfficiency)(j) = 100 * 1/TempDenominator[j]; @@ -3786,7 +3770,7 @@ void MCMCProcessor::CalculateESS(const int nLags, double** LagL) { EffectiveSampleSizeHist[i]->SetBinContent(j+1, 0); EffectiveSampleSizeHist[i]->SetBinError(j+1, 0); - const double TempEntry = std::fabs((*EffectiveSampleSize)(j)) / nEntries; + const double TempEntry = std::fabs((*EffectiveSampleSize)(j)) / double(nEntries); if(Thresholds[i] >= TempEntry && TempEntry > Thresholds[i+1]) { if( std::isnan((*EffectiveSampleSize)(j)) ) continue; @@ -3831,11 +3815,10 @@ void MCMCProcessor::CalculateESS(const int nLags, double** LagL) { //CW: Batched means, literally read from an array and chuck into TH1D void MCMCProcessor::BatchedMeans() { // ************************** - if (BatchedAverages == nullptr) PrepareDiagMCMC(); MACH3LOG_INFO("Making BatchedMeans plots..."); - TH1D ** BatchedParamPlots = new TH1D*[nDraw]; + std::vector BatchedParamPlots(nDraw); for (int j = 0; j < nDraw; ++j) { TString Title = ""; double Prior = 1.0; @@ -3871,7 +3854,6 @@ void MCMCProcessor::BatchedMeans() { delete Fitter; delete BatchedParamPlots[j]; } - delete[] BatchedParamPlots; //KS: Get the batched means variance estimation and variable indicating if number of batches is sensible // We do this before deleting BatchedAverages @@ -4054,7 +4036,7 @@ void MCMCProcessor::PowerSpectrumAnalysis() { std::complex exp_temp(0, two_pi_over_N * jj * n); a_j += ParStep[j][n] * std::exp(exp_temp); } - a_j /= float(std::sqrt(float(_N))); + a_j /= std::sqrt(float(_N)); const int _c = jj - start; k_j[j][_c] = two_pi_over_N * jj; @@ -4066,11 +4048,10 @@ void MCMCProcessor::PowerSpectrumAnalysis() { TDirectory *PowerDir = OutputFile->mkdir("PowerSpectrum"); PowerDir->cd(); - TGraph **plot = new TGraph*[nPrams]; TVectorD* PowerSpectrumStepSize = new TVectorD(nPrams); for (int j = 0; j < nPrams; ++j) 
{ - plot[j] = new TGraph(v_size, k_j[j].data(), P_j[j].data()); + TGraph* plot = new TGraph(v_size, k_j[j].data(), P_j[j].data()); TString Title = ""; double Prior = 1.0; @@ -4080,10 +4061,10 @@ void MCMCProcessor::PowerSpectrumAnalysis() { std::string name = Form("Power Spectrum of %s;k;P(k)", Title.Data()); - plot[j]->SetTitle(name.c_str()); + plot->SetTitle(name.c_str()); name = Form("%s_power_spectrum", Title.Data()); - plot[j]->SetName(name.c_str()); - plot[j]->SetMarkerStyle(7); + plot->SetName(name.c_str()); + plot->SetMarkerStyle(7); // Equation 18 TF1 *func = new TF1("power_template", "[0]*( ([1] / x)^[2] / (([1] / x)^[2] +1) )", 0.0, 1.0); @@ -4099,23 +4080,21 @@ void MCMCProcessor::PowerSpectrumAnalysis() { func->SetParLimits(1, 0.001, 1.0); // k* should be within a reasonable range func->SetParLimits(2, 0.0, 5.0); // alpha should be positive - plot[j]->Fit("power_template","Rq"); + plot->Fit("power_template","Rq"); Posterior->SetLogx(); Posterior->SetLogy(); Posterior->SetGrid(); - plot[j]->Write(plot[j]->GetName()); - plot[j]->Draw("AL"); + plot->Write(plot->GetName()); + plot->Draw("AL"); func->Draw("SAME"); if(printToPDF) Posterior->Print(CanvasName); //KS: I have no clue what is the reason behind this. Found this in Rick Calland code... (*PowerSpectrumStepSize)(j) = std::sqrt(func->GetParameter(0)/float(v_size*0.5)); - delete func; - delete plot[j]; + delete plot; } - delete [] plot; PowerSpectrumStepSize->Write("PowerSpectrumStepSize"); delete PowerSpectrumStepSize; diff --git a/mcmc/MCMCProcessor.h b/mcmc/MCMCProcessor.h index 860842fb2..ef91d7a00 100644 --- a/mcmc/MCMCProcessor.h +++ b/mcmc/MCMCProcessor.h @@ -159,6 +159,11 @@ class MCMCProcessor { void ParameterEvolution(const std::vector& Names, const std::vector& NIntervals); + /// @brief Thin MCMC Chain, to save space and maintain low autocorrelations. 
+ /// @param ThinningCut every which entry you want to thin + /// @param Average If true will perform MCMC averaging instead of thinning + inline void ThinMCMC(const int ThinningCut) { ThinningMCMC(MCMCFile+".root", ThinningCut); }; + /// @brief KS: Perform MCMC diagnostic including Autocorrelation, Trace etc. void DiagMCMC(); @@ -205,9 +210,9 @@ class MCMCProcessor { /// @brief Get parameter number based on name int GetParamIndexFromName(const std::string& Name); /// @brief Get Number of entries that Chain has, for merged chains will not be the same Nsteps - inline int GetnEntries(){return nEntries;}; + inline Long64_t GetnEntries(){return nEntries;}; /// @brief Get Number of Steps that Chain has, for merged chains will not be the same nEntries - inline int GetnSteps(){return nSteps;}; + inline Long64_t GetnSteps(){return nSteps;}; /// @brief Set the step cutting by string /// @param Cuts string telling cut value @@ -242,6 +247,7 @@ class MCMCProcessor { /// @brief Sett output suffix, this way jobs using the same file will have different names inline void SetOutputSuffix(const std::string Suffix){OutputSuffix = Suffix; }; + /// @brief Allow to set addtional cuts based on ROOT TBrowser cut, for to only affect one mass ordering inline void SetPosterior1DCut(const std::string Cut){Posterior1DCut = Cut; }; private: /// @brief Prepare prefit histogram for parameter overlay plot @@ -291,7 +297,7 @@ class MCMCProcessor { /// \cite StanManual /// \cite hanson2008mcmc /// \cite gabry2024visual - inline void CalculateESS(const int nLags, double **LagL); + inline void CalculateESS(const int nLags, const std::vector>& LagL); /// @brief Get the batched means variance estimation and variable indicating if number of batches is sensible /// \cite chakraborty2019estimating /// \cite rossetti2024batch @@ -304,7 +310,8 @@ class MCMCProcessor { inline void GewekeDiagnostic(); /// @brief Acceptance Probability inline void AcceptanceProbabilities(); - /// @brief RC: Perform spectral 
analysis of MCMC based on \cite Dunkley:2004sv + /// @brief RC: Perform spectral analysis of MCMC + /// \cite Dunkley:2004sv inline void PowerSpectrumAnalysis(); /// Name of MCMC file @@ -316,7 +323,6 @@ class MCMCProcessor { /// Covariance matrix config std::vector CovConfig; - /// Main chain storing all steps etc TChain *Chain; /// BurnIn Cuts @@ -438,7 +444,7 @@ class MCMCProcessor { TMatrixDSym *Correlation; /// Holds 1D Posterior Distributions - TH1D **hpost; + std::vector hpost; /// Holds 2D Posterior Distributions TH2D ***hpost2D; /// Holds violin plot for all dials diff --git a/mcmc/MaCh3Factory.cpp b/mcmc/MaCh3Factory.cpp index 0b17aed37..e257bc585 100644 --- a/mcmc/MaCh3Factory.cpp +++ b/mcmc/MaCh3Factory.cpp @@ -3,7 +3,7 @@ // ******************************************** -std::unique_ptr MaCh3FitterFactory(manager *fitMan, std::vector& Samples, std::vector& Covariances) { +std::unique_ptr MaCh3FitterFactory(manager *fitMan) { // ******************************************** std::unique_ptr MaCh3Fitter = nullptr; @@ -24,61 +24,11 @@ std::unique_ptr MaCh3FitterFactory(manager *fitMan, std::vectoraddSamplePDF(Samples[i]); - for(unsigned int i = 0; Covariances.size(); i++) - MaCh3Fitter->addSystObj(Covariances[i]); - return MaCh3Fitter; } // ******************************************** covarianceXsec* MaCh3CovarianceFactory(manager *FitManager, const std::string& PreFix) { // ******************************************** - // config for our matrix - YAML::Node Settings = FitManager->raw()["General"]["Systematics"]; - auto CovMatrixName = Settings[std::string(PreFix) + "CovName"].as(); - MACH3LOG_INFO("Initialising {} matrix", CovMatrixName); - - // yaml files initialising out matrix - auto CovMatrixFile = Settings[std::string(PreFix) + "CovFile"].as>(); - - // PCA threshold, -1 means no pca - auto PCAThreshold = GetFromManager(Settings[std::string(PreFix) + "PCAThreshold"], -1); - // do we pca whole matrix or only submatrix - auto PCAParamRegion = 
GetFromManager>(Settings[std::string(PreFix) + "PCAParams"], {-999, -999}); - - /// @todo this massive hack with "xsec_cov" is because we have const char * ... will have to fix it later... - covarianceXsec* xsec = new covarianceXsec(CovMatrixFile, "xsec_cov", PCAThreshold, PCAParamRegion[0], PCAParamRegion[1]); - - // Fill the parameter values with their nominal values - // should _ALWAYS_ be done before overriding with fix or flat - xsec->setParameters(); - - auto FixParams = GetFromManager>(Settings[std::string(PreFix) + "FixParams"], {}); - - // Fixed xsec parameters loop - if (FixParams.size() == 1 && FixParams.at(0) == "All") { - for (int j = 0; j < xsec->GetNumParams(); j++) { - xsec->toggleFixParameter(j); - } - } else { - for (unsigned int j = 0; j < FixParams.size(); j++) { - xsec->toggleFixParameter(FixParams.at(j)); - } - } - //Global step scale for matrix - auto StepScale = Settings[std::string(PreFix) + "StepScale"].as(); - - xsec->setStepScale(StepScale); - - // Adaptive MCMC stuff - if(FitManager->raw()["AdaptionOptions"]) - xsec->initialiseAdaption(FitManager->raw()); - - MACH3LOG_INFO("Factory successful"); - - return xsec; + return MaCh3CovarianceFactory(FitManager, PreFix); } diff --git a/mcmc/MaCh3Factory.h b/mcmc/MaCh3Factory.h index 47b7efdf8..160c6f8cc 100644 --- a/mcmc/MaCh3Factory.h +++ b/mcmc/MaCh3Factory.h @@ -22,10 +22,14 @@ /// @code /// General: /// FittingAlgorithm: ["MCMC"] -std::unique_ptr MaCh3FitterFactory(manager *fitMan, std::vector& Samples, std::vector& Covariances); +std::unique_ptr MaCh3FitterFactory(manager *fitMan); +/// @brief Factory function for creating a covariance class for systematic handling. +covarianceXsec* MaCh3CovarianceFactory(manager *FitManager, const std::string& PreFix); -/// @brief Factory function for creating a covariance matrix for systematic handling. + +// ******************************************** +/// @brief Factory function for creating a covariance class for systematic handling. 
/// /// @param fitMan Pointer to the manager class that holds the configuration settings. /// @param name Prefix, for example Xsec, then code will look for XsecCovFile @@ -50,6 +54,89 @@ std::unique_ptr MaCh3FitterFactory(manager *fitMan, std::vector +CovType* MaCh3CovarianceFactory(manager *FitManager, const std::string& PreFix){ +// ******************************************** + // config for our matrix + YAML::Node Settings = FitManager->raw()["General"]["Systematics"]; + auto CovMatrixName = Settings[std::string(PreFix) + "CovName"].as(); + MACH3LOG_INFO("Initialising {} matrix", CovMatrixName); + + // yaml files initialising out matrix + auto CovMatrixFile = Settings[std::string(PreFix) + "CovFile"].as>(); + + // PCA threshold, -1 means no pca + auto PCAThreshold = GetFromManager(Settings[std::string(PreFix) + "PCAThreshold"], -1); + // do we pca whole matrix or only submatrix + auto PCAParamRegion = GetFromManager>(Settings[std::string(PreFix) + "PCAParams"], {-999, -999}); + + CovType* CovObject = new CovType(CovMatrixFile, CovMatrixName, PCAThreshold, PCAParamRegion[0], PCAParamRegion[1]); + + // Fill the parameter values with their nominal values + // should _ALWAYS_ be done before overriding with fix or flat + CovObject->setParameters(); + + auto FixParams = GetFromManager>(Settings[std::string(PreFix) + "Fix"], {}); + + // Fixed CovObject parameters loop + if (FixParams.size() == 1 && FixParams.at(0) == "All") { + for (int j = 0; j < CovObject->GetNumParams(); j++) { + CovObject->toggleFixParameter(j); + } + } else { + for (unsigned int j = 0; j < FixParams.size(); j++) { + CovObject->toggleFixParameter(FixParams.at(j)); + } + } + //Global step scale for matrix + auto StepScale = Settings[std::string(PreFix) + "StepScale"].as(); + + CovObject->setStepScale(StepScale); + + // Adaptive MCMC stuff + if(FitManager->raw()["AdaptionOptions"]) + CovObject->initialiseAdaption(FitManager->raw()); + + MACH3LOG_INFO("Factory successful"); + + return CovObject; +} + 
+// ******************************************** +/// @brief Factory function for creating SamplePDF and initialisation with systematic. +/// +/// @tparam SampleType The class type of the sample to create, e.g., `samplePDFTutorial`. +/// @param SampleConfig Path to sample config. +/// @param xsec A pointer to a covarianceXsec object for cross-section systematic settings. +/// @param osc (Optional) A pointer to a covarianceOsc object for oscillation systematic settings. +/// @return Vector of SampleType object, initialized and ready for use. +/// +/// @note Example +/// ```cpp +/// auto mySamples = MaCh3SamplePDFFactory(SampleConfig, xsec, osc); +/// ``` +template +std::vector MaCh3SamplePDFFactory(const std::vector& SampleConfig, + covarianceXsec* xsec, + covarianceOsc* osc = nullptr) { +// ******************************************** + std::vector PDFs(SampleConfig.size()); + for (size_t i = 0; i < SampleConfig.size(); ++i) + { + // Instantiate the sample using the specified class type + SampleType* Sample = new SampleType(SampleConfig[i], xsec); + Sample->SetXsecCov(xsec); + if (osc != nullptr) Sample->SetOscCov(osc); + Sample->reweight(); + // Obtain sample name and create a TString version for histogram naming + std::string name = Sample->GetName(); + TString NameTString = TString(name.c_str()); + // Clone the 1D histogram with a modified name + TH1D* SampleHistogramPrior = static_cast(Sample->get1DHist()->Clone(NameTString + "_Prior")); + Sample->addData(SampleHistogramPrior); + PDFs[i] = Sample; + } + return PDFs; +} diff --git a/mcmc/MinuitFit.cpp b/mcmc/MinuitFit.cpp index e54feb94a..f2107db20 100644 --- a/mcmc/MinuitFit.cpp +++ b/mcmc/MinuitFit.cpp @@ -37,7 +37,7 @@ void MinuitFit::runMCMC() { //KS: add config or something minuit->SetPrintLevel(2); minuit->SetTolerance(0.01); - minuit->SetMaxFunctionCalls(fitMan->raw()["General"]["Minuit2"]["NSteps"].as()); + minuit->SetMaxFunctionCalls(fitMan->raw()["General"]["Minuit2"]["NSteps"].as()); 
minuit->SetMaxIterations(10000); MACH3LOG_INFO("Preparing Minuit"); diff --git a/mcmc/PSO.cpp b/mcmc/PSO.cpp index 1fe496c56..96e42b42f 100644 --- a/mcmc/PSO.cpp +++ b/mcmc/PSO.cpp @@ -1,7 +1,10 @@ #include "PSO.h" -PSO::PSO(manager *man) : LikelihoodFit(man) { +#include +// *************** +PSO::PSO(manager *man) : LikelihoodFit(man) { +// *************** fConstriction = fitMan->raw()["General"]["PSO"]["Constriction"].as(); fInertia = fitMan->raw()["General"]["PSO"]["Inertia"].as()*fConstriction; fOne = fitMan->raw()["General"]["PSO"]["One"].as()*fConstriction; @@ -18,8 +21,9 @@ PSO::PSO(manager *man) : LikelihoodFit(man) { } } +// *************** void PSO::runMCMC(){ - +// *************** PrepareFit(); if(fTestLikelihood){ @@ -39,17 +43,16 @@ void PSO::runMCMC(){ return; } - +// ************************* void PSO::init(){ - +// ************************* fBestValue = 1234567890.0; //KS: For none PCA this will be eqaul to normal parameters //const int NparsPSOFull = NPars; //const int NparsPSO = NParsPCA; - std::cout << "Preparing PSO" << std::endl; - + MACH3LOG_INFO("Preparing PSO"); // Initialise bounds on parameters if(fTestLikelihood){ for (int i = 0; i < fDim; i++){ @@ -103,9 +106,9 @@ void PSO::init(){ } } - std::cout << "Printing Minimums and Maximums of Variables to be minimised" << std::endl; - for (int i =0; i > PSO::bisection(std::vectorposition,double minimum, double range, double precision){ +// ************************* std::vector> uncertainties_list; - for (unsigned int i = 0; i< position.size(); ++i){ - std::cout << i << std::endl; + for (unsigned int i = 0; i< position.size(); ++i) { + MACH3LOG_INFO("{}", i); //std::vector uncertainties; std::vector new_position = position; new_position[i] = position[i]-range; double val_1 = CalcChi(new_position)-minimum-1.0; @@ -165,7 +170,7 @@ std::vector > PSO::bisection(std::vectorposition,dou value_list[2] = value_list[1]; value_list[1] = new_val; position_list[1] = new_bisect_position; - res = 
abs(position[2]-position[0]); + res = std::abs(position[2]-position[0]); } else{ std::vector new_bisect_position = position_list[1];new_bisect_position[i] += (position_list[2][i]-position_list[1][i])/2; @@ -174,7 +179,7 @@ std::vector > PSO::bisection(std::vectorposition,dou value_list[0] = value_list[1]; value_list[1] = new_val; position_list[1] = new_bisect_position; - res = abs(position_list[2][i]-position_list[1][i]); + res = std::abs(position_list[2][i]-position_list[1][i]); } } //do the same thing for position uncertainty @@ -197,9 +202,10 @@ std::vector > PSO::bisection(std::vectorposition,dou value_list_p[2] = value_list_p[1]; value_list_p[1] = new_val_p; position_list_p[1] = new_bisect_position_p; - res = abs(position[2]-position[0]); - res_p = abs(position_list_p[1][i]-position_list_p[0][i]); - //std::cout << "Pos midpoint is " << position_list_p[1][i] << std::endl; + res = std::abs(position[2]-position[0]); + res_p = std::abs(position_list_p[1][i]-position_list_p[0][i]); + MACH3LOG_TRACE("Pos midpoint is {:.2f}", position_list_p[1][i]); + } else{ std::vector new_bisect_position_p = position_list_p[1];new_bisect_position_p[i] += (position_list_p[2][i]-position_list_p[1][i])/2; @@ -208,18 +214,21 @@ std::vector > PSO::bisection(std::vectorposition,dou value_list_p[0] = value_list_p[1]; value_list_p[1] = new_val_p; position_list_p[1] = new_bisect_position_p; - res_p = abs(position_list_p[2][i]-position_list_p[1][i]); - //std::cout << "Pos midpoint is " << position_list_p[1][i] << std::endl; + res_p = std::abs(position_list_p[2][i]-position_list_p[1][i]); + MACH3LOG_TRACE("Pos midpoint is {:.2f}", position_list_p[1][i]); } } - uncertainties_list.push_back({abs(position[i]-position_list[1][i]),abs(position[i]-position_list_p[1][i])}); - std::cout << "Uncertainty finished for d = "<< i << std::endl; - std::cout << std::setprecision(10)<< "LLR values for ± positive and negative uncertainties are " << CalcChi(position_list[1]) << " and " << 
CalcChi(position_list_p[1]) << std::endl; + uncertainties_list.push_back({std::abs(position[i]-position_list[1][i]),std::abs(position[i]-position_list_p[1][i])}); + MACH3LOG_INFO("Uncertainty finished for d = {}", i); + MACH3LOG_INFO("LLR values for ± positive and negative uncertainties are {:<10.2f} and {:<10.2f}", + CalcChi(position_list[1]), CalcChi(position_list_p[1])); } return uncertainties_list; } -std::vector> PSO::calc_uncertainty(std::vectorposition,double minimum) { +// ************************* +std::vector> PSO::calc_uncertainty(std::vectorposition, double minimum) { +// ************************* std::vector pos_uncertainty(position.size()); std::vector neg_uncertainty(position.size()); int num = 200; @@ -244,16 +253,16 @@ std::vector> PSO::calc_uncertainty(std::vectorpositi pos[i] = curr_ival; int closest_index = 0; - double closest_value = abs(y[0]); // Initialize with the first element + double closest_value = std::abs(y[0]); // Initialize with the first element for (unsigned int ii = 1; ii < y.size(); ++ii) { - double abs_y = abs(y[ii]); + double abs_y = std::abs(y[ii]); if (abs_y < closest_value) { closest_index = ii; closest_value = abs_y; } } neg_uncertainty[i] = x[closest_index]; - std::cout << "Neg" << std::endl; + MACH3LOG_INFO("Neg"); x.assign(num, 0); y.assign(num, 0); StepPoint = (pos_stop-start) / (num - 1); @@ -267,9 +276,9 @@ std::vector> PSO::calc_uncertainty(std::vectorpositi } pos[i] = curr_ival; closest_index = 0; - closest_value = abs(y[0]); // Initialize with the first element + closest_value = std::abs(y[0]); // Initialize with the first element for (unsigned int ii = 1; ii < y.size(); ++ii) { - double abs_y = abs(y[ii]); + double abs_y = std::abs(y[ii]); if (abs_y < closest_value) { closest_index = ii; closest_value = abs_y; @@ -281,11 +290,13 @@ std::vector> PSO::calc_uncertainty(std::vectorpositi return res; } +// ************************* void PSO::uncertainty_check(std::vector previous_pos){ +// ************************* 
std::vector> x_list; std::vector> y_list; std::vector position = previous_pos; - int num = 5000; + constexpr int num = 5000; for (unsigned int i = 0;i previous_pos){ std::vector y(num); double StepPoint = (stop - start) / (num - 1); double value = start; - // std::cout << "result for fDim " << 1 << std::endl; - for (int j =0;j< num; ++j){ + MACH3LOG_TRACE("result for fDim: {}", 1); + for (int j = 0;j < num; ++j) { position[i] = value; double LLR = CalcChi(position); x[j] = value; @@ -303,21 +314,24 @@ void PSO::uncertainty_check(std::vector previous_pos){ value += StepPoint; } position[i] = curr_ival; - std::cout << " " << std::endl; - std::cout << "For fDim" << i+1 << " x list is " ; - for (unsigned int k= 0;k total_pos(fDim,0.0); for (int i = 0; i < fParticles; ++i) { @@ -375,8 +389,9 @@ double PSO::swarmIterate(){ return mean_dist_sq; } +// ************************* void PSO::run() { - +// ************************* double mean_dist_sq = 0; int iter = 0; @@ -396,11 +411,11 @@ void PSO::run() { accCount++; if (i%100 == 0){ - std::cout << "Mean Dist Sq = " << mean_dist_sq <get_personal_best_position()[j]); } } if(fConvergence > 0.0){ @@ -409,18 +424,20 @@ void PSO::run() { } } } - std::cout << "Finished after " << iter <<" runs out of "<< fIterations << std::endl; - std::cout << "Mean Dist " << mean_dist_sq <get_personal_best_value() << std::endl; - + MACH3LOG_INFO("Finished after {} runs out of {}", iter, fIterations); + MACH3LOG_INFO("Mean Dist: {:.2f}", mean_dist_sq); + MACH3LOG_INFO("Best LLR: {:.2f}", get_best_particle()->get_personal_best_value()); uncertainties = bisection(get_best_particle()->get_personal_best_position(),get_best_particle()->get_personal_best_value(),0.5,0.005); - std::cout << "Position for Global Minimum = "<get_personal_best_position()[i], uncertainties[i][1], uncertainties[i][0]); } } +// ************************* void PSO::WriteOutput(){ +// ************************* + outputFile->cd(); TVectorD* PSOParValue = new TVectorD(fDim); diff 
--git a/mcmc/PSO.h b/mcmc/PSO.h index 3596a45a2..9baf0758e 100644 --- a/mcmc/PSO.h +++ b/mcmc/PSO.h @@ -1,9 +1,5 @@ #pragma once -// -// Created by Emily Ip on 24/2/2023. -// -// Created by Emily Ip on 26/1/2023. -// + // C++ includes #include #include @@ -15,6 +11,7 @@ /// @brief Class particle - stores the position, velocity and personal best /// With functions which move particle and update velocity +/// @note Created by Emily Ip on 24/2/2023. class particle{ public: particle(){}; @@ -72,6 +69,7 @@ class particle{ /// @brief Class PSO, consist of a vector of object Class Particle and global best /// Takes in the size (number of particle) and number of iteration /// functions includes: finding global best, updating velocity, actual minimisation function +/// @note Created by Emily Ip on 24/2/2023. class PSO : public LikelihoodFit { public: /// @brief constructor @@ -98,7 +96,10 @@ class PSO : public LikelihoodFit { double swarmIterate(); std::vector vector_multiply(std::vector velocity, double mul){ - transform(velocity.begin(),velocity.end(),velocity.begin(),std::bind1st(std::multiplies(),mul)); + // std::bind1st deprecated since C++11, removed in c++17 + // transform(velocity.begin(),velocity.end(),velocity.begin(),std::bind1st(std::multiplies(),mul)); + std::transform(velocity.begin(), velocity.end(), velocity.begin(), + std::bind(std::multiplies(), mul, std::placeholders::_1)); return velocity; }; diff --git a/mcmc/SampleSummary.cpp b/mcmc/SampleSummary.cpp index 130a0c245..9988383f1 100644 --- a/mcmc/SampleSummary.cpp +++ b/mcmc/SampleSummary.cpp @@ -1,5 +1,9 @@ #include "mcmc/SampleSummary.h" +//this file is choc full of usage of a root interface that only takes floats, turn this warning off for this CU for now +#pragma GCC diagnostic ignored "-Wfloat-conversion" +#pragma GCC diagnostic ignored "-Wuseless-cast" + // ******************* // The constructor SampleSummary::SampleSummary(const int n_Samples, const std::string &Filename, samplePDFBase* const 
sample, const int nSteps) { @@ -38,9 +42,7 @@ SampleSummary::SampleSummary(const int n_Samples, const std::string &Filename, s first_pass = true; Outputfile = nullptr; OutputTree = nullptr; - Dir = nullptr; - - rnd = new TRandom3(); + rnd = std::make_unique(); DataHist = new TH2Poly*[nSamples]; DataHist_ProjectX = new TH1D*[nSamples]; @@ -56,7 +58,7 @@ SampleSummary::SampleSummary(const int n_Samples, const std::string &Filename, s if(DoBetaParam) BetaHist = new TH1D**[nSamples]; else BetaHist = nullptr; - maxBins = new int[nSamples]; + maxBins.resize(nSamples); lnLHist_Mean = new TH2Poly*[nSamples]; lnLHist_Mode = new TH2Poly*[nSamples]; @@ -121,7 +123,7 @@ SampleSummary::SampleSummary(const int n_Samples, const std::string &Filename, s } else RandomHist = nullptr; - for (_int_ i = 0; i < nSamples; ++i) + for (int i = 0; i < nSamples; ++i) { PosteriorHist[i] = NULL; w2Hist[i] = NULL; @@ -148,21 +150,6 @@ SampleSummary::SampleSummary(const int n_Samples, const std::string &Filename, s lnLHist_Sample_DrawflucDraw[i] = NULL; lnLHist_Sample_PredflucDraw[i] = NULL; }//end loop over samples - - llh_data_draw = NULL; - llh_drawfluc_draw = NULL; - llh_predfluc_draw = NULL; - llh_rate_data_draw = NULL; - llh_rate_predfluc_draw = NULL; - - llh_data_drawfluc = NULL; - llh_data_predfluc = NULL; - llh_draw_pred = NULL; - llh_drawfluc_pred = NULL; - - llh_predfluc_pred = NULL; - llh_drawfluc_predfluc = NULL; - llh_datafluc_draw = NULL; DoByModePlots = false; MeanHist_ByMode = NULL; @@ -182,8 +169,6 @@ SampleSummary::~SampleSummary() { //ROOT is weird and once you write TFile claim ownership of histograms. 
Best is to first delete histograms and then close file Outputfile->Close(); delete Outputfile; - - if(rnd != nullptr) delete rnd; if(lnLHist != NULL) delete lnLHist; if(lnLHist_drawdata != NULL) delete lnLHist_drawdata; @@ -197,10 +182,10 @@ SampleSummary::~SampleSummary() { if(lnLFlucHist_ProjectX != NULL) delete lnLFlucHist_ProjectX; if(DoByModePlots) { - for (_int_ i = 0; i < nSamples; ++i) + for (int i = 0; i < nSamples; ++i) { if(DataHist[i] == NULL) continue; - for (_int_ j = 0; j < Modes->GetNModes()+1; j++) + for (int j = 0; j < Modes->GetNModes()+1; j++) { for (int k = 1; k <= maxBins[i]; ++k) { @@ -216,7 +201,7 @@ SampleSummary::~SampleSummary() { delete[] MeanHist_ByMode; } - for (_int_ i = 0; i < nSamples; ++i) + for (int i = 0; i < nSamples; ++i) { if(DataHist[i] == NULL) continue; if(DataHist[i] != NULL) delete DataHist[i]; @@ -267,28 +252,10 @@ SampleSummary::~SampleSummary() { delete[] lnLHist_Sample_DrawData; delete[] lnLHist_Sample_DrawflucDraw; delete[] lnLHist_Sample_PredflucDraw; - - delete[] maxBins; - + delete[] PosteriorHist; delete[] w2Hist; if(DoBetaParam) delete[] BetaHist; - - delete[] llh_data_draw; - delete[] llh_data_drawfluc; - delete[] llh_data_predfluc; - delete[] llh_rate_data_draw; - delete[] llh_rate_predfluc_draw; - delete[] llh_draw_pred; - delete[] llh_drawfluc_pred; - delete[] llh_drawfluc_predfluc; - delete[] llh_drawfluc_draw; - delete[] llh_predfluc_pred; - delete[] llh_predfluc_draw; - delete[] llh_datafluc_draw; - - delete[] llh_data_draw_ProjectX; - delete[] llh_drawfluc_draw_ProjectX; } // ******************* @@ -311,7 +278,7 @@ bool SampleSummary::CheckSamples(int Length) { // Since the data doesn't change with varying the MC void SampleSummary::AddData(std::vector &Data) { // ******************* - const int Length = Data.size(); + const int Length = int(Data.size()); // Check length of samples are OK if (!CheckSamples(Length)) throw MaCh3Exception(__FILE__ , __LINE__ ); for (int i = 0; i < Length; ++i) { @@ -324,7 
+291,7 @@ void SampleSummary::AddData(std::vector &Data) { std::string classname = std::string(DataHist[i]->Class_Name()); if(classname == "TH2Poly") { - DataHist[i] = (TH2Poly*)(Data[i]->Clone()); + DataHist[i] = static_cast(Data[i]->Clone()); if(doShapeOnly) NormaliseTH2Poly(DataHist[i]); DataHist_ProjectX[i] = ProjectPoly(DataHist[i], true, i); DataHist_ProjectY[i] = ProjectPoly(DataHist[i], false, i); @@ -343,7 +310,7 @@ void SampleSummary::AddData(std::vector &Data) { void SampleSummary::AddNominal(std::vector &Nominal, std::vector &NomW2) { // ******************* - const int Length = Nominal.size(); + const int Length = int(Nominal.size()); if (!CheckSamples(Length)) throw MaCh3Exception(__FILE__ , __LINE__ ); //KS: ROOT is super annoying and you cannot use clone with openMP, hence we have another loop below @@ -366,20 +333,20 @@ void SampleSummary::AddNominal(std::vector &Nominal, std::vectorClone()); + NominalHist[i] = static_cast(Nominal[i]->Clone()); if(doShapeOnly) NormaliseTH2Poly(NominalHist[i]); - W2NomHist[i] = (TH2Poly*)(NomW2[i]->Clone()); + W2NomHist[i] = static_cast(NomW2[i]->Clone()); - lnLHist_Mean[i] = (TH2Poly*)(NominalHist[i]->Clone()); - lnLHist_Mean[i]->SetDirectory(0); - lnLHist_Mode[i] = (TH2Poly*)(NominalHist[i]->Clone()); - lnLHist_Mode[i]->SetDirectory(0); - lnLHist_Mean_ProjectX[i] = (TH1D*)(DataHist_ProjectX[i]->Clone()); - MeanHist[i] = (TH2Poly*)(NominalHist[i]->Clone()); - if(DoBetaParam) MeanHistCorrected[i] = (TH2Poly*)(NominalHist[i]->Clone()); - ModeHist[i] = (TH2Poly*)(NominalHist[i]->Clone()); - W2MeanHist[i] = (TH2Poly*)(NominalHist[i]->Clone()); - W2ModeHist[i] = (TH2Poly*)(NominalHist[i]->Clone()); + lnLHist_Mean[i] = static_cast(NominalHist[i]->Clone()); + lnLHist_Mean[i]->SetDirectory(nullptr); + lnLHist_Mode[i] = static_cast(NominalHist[i]->Clone()); + lnLHist_Mode[i]->SetDirectory(nullptr); + lnLHist_Mean_ProjectX[i] = static_cast(DataHist_ProjectX[i]->Clone()); + MeanHist[i] = static_cast(NominalHist[i]->Clone()); + 
if(DoBetaParam) MeanHistCorrected[i] = static_cast(NominalHist[i]->Clone()); + ModeHist[i] = static_cast(NominalHist[i]->Clone()); + W2MeanHist[i] = static_cast(NominalHist[i]->Clone()); + W2ModeHist[i] = static_cast(NominalHist[i]->Clone()); } } @@ -429,19 +396,19 @@ void SampleSummary::AddNominal(std::vector &Nominal, std::vector xbins; std::vector ybins; - SamplePDF->SetupBinning(i, xbins, ybins); + SamplePDF->SetupBinning(M3::int_t(i), xbins, ybins); //KS: Y axis is number of events to get estimate of maximal number we use integral - const int MaxBinning = (doShapeOnly) ? 1 : NoOverflowIntegral(NominalHist[i])/4; - ViolinHists_ProjectX[i] = new TH2D((name+"_Violin_ProjectX").c_str(), (name+"_Violin_ProjectX").c_str(), xbins.size()-1, &xbins[0] , 400, 0, MaxBinning); + const int MaxBinning = doShapeOnly ? 1 : int(NoOverflowIntegral(NominalHist[i])/4); + ViolinHists_ProjectX[i] = new TH2D((name+"_Violin_ProjectX").c_str(), (name+"_Violin_ProjectX").c_str(), int(xbins.size()-1), &xbins[0] , 400, 0, MaxBinning); ViolinHists_ProjectX[i]->GetYaxis()->SetTitle("Events"); ViolinHists_ProjectX[i]->GetXaxis()->SetTitle(std::string(NominalHist[i]->GetXaxis()->GetTitle()).c_str() ); - ViolinHists_ProjectX[i]->SetDirectory(0); + ViolinHists_ProjectX[i]->SetDirectory(nullptr); - ViolinHists_ProjectY[i] = new TH2D((name+"_Violin_ProjectY").c_str(), (name+"_Violin_ProjectY").c_str(), ybins.size()-1, &ybins[0] , 400, 0, MaxBinning); + ViolinHists_ProjectY[i] = new TH2D((name+"_Violin_ProjectY").c_str(), (name+"_Violin_ProjectY").c_str(), int(ybins.size()-1), &ybins[0] , 400, 0, MaxBinning); ViolinHists_ProjectY[i]->GetYaxis()->SetTitle("Events"); ViolinHists_ProjectY[i]->GetXaxis()->SetTitle(std::string(NominalHist[i]->GetYaxis()->GetTitle()).c_str()); - ViolinHists_ProjectY[i]->SetDirectory(0); + ViolinHists_ProjectY[i]->SetDirectory(nullptr); ModeHist[i]->SetNameTitle((name+"_mode").c_str(), (name+"_mode").c_str()); ModeHist[i]->Reset(""); @@ -496,7 +463,7 @@ void 
SampleSummary::AddThrow(std::vector &SampleVector, std::vectorFill(DrawNumber); - const int size = SampleVector.size(); + const int size = int(SampleVector.size()); if (!CheckSamples(size)) throw MaCh3Exception(__FILE__ , __LINE__ ); // Push back the throw @@ -508,7 +475,7 @@ void SampleSummary::AddThrow(std::vector &SampleVector, std::vector &SampleVector, std::vectorGetBins()->At(i-1); + TH2PolyBin* bin = static_cast(SampleVector[SampleNum]->GetBins()->At(i-1)); // Just make a little fancy name std::stringstream ss2; @@ -525,9 +492,9 @@ void SampleSummary::AddThrow(std::vector &SampleVector, std::vectorGetYMin() << "-" << bin->GetYMax() << ")"; PosteriorHist[SampleNum][i] = new TH1D(ss2.str().c_str(), ss2.str().c_str(),nXBins, 1, -1); - PosteriorHist[SampleNum][i]->SetDirectory(0); + PosteriorHist[SampleNum][i]->SetDirectory(nullptr); w2Hist[SampleNum][i] = new TH1D(("w2_"+ss2.str()).c_str(), ("w2_"+ss2.str()).c_str(),nXBins, 1, -1); - w2Hist[SampleNum][i]->SetDirectory(0); + w2Hist[SampleNum][i]->SetDirectory(nullptr); if(DoBetaParam) { std::string betaName = "#beta_param_"; @@ -544,7 +511,7 @@ void SampleSummary::AddThrow(std::vector &SampleVector, std::vector> &SampleVec PosteriorHist_ByMode = new TH1D***[nSamples]; MeanHist_ByMode = new TH2Poly**[nSamples]; - for (_int_ SampleNum = 0; SampleNum < nSamples; SampleNum++) + for (int SampleNum = 0; SampleNum < nSamples; SampleNum++) { if (DataHist[SampleNum] == NULL) continue; PosteriorHist_ByMode[SampleNum] = new TH1D**[Modes->GetNModes()+1]; MeanHist_ByMode[SampleNum] = new TH2Poly*[Modes->GetNModes()+1]; - for (_int_ j = 0; j < Modes->GetNModes()+1; j++) + for (int j = 0; j < Modes->GetNModes()+1; j++) { PosteriorHist_ByMode[SampleNum][j] = new TH1D*[maxBins[SampleNum]+1]; - const int nXBins = 500; + constexpr int nXBins = 500; std::string name = std::string(NominalHist[SampleNum]->GetName()); name = name.substr(0, name.find("_nom")); @@ -597,7 +564,7 @@ void SampleSummary::AddThrowByMode(std::vector> 
&SampleVec for (int i = 1; i <= maxBins[SampleNum]; i++) { //Get PolyBin - TH2PolyBin* bin = (TH2PolyBin*)NominalHist[SampleNum]->GetBins()->At(i-1); + TH2PolyBin* bin = static_cast(NominalHist[SampleNum]->GetBins()->At(i-1)); // Just make a little fancy name std::stringstream ss2; @@ -608,7 +575,7 @@ void SampleSummary::AddThrowByMode(std::vector> &SampleVec //Initialise TH1D which corresponds to each bin in the sample's th2poly PosteriorHist_ByMode[SampleNum][j][i] = new TH1D((name+ss2.str()).c_str(),(name+ss2.str()).c_str(),nXBins, 1, -1); } - MeanHist_ByMode[SampleNum][j] = (TH2Poly*)(NominalHist[SampleNum]->Clone()); + MeanHist_ByMode[SampleNum][j] = static_cast(NominalHist[SampleNum]->Clone()); MeanHist_ByMode[SampleNum][j]->SetNameTitle((name+"_mean").c_str(), (name+"_mean").c_str()); MeanHist_ByMode[SampleNum][j]->Reset(""); MeanHist_ByMode[SampleNum][j]->GetZaxis()->SetTitle("Mean"); @@ -620,18 +587,18 @@ void SampleSummary::AddThrowByMode(std::vector> &SampleVec #ifdef MULTITHREAD #pragma omp parallel for #endif - for (_int_ SampleNum = 0; SampleNum < nSamples; SampleNum++) + for (int SampleNum = 0; SampleNum < nSamples; SampleNum++) { if (DataHist[SampleNum] == NULL) continue; - for (_int_ j = 0; j < Modes->GetNModes()+1; j++) + for (int j = 0; j < Modes->GetNModes()+1; j++) { if(doShapeOnly) NormaliseTH2Poly(SampleVector_ByMode[SampleNum][j]); // Loop over the distribution and fill the prior/posterior predictive for (int i = 1; i <= maxBins[SampleNum]; ++i) { const double Content = SampleVector_ByMode[SampleNum][j]->GetBinContent(i); - const int Entries = PosteriorHist_ByMode[SampleNum][j][i]->GetEntries(); + const int Entries = int(PosteriorHist_ByMode[SampleNum][j][i]->GetEntries()); PosteriorHist_ByMode[SampleNum][j][i]->Fill(Content, WeightVector[Entries]); } } @@ -649,42 +616,42 @@ void SampleSummary::PrepareOutput() { // The array of doubles we write to the TTree // Data vs Draw - llh_data_draw = new double[nSamples]; + 
llh_data_draw.resize(nSamples); // Fluctuated Draw vs Draw - llh_drawfluc_draw = new double[nSamples]; + llh_drawfluc_draw.resize(nSamples); // Fluctuated Predicitve vs Draw - llh_predfluc_draw = new double[nSamples]; + llh_predfluc_draw.resize(nSamples); // Data vs Draw using Rate - llh_rate_data_draw = new double[nSamples]; + llh_rate_data_draw.resize(nSamples); // Data vs Fluctuated Predictive using Rate - llh_rate_predfluc_draw = new double[nSamples]; + llh_rate_predfluc_draw.resize(nSamples); // Data vs Fluctuated Draw - llh_data_drawfluc = new double[nSamples]; + llh_data_drawfluc.resize(nSamples); // Data vs Fluctuated Predictive - llh_data_predfluc = new double[nSamples]; + llh_data_predfluc.resize(nSamples); // Draw vs Predictive - llh_draw_pred = new double[nSamples]; + llh_draw_pred.resize(nSamples); // Fluctuated Draw vs Predictive - llh_drawfluc_pred = new double[nSamples]; + llh_drawfluc_pred.resize(nSamples); // Fluctuated Draw vs Fluctuated Predictive - llh_drawfluc_predfluc = new double[nSamples]; + llh_drawfluc_predfluc.resize(nSamples); // Fluctuated Predictive vs Predictive - llh_predfluc_pred = new double[nSamples]; + llh_predfluc_pred.resize(nSamples); // Fluctuated Data vs Draw - llh_datafluc_draw = new double[nSamples]; + llh_datafluc_draw.resize(nSamples); // Data vs Draw for 1D projection - llh_data_draw_ProjectX = new double[nSamples]; - llh_drawfluc_draw_ProjectX = new double[nSamples]; + llh_data_draw_ProjectX.resize(nSamples); + llh_drawfluc_draw_ProjectX.resize(nSamples); // The output tree we're going to write to OutputTree = new TTree("LLH_draws", "LLH_draws"); SampleNames.resize(nSamples); // Loop over the samples and set the addresses of the variables to write to file - for (_int_ i = 0; i < nSamples; ++i) + for (int i = 0; i < nSamples; ++i) { // Get the name std::string SampleName = SamplePDF->GetSampleName(i); @@ -742,8 +709,8 @@ void SampleSummary::PrepareOutput() { OutputTree->Branch("total_llh_drawfluc_draw_ProjectX", 
&total_llh_drawfluc_draw_ProjectX); Outputfile->cd(); - Dir = new TDirectory*[nSamples]; - for (_int_ i = 0; i < nSamples; ++i) + Dir.resize(nSamples); + for (int i = 0; i < nSamples; ++i) { // Make a new directory Dir[i] = Outputfile->mkdir((SampleNames[i]).c_str()); @@ -789,7 +756,7 @@ void SampleSummary::Write() { // Loop over each sample and write to file //KS: Multithreading is tempting here but we also write to ROOT file, separating all LLH and poly projections from write could work well - for (_int_ i = 0; i < nSamples; ++i) + for (int i = 0; i < nSamples; ++i) { // Skip the null histograms if (DataHist[i] == NULL || NoOverflowIntegral(DataHist[i]) == 0) continue; @@ -850,7 +817,7 @@ void SampleSummary::Write() { SampleName.replace(SampleName.find("-"), 1, std::string("_")); } OutputTree->Draw((SampleName+"_data_draw:"+SampleName+"_drawfluc_draw>>htemp").c_str()); - TH2D *TempHistogram = (TH2D*)((gDirectory->Get("htemp"))->Clone()); + TH2D *TempHistogram = static_cast(gDirectory->Get("htemp")->Clone()); TempHistogram->GetXaxis()->SetTitle("-2LLH(Draw Fluc, Draw)"); TempHistogram->GetYaxis()->SetTitle("-2LLH(Data, Draw)"); TempHistogram->SetNameTitle((SampleNames[i]+"_drawfluc_draw").c_str(), (SampleNames[i]+"_drawfluc_draw").c_str()); @@ -860,7 +827,7 @@ void SampleSummary::Write() { // Also write the 2D histograms for the p-value OutputTree->Draw((SampleName+"_data_draw:"+SampleName+"_predfluc_draw>>htemp2").c_str()); - TH2D *TempHistogram2 = (TH2D*)((gDirectory->Get("htemp2"))->Clone()); + TH2D *TempHistogram2 = static_cast(gDirectory->Get("htemp2")->Clone()); TempHistogram2->GetXaxis()->SetTitle("-2LLH(Pred Fluc, Draw)"); TempHistogram2->GetYaxis()->SetTitle("-2LLH(Data, Draw)"); TempHistogram2->SetNameTitle((SampleNames[i]+"_predfluc_draw").c_str(), (SampleNames[i]+"_predfluc_draw").c_str()); @@ -870,7 +837,7 @@ void SampleSummary::Write() { // finally p-value for 1D projection 
OutputTree->Draw((SampleName+"_rate_data_draw:"+SampleName+"_rate_predfluc_draw>>htemp3").c_str()); - TH2D *TempHistogram3 = (TH2D*)((gDirectory->Get("htemp3"))->Clone()); + TH2D *TempHistogram3 = static_cast(gDirectory->Get("htemp3")->Clone()); TempHistogram3->GetXaxis()->SetTitle("-2LLH(Pred Fluc, Draw)"); TempHistogram3->GetYaxis()->SetTitle("-2LLH(Data, Draw)"); TempHistogram3->SetNameTitle((SampleNames[i]+"_rate_predfluc_draw").c_str(), (SampleNames[i]+"_rate_predfluc_draw").c_str()); @@ -880,7 +847,7 @@ void SampleSummary::Write() { // finally p-value for 1D projection OutputTree->Draw((SampleName+"_data_draw_ProjectX:"+SampleName+"_drawfluc_draw_ProjectX>>htemp4").c_str()); - TH2D *TempHistogram4 = (TH2D*)((gDirectory->Get("htemp4"))->Clone()); + TH2D *TempHistogram4 = static_cast(gDirectory->Get("htemp4")->Clone()); TempHistogram4->GetXaxis()->SetTitle(("-2LLH_{Draw Fluc, Draw} for " + SamplePDF->GetKinVarLabel(i, 0)).c_str()); TempHistogram4->GetYaxis()->SetTitle(("-2LLH_{Data, Draw} for " + SamplePDF->GetKinVarLabel(i, 0)).c_str()); TempHistogram4->SetNameTitle((SampleNames[i]+"_drawfluc_draw_ProjectX").c_str(), (SampleNames[i]+"_drawfluc_draw_ProjectX").c_str()); @@ -939,11 +906,13 @@ void SampleSummary::Write() { PosteriorHist[i][b]->Write(); std::string Title = PosteriorHist[i][b]->GetName(); - TLine *TempLine = new TLine(NominalHist[i]->GetBinContent(b), PosteriorHist[i][b]->GetMinimum(), NominalHist[i]->GetBinContent(b), PosteriorHist[i][b]->GetMaximum()); + auto TempLine = std::make_unique(NominalHist[i]->GetBinContent(b), PosteriorHist[i][b]->GetMinimum(), + NominalHist[i]->GetBinContent(b), PosteriorHist[i][b]->GetMaximum()); TempLine->SetLineColor(kRed); TempLine->SetLineWidth(2); - TLine *TempLineData = new TLine(DataHist[i]->GetBinContent(b), PosteriorHist[i][b]->GetMinimum(), DataHist[i]->GetBinContent(b), PosteriorHist[i][b]->GetMaximum()); + auto TempLineData = std::make_unique(DataHist[i]->GetBinContent(b), 
PosteriorHist[i][b]->GetMinimum(), + DataHist[i]->GetBinContent(b), PosteriorHist[i][b]->GetMaximum()); TempLineData->SetLineColor(kGreen); TempLineData->SetLineWidth(2); @@ -952,13 +921,13 @@ void SampleSummary::Write() { PosteriorHist[i][b]->Fit(Fitter, "RQ"); Fitter->SetLineColor(kRed-5); - TLegend *Legend = new TLegend(0.4, 0.75, 0.98, 0.90); + auto Legend = std::make_unique(0.4, 0.75, 0.98, 0.90); Legend->SetFillColor(0); Legend->SetFillStyle(0); Legend->SetLineWidth(0); Legend->SetLineColor(0); - Legend->AddEntry(TempLineData, Form("Data #mu=%.2f", DataHist[i]->GetBinContent(b)), "l"); - Legend->AddEntry(TempLine, Form("Prior #mu=%.2f", NominalHist[i]->GetBinContent(b)), "l"); + Legend->AddEntry(TempLineData.get(), Form("Data #mu=%.2f", DataHist[i]->GetBinContent(b)), "l"); + Legend->AddEntry(TempLine.get(), Form("Prior #mu=%.2f", NominalHist[i]->GetBinContent(b)), "l"); Legend->AddEntry(PosteriorHist[i][b], Form("Post, #mu=%.2f#pm%.2f", PosteriorHist[i][b]->GetMean(), PosteriorHist[i][b]->GetRMS()), "l"); Legend->AddEntry(Fitter, Form("Gauss, #mu=%.2f#pm%.2f", Fitter->GetParameter(1), Fitter->GetParameter(2)), "l"); std::string TempTitle = std::string(PosteriorHist[i][b]->GetName()); @@ -979,11 +948,8 @@ void SampleSummary::Write() { Legend->Draw("same"); TempCanvas->Write(); - delete TempLine; - delete TempLineData; delete TempCanvas; delete Fitter; - delete Legend; //This isn't useful check only in desperation if(Debug > 1) w2Hist[i][b]->Write(); } @@ -1008,7 +974,7 @@ void SampleSummary::Write() { if(DoByModePlots) { - for (_int_ j = 0; j < Modes->GetNModes()+1; ++j) + for (int j = 0; j < Modes->GetNModes()+1; ++j) { MeanHist_ByMode[i][j]->Write(); TH1D *MeanProjectX_ByMode = ProjectPoly(MeanHist_ByMode[i][j], true, i, true); @@ -1080,7 +1046,7 @@ void SampleSummary::MakePredictive() { #ifdef MULTITHREAD #pragma omp parallel for reduction(+:llh_total_temp) #endif - for (_int_ SampleNum = 0; SampleNum < nSamples; ++SampleNum) + for (int SampleNum = 0; 
SampleNum < nSamples; ++SampleNum) { // Skip disabled samples if (DataHist[SampleNum] == NULL || NoOverflowIntegral(DataHist[SampleNum]) == 0) continue; @@ -1092,8 +1058,8 @@ void SampleSummary::MakePredictive() { // Loop over each pmu cosmu bin for (int j = 1; j < maxBins[SampleNum]+1; ++j) { - TH1D *Projection = (TH1D*) PosteriorHist[SampleNum][j]; - TH1D *W2Projection = (TH1D*) w2Hist[SampleNum][j]; + TH1D *Projection = PosteriorHist[SampleNum][j]; + TH1D *W2Projection = w2Hist[SampleNum][j]; // Data content for the j,kth bin const double nData = DataHist[SampleNum]->GetBinContent(j); @@ -1127,7 +1093,7 @@ void SampleSummary::MakePredictive() { if(DoBetaParam) { - TH1D *BetaTemp = (TH1D*)BetaHist[SampleNum][j]; + TH1D *BetaTemp = BetaHist[SampleNum][j]; const double nBetaMean = BetaTemp->GetMean(); const double nBetaMeanError = BetaTemp->GetRMS(); //KS: Here we modify predictions by beta parameter from Barlow-Beeston @@ -1156,7 +1122,7 @@ void SampleSummary::MakePredictive() { } // End loop over bins if(DoByModePlots) { - for (_int_ j = 0; j < Modes->GetNModes()+1; j++) + for (int j = 0; j < Modes->GetNModes()+1; j++) { // Loop over each pmu cosmu bin for (int i = 1; i < maxBins[SampleNum]+1; ++i) @@ -1164,7 +1130,7 @@ void SampleSummary::MakePredictive() { // Make the posterior/prior predictive projection on z // The z axis of Predictive is the bin content // Essentially zooming in on one bin and looking at the mean and mode of that bin - TH1D *Projection = (TH1D*)PosteriorHist_ByMode[SampleNum][j][i]; + TH1D *Projection = PosteriorHist_ByMode[SampleNum][j][i]; // Get the mean for this projection for all the samples const double nMean = Projection->GetMean(); @@ -1181,7 +1147,7 @@ void SampleSummary::MakePredictive() { } // End loop over samples // This is not multithreaded as due to ProjectPoly it is not safe - for (_int_ SampleNum = 0; SampleNum < nSamples; ++SampleNum) + for (int SampleNum = 0; SampleNum < nSamples; ++SampleNum) { // Skip disabled samples if 
(DataHist[SampleNum] == NULL || NoOverflowIntegral(DataHist[SampleNum]) == 0) continue; @@ -1286,17 +1252,17 @@ void SampleSummary::MakeChi2Hists() { TH1D **DrawW2HistProjectX = new TH1D*[nSamples]; //KS: We have to clone histograms here to avoid cloning in MP loop, we have to make sure binning matches, content doesn't have to - for (_int_ SampleNum = 0; SampleNum < nSamples; ++SampleNum) + for (int SampleNum = 0; SampleNum < nSamples; ++SampleNum) { - FluctHist[SampleNum] = (TH2Poly*)(MeanHist[SampleNum]->Clone()); - FluctDrawHist[SampleNum] = (TH2Poly*)(MeanHist[SampleNum]->Clone()); - DataFlucHist[SampleNum] = (TH2Poly*)(MeanHist[SampleNum]->Clone()); + FluctHist[SampleNum] = static_cast(MeanHist[SampleNum]->Clone()); + FluctDrawHist[SampleNum] = static_cast(MeanHist[SampleNum]->Clone()); + DataFlucHist[SampleNum] = static_cast(MeanHist[SampleNum]->Clone()); - FluctDrawHistProjectX[SampleNum] = (TH1D*)(DataHist_ProjectX[SampleNum]->Clone()); + FluctDrawHistProjectX[SampleNum] = static_cast(DataHist_ProjectX[SampleNum]->Clone()); // Get the ith draw for the jth sample - TH2Poly *DrawHist = (TH2Poly*)(MCVector[i][SampleNum]); - TH2Poly *DrawW2Hist = (TH2Poly*)(W2MCVector[i][SampleNum]); + TH2Poly *DrawHist = MCVector[i][SampleNum]; + TH2Poly *DrawW2Hist = W2MCVector[i][SampleNum]; //ProjectPoly calls new TH1D under the hood, never define new ROOT object under MP... 
DrawHistProjectX[SampleNum] = ProjectPoly(DrawHist, true, SampleNum); @@ -1308,11 +1274,11 @@ void SampleSummary::MakeChi2Hists() { #pragma omp parallel for reduction(+:total_llh_data_draw_temp, total_llh_drawfluc_draw_temp, total_llh_predfluc_draw_temp, total_llh_rate_data_draw_temp, total_llh_rate_predfluc_draw_temp, total_llh_data_drawfluc_temp, total_llh_data_predfluc_temp, total_llh_draw_pred_temp, total_llh_drawfluc_pred_temp, total_llh_drawfluc_predfluc_temp, total_llh_predfluc_pred_temp, total_llh_datafluc_draw_temp, total_llh_data_draw_ProjectX_temp, total_llh_drawfluc_draw_ProjectX_temp) #endif // Loop over the samples - for (_int_ SampleNum = 0; SampleNum < nSamples; ++SampleNum) + for (int SampleNum = 0; SampleNum < nSamples; ++SampleNum) { // Get the ith draw for the jth sample - TH2Poly *DrawHist = (TH2Poly*)(MCVector[i][SampleNum]); - TH2Poly *DrawW2Hist = (TH2Poly*)(W2MCVector[i][SampleNum]); + TH2Poly *DrawHist = MCVector[i][SampleNum]; + TH2Poly *DrawW2Hist = W2MCVector[i][SampleNum]; // Skip empty samples if (DrawHist == NULL) continue; @@ -1441,7 +1407,7 @@ void SampleSummary::MakeChi2Hists() { } // End loop over samples (still looping throws) // Delete the temporary histograms - for (_int_ SampleNum = 0; SampleNum < nSamples; ++SampleNum) + for (int SampleNum = 0; SampleNum < nSamples; ++SampleNum) { delete FluctHist[SampleNum]; delete FluctDrawHist[SampleNum]; @@ -1572,12 +1538,12 @@ void SampleSummary::MakeCutLLH1D(TH1D *Histogram, double llh_ref) { Histogram->SetTitle((std::string(Histogram->GetTitle())+"_"+ss.str()).c_str()); // Write a TCanvas and make a line and a filled histogram - TLine *TempLine = new TLine(llh_reference , Histogram->GetMinimum(), llh_reference, Histogram->GetMaximum()); + auto TempLine = std::make_unique(llh_reference , Histogram->GetMinimum(), llh_reference, Histogram->GetMaximum()); TempLine->SetLineColor(kBlack); TempLine->SetLineWidth(2); // Make the fill histogram - TH1D *TempHistogram = 
(TH1D*)(Histogram->Clone()); + TH1D *TempHistogram = static_cast(Histogram->Clone()); TempHistogram->SetFillStyle(1001); TempHistogram->SetFillColor(kRed); for (int i = 0; i < TempHistogram->GetNbinsX(); ++i) @@ -1588,12 +1554,12 @@ void SampleSummary::MakeCutLLH1D(TH1D *Histogram, double llh_ref) { } } - TLegend *Legend = new TLegend(0.6, 0.6, 0.9, 0.9); + auto Legend = std::make_unique(0.6, 0.6, 0.9, 0.9); Legend->SetFillColor(0); Legend->SetFillStyle(0); Legend->SetLineWidth(0); Legend->SetLineColor(0); - Legend->AddEntry(TempLine, Form("Reference LLH, %.0f, p-value=%.2f", llh_reference, pvalue), "l"); + Legend->AddEntry(TempLine.get(), Form("Reference LLH, %.0f, p-value=%.2f", llh_reference, pvalue), "l"); Legend->AddEntry(Histogram, Form("LLH, #mu=%.1f#pm%.1f", Histogram->GetMean(), Histogram->GetRMS()), "l"); std::string Title = Histogram->GetName(); Title += "_canv"; @@ -1607,17 +1573,14 @@ void SampleSummary::MakeCutLLH1D(TH1D *Histogram, double llh_ref) { TempCanvas->Write(); - delete TempLine; delete TempHistogram; delete TempCanvas; - delete Legend; } // **************** // Make the 2D cut distribution and give the 2D p-value void SampleSummary::MakeCutLLH2D(TH2D *Histogram) { // **************** - const double TotalIntegral = Histogram->Integral(); // Count how many fills are above y=x axis // This is the 2D p-value @@ -1649,7 +1612,7 @@ void SampleSummary::MakeCutLLH2D(TH2D *Histogram) { maximum += Histogram->GetYaxis()->GetBinWidth(Histogram->GetYaxis()->GetNbins()); } else maximum += Histogram->GetXaxis()->GetBinWidth(Histogram->GetXaxis()->GetNbins()); - TLine *TempLine = new TLine(minimum, minimum, maximum, maximum); + auto TempLine = std::make_unique(minimum, minimum, maximum, maximum); TempLine->SetLineColor(kRed); TempLine->SetLineWidth(2); @@ -1665,7 +1628,6 @@ void SampleSummary::MakeCutLLH2D(TH2D *Histogram) { TempLine->Draw("same"); TempCanvas->Write(); - delete TempLine; delete TempCanvas; } @@ -1674,7 +1636,7 @@ void 
SampleSummary::MakeCutLLH2D(TH2D *Histogram) { void SampleSummary::MakeCutEventRate(TH1D *Histogram, const double DataRate) { // **************** // For the event rate histogram add a TLine to the data rate - TLine *TempLine = new TLine(DataRate, Histogram->GetMinimum(), DataRate, Histogram->GetMaximum()); + auto TempLine = std::make_unique(DataRate, Histogram->GetMinimum(), DataRate, Histogram->GetMaximum()); TempLine->SetLineColor(kRed); TempLine->SetLineWidth(2); // Also fit a Gaussian because why not? @@ -1690,12 +1652,12 @@ void SampleSummary::MakeCutEventRate(TH1D *Histogram, const double DataRate) { } } const double pvalue = Above/Histogram->Integral(); - TLegend *Legend = new TLegend(0.4, 0.75, 0.98, 0.90); + auto Legend = std::make_unique(0.4, 0.75, 0.98, 0.90); Legend->SetFillColor(0); Legend->SetFillStyle(0); Legend->SetLineWidth(0); Legend->SetLineColor(0); - Legend->AddEntry(TempLine, Form("Data, %.0f, p-value=%.2f", DataRate, pvalue), "l"); + Legend->AddEntry(TempLine.get(), Form("Data, %.0f, p-value=%.2f", DataRate, pvalue), "l"); Legend->AddEntry(Histogram, Form("MC, #mu=%.1f#pm%.1f", Histogram->GetMean(), Histogram->GetRMS()), "l"); Legend->AddEntry(Fitter, Form("Gauss, #mu=%.1f#pm%.1f", Fitter->GetParameter(1), Fitter->GetParameter(2)), "l"); std::string TempTitle = std::string(Histogram->GetName()); @@ -1715,10 +1677,8 @@ void SampleSummary::MakeCutEventRate(TH1D *Histogram, const double DataRate) { TempCanvas->Write(); Histogram->Write(); - delete TempLine; delete TempCanvas; delete Fitter; - delete Legend; } // **************** @@ -1727,7 +1687,7 @@ template HistType* SampleSummary::RatioHists(HistType *NumHist, HistType *DenomHist) { // **************** - HistType *NumCopy = (HistType*)(NumHist->Clone()); + HistType *NumCopy = static_cast(NumHist->Clone()); std::string title = std::string(DenomHist->GetName()) + "_ratio"; NumCopy->SetNameTitle(title.c_str(), title.c_str()); NumCopy->Divide(DenomHist); @@ -1740,7 +1700,7 @@ HistType* 
SampleSummary::RatioHists(HistType *NumHist, HistType *DenomHist) { TH2Poly* SampleSummary::RatioPolys(TH2Poly *NumHist, TH2Poly *DenomHist) { // **************** - TH2Poly *NumCopy = (TH2Poly*)(NumHist->Clone()); + TH2Poly *NumCopy = static_cast(NumHist->Clone()); std::string title = std::string(DenomHist->GetName()) + "_ratio"; NumCopy->SetNameTitle(title.c_str(), title.c_str()); @@ -1826,7 +1786,7 @@ void SampleSummary::PlotBetaParameters() { MACH3LOG_INFO("Writing Beta parameters"); TDirectory **DirBeta = new TDirectory*[nSamples]; - for (_int_ i = 0; i < nSamples; ++i) + for (int i = 0; i < nSamples; ++i) { // Make a new directory DirBeta[i] = BetaDir->mkdir((SampleNames[i]).c_str()); @@ -1850,7 +1810,7 @@ void SampleSummary::PlotBetaParameters() { BetaHist[i][j]->Fit(Fitter, "RQ"); Fitter->SetLineColor(kRed-5); - TLegend *Legend = new TLegend(0.4, 0.75, 0.98, 0.90); + auto Legend = std::make_unique(0.4, 0.75, 0.98, 0.90); Legend->SetFillColor(0); Legend->SetFillStyle(0); Legend->SetLineWidth(0); @@ -1879,7 +1839,6 @@ void SampleSummary::PlotBetaParameters() { delete TempLine; delete TempCanvas; delete Fitter; - delete Legend; } DirBeta[i]->Write(); delete DirBeta[i]; @@ -1900,14 +1859,14 @@ void SampleSummary::StudyKinematicCorrelations() { timer.Start(); // Data vs Draw for 1D projection - double* NEvents_Sample = new double[nSamples]; + std::vector NEvents_Sample(nSamples); double event_rate = 0.; // The output tree we're going to write to TTree* Event_Rate_Tree = new TTree("Event_Rate_draws", "Event_Rate_draws"); Event_Rate_Tree->Branch("Event_Rate", &event_rate); // Loop over the samples and set the addresses of the variables to write to file - for (_int_ i = 0; i < nSamples; ++i) + for (int i = 0; i < nSamples; ++i) { // Get the name std::string SampleName = SampleNames[i]; @@ -1926,7 +1885,6 @@ void SampleSummary::StudyKinematicCorrelations() { // Holds the event rate for the distribution TH1D **SumHist = new TH1D*[nSamples]; - for (int i = 0; i < 
nSamples; ++i) { std::string name = std::string(NominalHist[i]->GetName()); @@ -1948,7 +1906,7 @@ void SampleSummary::StudyKinematicCorrelations() { #ifdef MULTITHREAD #pragma omp parallel for reduction(+:event_rate_temp) #endif - for (_int_ SampleNum = 0; SampleNum < nSamples; ++SampleNum) + for (int SampleNum = 0; SampleNum < nSamples; ++SampleNum) { NEvents_Sample[SampleNum] = NoOverflowIntegral(MCVector[it][SampleNum]); // Fill the sum histogram with the integral of the sampled distribution @@ -1967,28 +1925,27 @@ void SampleSummary::StudyKinematicCorrelations() { #ifdef MULTITHREAD #pragma omp parallel for reduction(+:DataRate) #endif - for (_int_ i = 0; i < nSamples; ++i) + for (int i = 0; i < nSamples; ++i) { DataRate += NoOverflowIntegral(DataHist[i]); } MakeCutEventRate(EventHist, DataRate); delete EventHist; - for (_int_ SampleNum = 0; SampleNum < nSamples; ++SampleNum) + for (int SampleNum = 0; SampleNum < nSamples; ++SampleNum) { Dir[SampleNum]->cd(); //Make fancy event rate histogram MakeCutEventRate(SumHist[SampleNum], NoOverflowIntegral(DataHist[SampleNum])); } - // Make a new direcotry + // Make a new directory TDirectory *CorrDir = Outputfile->mkdir("Correlations"); CorrDir->cd(); TMatrixDSym* SampleCorrelation = new TMatrixDSym(nSamples); TH2D*** SamCorr = new TH2D**[nSamples](); - for (int i = 0; i < nSamples; ++i) { SamCorr[i] = new TH2D*[nSamples](); @@ -2004,7 +1961,7 @@ void SampleSummary::StudyKinematicCorrelations() { // TH2D to hold the Correlation SamCorr[i][j] = new TH2D(Form("SamCorr_%i_%i",i,j), Form("SamCorr_%i_%i",i,j), 70, Min_i, Max_i, 70, Min_j, Max_j); - SamCorr[i][j]->SetDirectory(0); + SamCorr[i][j]->SetDirectory(nullptr); SamCorr[i][j]->SetMinimum(0); SamCorr[i][j]->GetXaxis()->SetTitle(SampleNames[i].c_str()); SamCorr[i][j]->GetYaxis()->SetTitle(SampleNames[j].c_str()); @@ -2037,7 +1994,7 @@ void SampleSummary::StudyKinematicCorrelations() { }// End i loop TH2D* hSamCorr = new TH2D("Sample Correlation", "Sample Correlation", 
nSamples, 0, nSamples, nSamples, 0, nSamples); - hSamCorr->SetDirectory(0); + hSamCorr->SetDirectory(nullptr); hSamCorr->GetZaxis()->SetTitle("Correlation"); hSamCorr->SetMinimum(-1); hSamCorr->SetMaximum(1); @@ -2104,7 +2061,7 @@ void SampleSummary::StudyKinematicCorrelations() { const double Max_i = PosteriorHist[SampleNum][i+1]->GetXaxis()->GetBinUpEdge(PosteriorHist[SampleNum][i+1]->GetNbinsX()+1); //Get PolyBin - TH2PolyBin* bin = (TH2PolyBin*)NominalHist[SampleNum]->GetBins()->At(i); + TH2PolyBin* bin = static_cast(NominalHist[SampleNum]->GetBins()->At(i)); // Just make a little fancy name std::stringstream ss2; ss2 << "p_{#mu} (" << bin->GetXMin() << "-" << bin->GetXMax() << ")"; @@ -2117,12 +2074,12 @@ void SampleSummary::StudyKinematicCorrelations() { // TH2D to hold the Correlation KinCorr[i][j] = new TH2D(Form("Kin_%i_%i_%i",SampleNum,i,j), Form("Kin_%i_%i_%i",SampleNum,i,j), 70, Min_i, Max_i, 70, Min_j, Max_j); - KinCorr[i][j]->SetDirectory(0); + KinCorr[i][j]->SetDirectory(nullptr); KinCorr[i][j]->SetMinimum(0); KinCorr[i][j]->GetXaxis()->SetTitle(ss2.str().c_str()); - bin = (TH2PolyBin*)NominalHist[SampleNum]->GetBins()->At(j); + bin = static_cast(NominalHist[SampleNum]->GetBins()->At(j)); // Just make a little fancy name std::stringstream ss3; ss3 << "p_{#mu} (" << bin->GetXMin() << "-" << bin->GetXMax() << ")"; @@ -2158,7 +2115,7 @@ void SampleSummary::StudyKinematicCorrelations() { TH2D* hKinCorr = new TH2D(SampleNames[SampleNum].c_str(), SampleNames[SampleNum].c_str(), maxBins[SampleNum], 0, maxBins[SampleNum], maxBins[SampleNum], 0, maxBins[SampleNum]); - hKinCorr->SetDirectory(0); + hKinCorr->SetDirectory(nullptr); hKinCorr->GetZaxis()->SetTitle("Correlation"); hKinCorr->SetMinimum(-1); hKinCorr->SetMaximum(1); @@ -2169,7 +2126,7 @@ void SampleSummary::StudyKinematicCorrelations() { for (int i = 0; i < maxBins[SampleNum]; ++i) { //Get PolyBin - TH2PolyBin* bin = (TH2PolyBin*)NominalHist[SampleNum]->GetBins()->At(i); + TH2PolyBin* bin = 
static_cast(NominalHist[SampleNum]->GetBins()->At(i)); // Just make a little fancy name std::stringstream ss2; ss2 << "p_{#mu} (" << bin->GetXMin() << "-" << bin->GetXMax() << ")"; @@ -2178,7 +2135,7 @@ void SampleSummary::StudyKinematicCorrelations() { for (int j = 0; j < maxBins[SampleNum]; ++j) { - bin = (TH2PolyBin*)NominalHist[SampleNum]->GetBins()->At(j); + bin = static_cast(NominalHist[SampleNum]->GetBins()->At(j)); // Just make a little fancy name std::stringstream ss3; ss3 << "p_{#mu} (" << bin->GetXMin() << "-" << bin->GetXMax() << ")"; @@ -2229,9 +2186,8 @@ void SampleSummary::StudyKinematicCorrelations() { if(DoByModePlots) { // Holds the total event rate by mode - TH1D ** EventHist_ByMode = new TH1D*[Modes->GetNModes()+1]; - - for (_int_ j = 0; j < Modes->GetNModes()+1; j++) + std::vector EventHist_ByMode(Modes->GetNModes()+1); + for (int j = 0; j < Modes->GetNModes()+1; j++) { std::string ModeName = Modes->GetMaCh3ModeName(j); EventHist_ByMode[j] = new TH1D(Form("EventHist_%s", ModeName.c_str()), Form("Total Event Rate %s", ModeName.c_str()), 100, 1, -1); @@ -2243,13 +2199,13 @@ void SampleSummary::StudyKinematicCorrelations() { //KS: Here we calculate total event rates for each mode, maybe not most efficient but can be improved in the future for (unsigned int it = 0; it < nThrows; ++it) { - for (_int_ j = 0; j < Modes->GetNModes()+1; j++) + for (int j = 0; j < Modes->GetNModes()+1; j++) { double event_rate_temp = 0.; #ifdef MULTITHREAD #pragma omp parallel for reduction(+:event_rate_temp) #endif - for (_int_ SampleNum = 0; SampleNum < nSamples; SampleNum++) + for (int SampleNum = 0; SampleNum < nSamples; SampleNum++) { event_rate_temp += NoOverflowIntegral(MCVectorByMode[it][SampleNum][j]); } @@ -2257,7 +2213,7 @@ void SampleSummary::StudyKinematicCorrelations() { } } - for (_int_ i = 0; i < Modes->GetNModes()+1; ++i) + for (int i = 0; i < Modes->GetNModes()+1; ++i) { MakeCutEventRate(EventHist_ByMode[i], DataRate); } @@ -2280,7 +2236,7 @@ void 
SampleSummary::StudyKinematicCorrelations() { // TH2D to hold the Correlation ModeCorr[i][j] = new TH2D(Form("ModeCorr_%i_%i",i,j), Form("ModeCorr_%i_%i",i,j), 70, Min_i, Max_i, 70, Min_j, Max_j); - ModeCorr[i][j]->SetDirectory(0); + ModeCorr[i][j]->SetDirectory(nullptr); ModeCorr[i][j]->SetMinimum(0); ModeCorr[i][j]->GetXaxis()->SetTitle(Modes->GetMaCh3ModeName(i).c_str()); ModeCorr[i][j]->GetYaxis()->SetTitle(Modes->GetMaCh3ModeName(j).c_str()); @@ -2320,7 +2276,7 @@ void SampleSummary::StudyKinematicCorrelations() { }// End i loop TH2D* hModeCorr = new TH2D("Mode Correlation", "Mode Correlation", Modes->GetNModes()+1, 0, Modes->GetNModes()+1, Modes->GetNModes()+1, 0, Modes->GetNModes()+1); - hModeCorr->SetDirectory(0); + hModeCorr->SetDirectory(nullptr); hModeCorr->GetZaxis()->SetTitle("Correlation"); hModeCorr->SetMinimum(-1); hModeCorr->SetMaximum(1); @@ -2366,14 +2322,13 @@ void SampleSummary::StudyKinematicCorrelations() { ModeCorrelation->Write("Mode_Correlation"); delete ModeCorrelation; - for (_int_ j = 0; j < Modes->GetNModes()+1; j++) + for (int j = 0; j < Modes->GetNModes()+1; j++) { delete EventHist_ByMode[j]; } - delete[] EventHist_ByMode; } - for (_int_ i = 0; i < nSamples; ++i) + for (int i = 0; i < nSamples; ++i) { delete SumHist[i]; } @@ -2407,12 +2362,12 @@ TH1D* SampleSummary::ProjectHist(TH2D* Histogram, bool ProjectX) { // **************** // Make a projection -TH1D* SampleSummary::ProjectPoly(TH2Poly* Histogram, const bool ProjectX, const _int_ selection, const bool MakeErrorHist) { +TH1D* SampleSummary::ProjectPoly(TH2Poly* Histogram, const bool ProjectX, const int selection, const bool MakeErrorHist) { // **************** std::vector xbins; std::vector ybins; - SamplePDF->SetupBinning(selection, xbins, ybins); + SamplePDF->SetupBinning(M3::int_t(selection), xbins, ybins); TH1D* Projection = NULL; std::string name; if (ProjectX) { @@ -2541,7 +2496,7 @@ int SampleSummary::GetRandomPoly2(const TH2Poly* PolyHist){ fIntegral[nbins+1] = 
PolyHist->GetEntries(); //KS: We just return one rather then X and Y, this way we can use SetBinContent rather than Fill, which is faster - int iBin = TMath::BinarySearch(nbins, fIntegral, (Double_t) r1); + int iBin = int(TMath::BinarySearch(nbins, fIntegral, r1)); //KS: Have to increment because TH2Poly has stupid offset arghh iBin += 1; @@ -2612,7 +2567,7 @@ void SampleSummary::StudyBIC(){ #ifdef MULTITHREAD #pragma omp parallel for reduction(+:DataRate, BinsRate) #endif - for (_int_ i = 0; i < nSamples; ++i) + for (int i = 0; i < nSamples; ++i) { if (DataHist[i] == NULL) continue; DataRate += NoOverflowIntegral(DataHist[i]); @@ -2642,7 +2597,7 @@ void SampleSummary::StudyDIC() { for (unsigned int i = 0; i < nThrows; ++i) { double LLH_temp = 0.; - for (_int_ SampleNum = 0; SampleNum < nSamples; ++SampleNum) + for (int SampleNum = 0; SampleNum < nSamples; ++SampleNum) { LLH_temp += GetLLH(DataHist[SampleNum], MCVector[i][SampleNum], W2MCVector[i][SampleNum]); } @@ -2661,7 +2616,6 @@ void SampleSummary::StudyDIC() { MACH3LOG_INFO("Effective number of parameters following DIC formalism is equal to: {:.2f}", p_D); MACH3LOG_INFO("DIC test statistic = {:.2f}", DIC_stat); MACH3LOG_INFO("******************************"); - return; } // **************** @@ -2673,5 +2627,4 @@ void SampleSummary::FastViolinFill(TH2D* violin, TH1D* hist_1d){ const double y = violin->GetYaxis()->FindBin(hist_1d->GetBinContent(x+1)); violin->SetBinContent(x+1, y, violin->GetBinContent(x+1, y)+1); } - return; } diff --git a/mcmc/SampleSummary.h b/mcmc/SampleSummary.h index ad9d89751..dfa3e8705 100644 --- a/mcmc/SampleSummary.h +++ b/mcmc/SampleSummary.h @@ -86,7 +86,7 @@ class SampleSummary { /// @brief Helper to project TH2D onto axis inline TH1D* ProjectHist(TH2D* Histogram, const bool ProjectX); /// @brief Helper to project TH2Poly onto axis - inline TH1D* ProjectPoly(TH2Poly* Histogram, const bool ProjectX, const _int_ selection, const bool MakeErrorHist = false); + inline TH1D* 
ProjectPoly(TH2Poly* Histogram, const bool ProjectX, const int selection, const bool MakeErrorHist = false); /// @brief Make Poisson fluctuation of TH1D hist inline void MakeFluctuatedHistogram(TH1D *FluctHist, TH1D* PolyHist); @@ -120,7 +120,7 @@ class SampleSummary { inline void NormaliseTH2Poly(TH2Poly* Histogram); /// Random number generator - TRandom3* rnd; + std::unique_ptr rnd; /// KS: Hacky flag to let us know if this is first toy bool first_pass; @@ -140,7 +140,7 @@ class SampleSummary { std::vector WeightVector; /// Number of samples - _int_ nSamples; + int nSamples; /// name for each sample std::vector SampleNames; @@ -236,7 +236,7 @@ class SampleSummary { unsigned int nThrows; /// Max Number of Bins per each sample - int* maxBins; + std::vector maxBins; /// Total LLH for the posterior predictive distribution double llh_total; @@ -246,41 +246,41 @@ class SampleSummary { /// Output filename TFile *Outputfile; /// Directory for each sample - TDirectory **Dir; + std::vector Dir; /// TTree which we save useful information to TTree *OutputTree; /// Data vs Draw - double *llh_data_draw; + std::vector llh_data_draw; /// Fluctuated Draw vs Draw - double *llh_drawfluc_draw; + std::vector llh_drawfluc_draw; /// Fluctuated Predictive vs Draw - double *llh_predfluc_draw; + std::vector llh_predfluc_draw; /// Data vs Draw using rate only - double *llh_rate_data_draw; + std::vector llh_rate_data_draw; /// Fluctuated Predictive vs Draw using rate only - double *llh_rate_predfluc_draw; + std::vector llh_rate_predfluc_draw; /// Data vs Fluctuated Draw - double *llh_data_drawfluc; + std::vector llh_data_drawfluc; /// Data vs Fluctuated Predictive - double *llh_data_predfluc; + std::vector llh_data_predfluc; /// Draw vs Predictive - double *llh_draw_pred; + std::vector llh_draw_pred; /// Fluctuated Draw vs Predictive - double *llh_drawfluc_pred; + std::vector llh_drawfluc_pred; /// Fluctuated Predictive vs Predictive - double *llh_predfluc_pred; + std::vector 
llh_predfluc_pred; /// Fluctuated Draw vs Fluctuated Predictive - double *llh_drawfluc_predfluc; + std::vector llh_drawfluc_predfluc; /// Fluctuated Data vs Draw - double *llh_datafluc_draw; + std::vector llh_datafluc_draw; /// Projection X (most likely muon momentum) of LLH - double *llh_data_draw_ProjectX; - double *llh_drawfluc_draw_ProjectX; + std::vector llh_data_draw_ProjectX; + std::vector llh_drawfluc_draw_ProjectX; /// LLH penalty for each throw double llh_penalty; @@ -337,5 +337,5 @@ class SampleSummary { int nModelParams; /// Tells Debug level to save additional histograms - _int_ Debug; + int Debug; }; diff --git a/mcmc/StatisticalUtils.h b/mcmc/StatisticalUtils.h index 4680ef0d4..4312b6b3a 100644 --- a/mcmc/StatisticalUtils.h +++ b/mcmc/StatisticalUtils.h @@ -12,6 +12,9 @@ #include "samplePDF/Structs.h" #include "manager/manager.h" +/// @file StatisticalUtils.h +/// @brief Utility functions for statistical interpretations in MaCh3 + // ************************** /// @brief KS: Following H. Jeffreys \cite jeffreys1998theory /// @param BayesFactor Obtained value of Bayes factor @@ -89,7 +92,7 @@ inline double GetBIC(const double llh, const int data, const int nPars){ MACH3LOG_ERROR("You haven't passed number of model parameters as it is still zero"); throw MaCh3Exception(__FILE__ , __LINE__ ); } - const double BIC = nPars * logl(data) + llh; + const double BIC = double(nPars * logl(data) + llh); return BIC; } @@ -120,9 +123,9 @@ inline void CheckBonferoniCorrectedpValue(const std::vector& Sample MACH3LOG_ERROR("Size of vectors do not match"); throw MaCh3Exception(__FILE__ , __LINE__ ); } - const int NumberOfStatisticalTests = SampleNameVec.size(); + const size_t NumberOfStatisticalTests = SampleNameVec.size(); //KS: 0.05 or 5% is value used by T2K. 
- const double StatisticalSignificanceDown = Threshold / NumberOfStatisticalTests; + const double StatisticalSignificanceDown = Threshold / double(NumberOfStatisticalTests); const double StatisticalSignificanceUp = 1 - StatisticalSignificanceDown; MACH3LOG_INFO("Bonferroni-corrected statistical significance level: {:.2f}", StatisticalSignificanceDown); @@ -417,8 +420,6 @@ inline void GetCredibleInterval(TH1D* const hist, TH1D* hpost_copy, const double { if(hist_copy_fill[i]) hpost_copy->SetBinContent(i, hist->GetBinContent(i)); } - - return; } // *************** @@ -473,8 +474,6 @@ inline void GetCredibleRegion(TH2D* const hist2D, const double coverage = 0.6827 Contour[0] = max_entries; } hist2D->SetContour(1, Contour); - - return; } // ********************* @@ -527,8 +526,61 @@ inline double FisherCombinedPValue(const std::vector& pvalues) { testStatistic += -2.0 * std::log(pval); } // Degrees of freedom is twice the number of p-values - int degreesOfFreedom = 2 * pvalues.size(); + int degreesOfFreedom = int(2 * pvalues.size()); double pValue = TMath::Prob(testStatistic, degreesOfFreedom); return pValue; } + +// ******************** +/// @brief Thin MCMC Chain, to save space and maintain low autocorrelations. 
+/// +/// @param FilePath Path to MCMC chain you want to thin +/// @param ThinningCut every which entry you want to thin +/// @cite 2011ThinningMCMC +/// @warning Thinning is done over entry not steps, it may not work very well for merged chains +inline void ThinningMCMC(const std::string& FilePath, const int ThinningCut) { +// ******************** + // Define the path for the temporary thinned file + std::string TempFilePath = "Thinned_" + FilePath; + int ret = system(("cp " + FilePath + " " + TempFilePath).c_str()); + if (ret != 0) { + MACH3LOG_WARN("Error: system call to copy file failed with code {}", ret); + } + + TFile *inFile = TFile::Open(TempFilePath.c_str(), "UPDATE"); + if (!inFile || inFile->IsZombie()) { + MACH3LOG_ERROR("Error opening file: {}", TempFilePath); + throw MaCh3Exception(__FILE__, __LINE__); + } + + TTree *inTree = inFile->Get("posteriors"); + if (!inTree) { + MACH3LOG_ERROR("Error: TTree 'posteriors' not found in file."); + inFile->ls(); + inFile->Close(); + throw MaCh3Exception(__FILE__, __LINE__); + } + + // Clone the structure without data + TTree *outTree = inTree->CloneTree(0); + + // Loop over entries and apply thinning + Long64_t nEntries = inTree->GetEntries(); + double retainedPercentage = (double(nEntries) / ThinningCut) / double(nEntries) * 100; + MACH3LOG_INFO("Thinning will retain {:.2f}% of chains", retainedPercentage); + for (Long64_t i = 0; i < nEntries; i++) { + if (i % (nEntries/10) == 0) { + MaCh3Utils::PrintProgressBar(i, nEntries); + } + if (i % ThinningCut == 0) { + inTree->GetEntry(i); + outTree->Fill(); + } + } + inFile->WriteTObject(outTree, "posteriors", "kOverwrite"); + inFile->Close(); + delete inFile; + + MACH3LOG_INFO("Thinned TTree saved and overwrote original in: {}", TempFilePath); +} diff --git a/mcmc/mcmc.cpp b/mcmc/mcmc.cpp index 5269dc0cf..4f6a665b0 100644 --- a/mcmc/mcmc.cpp +++ b/mcmc/mcmc.cpp @@ -10,7 +10,7 @@ mcmc::mcmc(manager *man) : FitterBase(man) { // Starting parameters should be thrown 
reject = false; - chainLength = fitMan->raw()["General"]["MCMC"]["NSteps"].as(); + chainLength = fitMan->raw()["General"]["MCMC"]["NSteps"].as(); AnnealTemp = GetFromManager(fitMan->raw()["General"]["MCMC"]["AnnealTemp"], -999); if(AnnealTemp < 0) anneal = false; @@ -200,10 +200,10 @@ void mcmc::PrintProgress() { MACH3LOG_INFO("Step:\t{}/{}, current: {:.2f}, proposed: {:.2f}", step - stepStart, chainLength, logLCurr, logLProp); MACH3LOG_INFO("Accepted/Total steps: {}/{} = {:.2f}", accCount, step - stepStart, static_cast(accCount) / static_cast(step - stepStart)); - for (std::vector::iterator it = systematics.begin(); it != systematics.end(); ++it) { - if (std::string((*it)->getName()) == "xsec_cov") { + for (covarianceBase *cov : systematics) { + if (cov->getName() == "xsec_cov") { MACH3LOG_INFO("Cross-section parameters: "); - (*it)->printNominalCurrProp(); + cov->printNominalCurrProp(); } } #ifdef DEBUF @@ -222,7 +222,7 @@ void mcmc::StartFromPreviousFit(const std::string& FitName) { // For MCMC we also need to set stepStart TFile *infile = new TFile(FitName.c_str(), "READ"); - TTree *posts = (TTree*)infile->Get("posteriors"); + TTree *posts = infile->Get("posteriors"); int step_val = 0; posts->SetBranchAddress("step",&step_val); diff --git a/plotting/CMakeLists.txt b/plotting/CMakeLists.txt index 2323f1662..88b1eaba8 100644 --- a/plotting/CMakeLists.txt +++ b/plotting/CMakeLists.txt @@ -6,8 +6,8 @@ foreach(app PlotLLH MatrixPlotter ) - add_executable( ${app} ${app}.cpp ) - target_link_libraries( ${app} MaCh3::Plotting ) + add_executable(${app} ${app}.cpp ) + target_link_libraries(${app} MaCh3::Plotting MaCh3Warnings) install(TARGETS ${app} DESTINATION ${CMAKE_BINARY_DIR}/bin) endforeach(app) diff --git a/plotting/GetPostfitParamPlots.cpp b/plotting/GetPostfitParamPlots.cpp index b1f02b0bd..b816b37d2 100644 --- a/plotting/GetPostfitParamPlots.cpp +++ b/plotting/GetPostfitParamPlots.cpp @@ -36,6 +36,10 @@ /// /// @note Originally written by Clarence, with changes 
by Will, updates by Kamil, and converted to a generic plotter by Ewan. +//this file has lots of usage of the ROOT plotting interface that only takes floats, turn this warning off for this CU for now +#pragma GCC diagnostic ignored "-Wfloat-conversion" +#pragma GCC diagnostic ignored "-Wconversion" + MaCh3Plotting::PlottingManager *man; TH1D *Prefit; @@ -114,7 +118,7 @@ void ReadSettings(std::shared_ptr File1) { MACH3LOG_DEBUG("Reading settings for file {}", File1->GetName()); File1->ls(); - TTree *Settings = (TTree*)(File1->Get("Settings")); + TTree *Settings = (File1->Get("Settings")); MACH3LOG_DEBUG("Got settings tree"); Settings->Print(); @@ -146,7 +150,7 @@ void ReadSettings(std::shared_ptr File1) inline TH1D* makeRatio(TH1D *PrefitCopy, TH1D *PostfitCopy, bool setAxes){ // set up the ratio hist - TH1D *Ratio = (TH1D*)PrefitCopy->Clone(); + TH1D* Ratio = static_cast(PrefitCopy->Clone()); Ratio->GetYaxis()->SetTitle("(x_{Post}-#mu_{Prior})/#sigma_{Prior}"); Ratio->SetMinimum(-3.7); Ratio->SetMaximum(3.7); @@ -224,7 +228,7 @@ inline void DrawPlots(TCanvas *plotCanv, TH1D* PrefitCopy, std::vectorPo PrefitCopy->GetYaxis()->SetTitleOffset(1.3); PrefitCopy->Draw("e2"); - for(int fileId=0; fileId < (int)PostfitVec.size(); fileId++){ + for (int fileId = 0; fileId < static_cast(PostfitVec.size()); fileId++) { TH1D *postFitHist = PostfitVec[fileId]; postFitHist->SetMarkerColor(TColor::GetColorPalette(fileId)); @@ -254,7 +258,7 @@ inline void DrawPlots(TCanvas *plotCanv, TH1D* PrefitCopy, std::vectorPo ratioHists.push_back(makeRatio(PrefitCopy, PostfitVec[0], true)); ratioHists[0]->Draw("p"); - for(int postFitIdx = 1; postFitIdx < (int)PostfitVec.size(); postFitIdx++){ + for(int postFitIdx = 1; postFitIdx < static_cast(PostfitVec.size()); postFitIdx++){ ratioHists.push_back(makeRatio(PrefitCopy, PostfitVec[postFitIdx], true)); ratioHists[postFitIdx]->SetMarkerColor(TColor::GetColorPalette(postFitIdx)); @@ -295,7 +299,7 @@ void MakeXsecPlots() { // get the names of the 
blocks of parameters to group together std::vector const blockNames = man->getOption>("paramGroups"); - const int XsecPlots = (int)blockNames.size(); + const int XsecPlots = static_cast(blockNames.size()); for (int i = 0; i < XsecPlots; i++) { @@ -307,10 +311,11 @@ void MakeXsecPlots() std::vector blockContents = paramBlock[2].as>(); // get num of params in the block - int nParams = (int)blockContents.size(); + const int nParams = static_cast(blockContents.size()); // set some plot things - TH1D *blockHist_prefit = new TH1D(blockName.c_str(), blockTitle.c_str(), nParams, 0.0, (double)nParams); + TH1D *blockHist_prefit = new TH1D(blockName.c_str(), + blockTitle.c_str(), nParams, 0.0, static_cast(nParams)); man->style().setTH1Style(blockHist_prefit, man->getOption("prefitHistStyle")); @@ -323,9 +328,10 @@ void MakeXsecPlots() // now set for the postfit blocks for all files std::vector blockHist_postfit_Vec; - for(int fileId = 0; fileId < man->getNFiles(); fileId++){ + for(unsigned int fileId = 0; fileId < man->getNFiles(); fileId++){ - TH1D *blockHist_postfit = new TH1D((blockName + man->getFileName(fileId)).c_str(), blockTitle.c_str(), nParams, 0.0, (double)nParams); + TH1D *blockHist_postfit = new TH1D((blockName + man->getFileName(fileId)).c_str(), + blockTitle.c_str(), nParams, 0.0, static_cast(nParams)); // loop throught all the parameters in this block and set the contents in the blocks TH1 for(int localBin=0; localBin < nParams; localBin ++){ @@ -357,7 +363,7 @@ void MakeFluxPlots() std::vector const fluxBlockNames = man->getOption>("fluxGroups"); auto const fluxBinningTable = man->getOption("FluxBinning"); - const int FluxPlots = (int)fluxBlockNames.size(); + const int FluxPlots = static_cast(fluxBlockNames.size()); for (int i = 0; i < FluxPlots; i++) { @@ -380,7 +386,7 @@ void MakeFluxPlots() MACH3LOG_CRITICAL(" Should have the form [, ]"); throw MaCh3Exception(__FILE__ , __LINE__ ); } - if(nParams != (int)binning.size() -1){ + if (nParams != 
static_cast(binning.size()) - 1) { MACH3LOG_CRITICAL("Binning provided for flux param block {} does not match the number of parameters specified for the block", fluxBlockName); MACH3LOG_CRITICAL(" Provided {} parameters but {} bins", nParams, binning.size() -1); throw MaCh3Exception(__FILE__ , __LINE__ ); @@ -400,7 +406,7 @@ void MakeFluxPlots() // now set for the postfit blocks for all files std::vector blockHist_postfit_Vec; - for(int fileId = 0; fileId < man->getNFiles(); fileId++){ + for(unsigned int fileId = 0; fileId < man->getNFiles(); fileId++){ TH1D *blockHist_postfit = new TH1D(fluxBlockName.c_str(), blockTitle.c_str(), nParams, binning.data()); for(int fluxParId = blockContents[0]; fluxParId <= blockContents[1]; fluxParId++){ @@ -440,7 +446,7 @@ void MakeNDDetPlots() NDbinCounter += NDSamplesBins[i]; std::vector PostfitNDDetHistVec(man->getNFiles()); - TH1D *PreFitNDDetHist = (TH1D*)man->input().getFile(0).file->Get(Form("param_%s_prefit", NDSamplesNames[i].c_str())); + TH1D *PreFitNDDetHist = man->input().getFile(0).file->Get(Form("param_%s_prefit", NDSamplesNames[i].c_str())); man->style().setTH1Style(PreFitNDDetHist, man->getOption("prefitHistStyle")); std::string temp = NDSamplesNames[i].c_str(); @@ -452,8 +458,8 @@ void MakeNDDetPlots() MACH3LOG_DEBUG(" Start bin: {} :: End bin: {}", Start, NDbinCounter); // set the x range for the postfits - for(int fileId = 0; fileId < man->getNFiles(); fileId++){ - PostfitNDDetHistVec[fileId] = (TH1D*)man->input().getFile(fileId).file->Get(Form("param_%s_%s", NDSamplesNames[i].c_str(), plotType.c_str())); + for(unsigned int fileId = 0; fileId < man->getNFiles(); fileId++){ + PostfitNDDetHistVec[fileId] = man->input().getFile(fileId).file->Get(Form("param_%s_%s", NDSamplesNames[i].c_str(), plotType.c_str())); } //KS: We dont' need name for every nd param @@ -526,8 +532,6 @@ void MakeOscPlots() void MakeXsecRidgePlots() { - - gStyle->SetPalette(51); TCanvas *blankCanv = new TCanvas ("blankCanv", "blankCanv", 2048, 
2048); @@ -535,7 +539,7 @@ void MakeXsecRidgePlots() // get the names of the blocks of parameters to group together std::vector const blockNames = man->getOption>("paramGroups"); - const int XsecPlots = (int)blockNames.size(); + const int XsecPlots = static_cast(blockNames.size()); double padTopMargin = 0.9; double padBottomMargin = 0.1; @@ -544,47 +548,46 @@ void MakeXsecRidgePlots() for (int i = 0; i < XsecPlots; i++) { - // get the configuration for this parameter std::string blockName = blockNames[i]; auto const ¶mBlock = man->getOption(blockName); - std::string blockTitle = paramBlock[0].as(); - std::vector blockLimits = paramBlock[1].as>(); - std::vector blockContents = paramBlock[2].as>(); + auto blockTitle = paramBlock[0].as(); + auto blockLimits = paramBlock[1].as>(); + auto blockContents = paramBlock[2].as>(); // the directory of histograms - TDirectoryFile *posteriorDir = (TDirectoryFile *)man->input().getFile(0).file->Get("Post"); + TDirectoryFile *posteriorDir = man->input().getFile(0).file->Get("Post"); // get num of params in the block - int nParams = (int)blockContents.size(); + int nParams = static_cast(blockContents.size()); TCanvas *ridgeCanv = new TCanvas ("RidgePlotCanv", "RidgePlotCanv", 2048, 2048); ridgeCanv->Divide(1,1+nParams, 0.01, 0.0); - TLatex *title = new TLatex(); + auto title = std::make_unique(); title->SetTextAlign(21); title->SetTextSize(0.03); title->DrawLatex(0.5, 0.95, blockTitle.c_str()); - TLatex *label = new TLatex(); + auto label = std::make_unique(); label->SetTextAlign(31); label->SetTextSize(0.02); - TLine *line = new TLine(); + auto line = std::make_unique(); line->SetLineColor(kBlack); line->SetLineWidth(ridgeLineWidth); // use this to set the limits and also to plot the x axis and grid TH1D *axisPlot = new TH1D("axis plot", "", 1, blockLimits[0], blockLimits[1]); - for(int parId=0; parId < nParams; parId++){ + for(int parId = 0; parId < nParams; parId++){ std::string paramName = blockContents[parId]; - TCanvas 
*posteriorDistCanv = NULL; - TH1D *posteriorDist = NULL; + TCanvas *posteriorDistCanv = nullptr; + TH1D *posteriorDist = nullptr; // get the list of objects in the directory TIter next(posteriorDir->GetListOfKeys()); - while(TKey *key = (TKey*) next()){ + while (TKey* key = static_cast(next())) { // check if the end of the param name matches with the MaCh3 name, do this so we exclude things like nds_ at the start of the name std::string str(key->GetTitle()); std::string name = man->input().translateName(0, MaCh3Plotting::kPostFit, paramName); @@ -592,19 +595,20 @@ void MakeXsecRidgePlots() bool foundPar = (pos == str.length() - name.length()); if(foundPar){ - posteriorDistCanv = (TCanvas*)posteriorDir->Get(key->GetName()); - posteriorDist = (TH1D*)posteriorDistCanv->GetPrimitive(key->GetName()); + posteriorDistCanv = posteriorDir->Get(key->GetName()); + posteriorDist = static_cast(posteriorDistCanv->GetPrimitive(key->GetName())); } } - if(posteriorDist == NULL){ + if(posteriorDist == nullptr){ MACH3LOG_WARN("Couldnt find parameter {} when making ridgeline plots", paramName); continue; } // EM: do some funky scaling so that we always get evenly spaced pads in the range [bottomMargin, TopMargin] with the specified overlap - double padAnchor = padBottomMargin + ((float)(nParams - parId -1) / (float)(nParams -1)) * (padTopMargin - padBottomMargin); - double padWidth = ((padTopMargin - padBottomMargin) / (float)(nParams)); + double padAnchor = padBottomMargin + (static_cast(nParams - parId - 1) / + static_cast(nParams - 1)) * (padTopMargin - padBottomMargin); + double padWidth = (padTopMargin - padBottomMargin) / static_cast(nParams); double norm = (padTopMargin - padBottomMargin); double padTop = padWidth * (1.0 + padOverlap) * (padTopMargin - padAnchor) / norm + padAnchor; @@ -627,13 +631,14 @@ void MakeXsecRidgePlots() posteriorDist->SetTitle(""); posteriorDist->SetLineWidth(ridgeLineWidth); - TH1D *axisPlot_tmp = (TH1D*)axisPlot->Clone(Form("AxisPlot_%s", 
paramName.c_str())); + TH1D* axisPlot_tmp = static_cast(axisPlot->Clone(Form("AxisPlot_%s", paramName.c_str()))); axisPlot_tmp->Draw("A"); posteriorDist->Draw("H SAME"); axisPlot_tmp->GetYaxis()->SetRangeUser(0.0, 0.7 *posteriorDist->GetMaximum()); posteriorDist->SetLineColor(kWhite); - posteriorDist->SetFillColorAlpha(TColor::GetColorPalette(floor((float)parId * TColor::GetNumberOfColors()/ (float)nParams)), 0.85); + posteriorDist->SetFillColorAlpha(TColor::GetColorPalette(floor(static_cast(parId) * + TColor::GetNumberOfColors() / static_cast(nParams))), 0.85); posteriorDist->GetXaxis()->SetRangeUser(blockLimits[0], blockLimits[1]); posteriorDist->GetYaxis()->SetTitle(paramName.c_str()); @@ -651,7 +656,7 @@ void MakeXsecRidgePlots() ridgeCanv->cd(); ridgeCanv->SetGrid(1,1); - TPad *axisPad = new TPad("AxisPad", "", 0.3, 0.0, 0.9, 1.0, -1, 0, -1); + TPad *axisPad = new TPad("AxisPad", "", 0.3, 0.0, 0.9, 1.0, -1, 0, -1); axisPad->SetLeftMargin(0.0); axisPad->SetRightMargin(0.0); axisPad->Draw(); @@ -682,7 +687,6 @@ void MakeXsecRidgePlots() ridgeCanv->SaveAs("RidgePlots.pdf"); delete ridgeCanv; } - blankCanv->SaveAs("RidgePlots.pdf]"); } @@ -737,14 +741,13 @@ void GetPostfitParamPlots() p4->SetGrid(); // Make a Legend page - TLegend *leg = new TLegend(0.0, 0.0, 1.0, 1.0); - + auto leg = std::make_unique(0.0, 0.0, 1.0, 1.0); // make a dummy TH1 to set out legend Prefit = new TH1D(); man->style().setTH1Style(Prefit, man->getOption("prefitHistStyle")); leg->AddEntry(Prefit, "Prior", "lpf"); - for(int fileId = 0; fileId < man->getNFiles(); fileId++){ + for(unsigned int fileId = 0; fileId < man->getNFiles(); fileId++){ TH1D *postFitHist_tmp = new TH1D(); postFitHist_tmp->SetBit(kCanDelete); @@ -759,7 +762,6 @@ void GetPostfitParamPlots() canv->Clear(); leg->Draw(); canv->Print((SaveName).c_str()); - delete leg; MakeXsecPlots(); @@ -785,19 +787,18 @@ void GetPostfitParamPlots() inline TGraphAsymmErrors* MakeTGraphAsymmErrors(std::shared_ptr File) { - double* x = new 
double[nBins]; - double* y = new double[nBins]; - double* exl = new double[nBins]; - double* eyl = new double[nBins]; - double* exh = new double[nBins]; - double* eyh = new double[nBins]; - - TH1D* PostHist = (TH1D*)File->Get( ("param_xsec_"+plotType).c_str() )->Clone(); - - TVectorD* Errors_HPD_Positive = (TVectorD*)File->Get( "Errors_HPD_Positive" )->Clone(); - TVectorD* Errors_HPD_Negative = (TVectorD*)File->Get( "Errors_HPD_Negative" )->Clone(); - - //KS: I am tempted to multithred this... + std::vector x(nBins); + std::vector y(nBins); + std::vector exl(nBins); + std::vector eyl(nBins); + std::vector exh(nBins); + std::vector eyh(nBins); + + TH1D* PostHist = static_cast(File->Get( ("param_xsec_"+plotType).c_str() )->Clone()); + + TVectorD* Errors_HPD_Positive = static_cast(File->Get( "Errors_HPD_Positive" )->Clone()); + TVectorD* Errors_HPD_Negative = static_cast(File->Get( "Errors_HPD_Negative" )->Clone()); + //KS: I am tempted to multithread this... for(int i = 0; i < nBins; ++i) { //KS: We are extrcting value from three object each having different numbering scheme, I have checked carefully so this is correct please don't cahnge all these +1 +0.5 etc. it just work... 
@@ -810,18 +811,12 @@ inline TGraphAsymmErrors* MakeTGraphAsymmErrors(std::shared_ptr File) eyh[i] = (*Errors_HPD_Positive)(i); eyl[i] = (*Errors_HPD_Negative)(i); } - TGraphAsymmErrors* PostGraph = new TGraphAsymmErrors(nBins,x,y,exl,exh,eyl,eyh); + TGraphAsymmErrors* PostGraph = new TGraphAsymmErrors(nBins, x.data(), y.data(), exl.data(), exh.data(), eyl.data(), eyh.data()); PostGraph->SetTitle(""); delete PostHist; delete Errors_HPD_Positive; delete Errors_HPD_Negative; - delete[] x; - delete[] y; - delete[] exl; - delete[] eyl; - delete[] exh; - delete[] eyh; return PostGraph; } @@ -847,7 +842,7 @@ void GetViolinPlots(std::string FileName1 = "", std::string FileName2 = "") if(PlotAssym) SaveName += "_Assym"; std::shared_ptr File1 = std::make_shared(FileName1.c_str()); - std::shared_ptr File2 = NULL; + std::shared_ptr File2 = nullptr; if(FileName2 != "") File2 = std::make_shared(FileName2.c_str()); canv = new TCanvas("canv", "canv", 1024, 1024); @@ -864,10 +859,10 @@ void GetViolinPlots(std::string FileName1 = "", std::string FileName2 = "") canv->Print((SaveName+".pdf[").c_str()); canv->SetGrid(); - Violin = NULL; - ViolinPre = (TH2D*)File1->Get( "param_violin_prior" ); - Violin = (TH2D*)File1->Get( "param_violin" ); - if(Violin == NULL) + Violin = nullptr; + ViolinPre = File1->Get( "param_violin_prior" ); + Violin = File1->Get( "param_violin" ); + if(Violin == nullptr) { MACH3LOG_ERROR("Couldn't find violin plot, make sure method from MCMCProcessor is being called"); return; @@ -889,7 +884,7 @@ void GetViolinPlots(std::string FileName1 = "", std::string FileName2 = "") Violin->SetMarkerStyle(20); Violin->SetMarkerSize(0.5); - TH1D* Postfit = (TH1D*)File1->Get( ("param_xsec_"+plotType).c_str() ); + TH1D* Postfit = File1->Get( ("param_xsec_"+plotType).c_str() ); Postfit->SetMarkerColor(kRed); Postfit->SetLineColor(kRed); Postfit->SetMarkerStyle(7); @@ -900,9 +895,9 @@ void GetViolinPlots(std::string FileName1 = "", std::string FileName2 = "") 
PostGraph->SetMarkerStyle(7); PostGraph->SetLineWidth(2); PostGraph->SetLineStyle(kSolid); - if(File2 != NULL) + if(File2 != nullptr) { - Violin2 = (TH2D*)File2->Get( "param_violin" ); + Violin2 = File2->Get( "param_violin" ); Violin2->SetMarkerColor(kGreen); Violin2->SetLineColor(kGreen); Violin2->SetFillColor(kGreen); @@ -910,18 +905,17 @@ void GetViolinPlots(std::string FileName1 = "", std::string FileName2 = "") } // Make a Legend page - TLegend *leg = new TLegend(0.0, 0.0, 1.0, 1.0); - if (ViolinPre != NULL) leg->AddEntry(ViolinPre, "Prior", "lpf"); - if (Violin != NULL) leg->AddEntry(Violin, "Posterior", "lpf"); - if (Violin2 != NULL) leg->AddEntry(Violin2, "Second Violin", "lpf"); - if(PlotAssym) leg->AddEntry(PostGraph, "HPD Assym", "lp"); - else leg->AddEntry(Postfit, "HPD", "lpf"); + auto leg = std::make_unique(0.0, 0.0, 1.0, 1.0); + if (ViolinPre != nullptr) leg->AddEntry(ViolinPre, "Prior", "lpf"); + if (Violin != nullptr) leg->AddEntry(Violin, "Posterior", "lpf"); + if (Violin2 != nullptr) leg->AddEntry(Violin2, "Second Violin", "lpf"); + if(PlotAssym) leg->AddEntry(PostGraph, "HPD Assym", "lp"); + else leg->AddEntry(Postfit, "HPD", "lpf"); canv->cd(); canv->Clear(); leg->Draw(); canv->Print((SaveName+".pdf").c_str()); - delete leg; // Do some fancy replacements PrettifyTitles(ViolinPre); @@ -939,47 +933,47 @@ void GetViolinPlots(std::string FileName1 = "", std::string FileName2 = "") Violin->GetXaxis()->SetRangeUser(XsecOffset[i-1], XsecOffset[i]); Violin->GetYaxis()->SetRangeUser(-1.5, 2.5); - if(File2 != NULL) Violin2->GetYaxis()->SetRangeUser(-1.5, 2.5); + if(File2 != nullptr) Violin2->GetYaxis()->SetRangeUser(-1.5, 2.5); std::string name = ViolinPre->GetTitle(); if (name.find("SPP") != std::string::npos) { ViolinPre->GetYaxis()->SetRangeUser(-5., 25.); //For RES Eb we need huge range Violin->GetYaxis()->SetRangeUser(-5., 25.); - if(File2 != NULL) Violin2->GetYaxis()->SetRangeUser(-5., 25.); + if(File2 != nullptr) 
Violin2->GetYaxis()->SetRangeUser(-5., 25.); } else if (name.find("CCQE Binding Energy") != std::string::npos) { ViolinPre->GetYaxis()->SetRangeUser(-10., 16.); //For Eb we need bigger range Violin->GetYaxis()->SetRangeUser(-10., 16.); //For Eb we need bigger range - if(File2 != NULL) Violin2->GetYaxis()->SetRangeUser(-10., 16.); + if(File2 != nullptr) Violin2->GetYaxis()->SetRangeUser(-10., 16.); } else if (name.find("FSI") != std::string::npos) { ViolinPre->GetYaxis()->SetRangeUser(-0.5, 2.8); Violin->GetYaxis()->SetRangeUser(-0.5, 2.8); - if(File2 != NULL) Violin2->GetYaxis()->SetRangeUser(-0.5, 2.8); + if(File2 != nullptr) Violin2->GetYaxis()->SetRangeUser(-0.5, 2.8); } else if (name.find("CCQE") != std::string::npos) { ViolinPre->GetYaxis()->SetRangeUser(-2., 3.); Violin->GetYaxis()->SetRangeUser(-2., 3.); - if(File2 != NULL) Violin2->GetYaxis()->SetRangeUser(-2., 3.); + if(File2 != nullptr) Violin2->GetYaxis()->SetRangeUser(-2., 3.); } //KS: ROOT6 has some additional options, consider updating it. more https://root.cern/doc/master/classTHistPainter.html#HP140b ViolinPre->Draw("violinX(03100300)"); Violin->Draw("violinX(03100300) SAME"); - if(File2 != NULL) + if(File2 != nullptr) { Violin2->GetXaxis()->SetRangeUser(XsecOffset[i-1], XsecOffset[i]); Violin2->GetXaxis()->SetRangeUser(XsecOffset[i-1], XsecOffset[i]); Violin2->Draw("violinX(03100300) SAME"); } - if (Postfit != NULL) { + if (Postfit != nullptr) { Postfit->GetXaxis()->SetRangeUser(XsecOffset[i-1], XsecOffset[i]); if(!PlotAssym) Postfit->Draw("SAME"); } - if (PostGraph != NULL) { + if (PostGraph != nullptr) { PostGraph->GetXaxis()->SetRangeUser(XsecOffset[i-1], XsecOffset[i]); if(PlotAssym) PostGraph->Draw("P SAME"); } @@ -1008,7 +1002,7 @@ void GetViolinPlots(std::string FileName1 = "", std::string FileName2 = "") //KS: ROOT6 has some additional options, consider updaiting it. 
more https://root.cern/doc/master/classTHistPainter.html#HP140b ViolinPre->Draw("violinX(03100300)"); Violin->Draw("violinX(03100300) SAME"); - if(File2 != NULL) + if(File2 != nullptr) { Violin2->GetYaxis()->SetRangeUser(0.7, 1.3); Violin2->GetXaxis()->SetRangeUser(nFlux, nFlux+FluxInterval); @@ -1033,7 +1027,7 @@ void GetViolinPlots(std::string FileName1 = "", std::string FileName2 = "") //KS: ROOT6 has some additional options, consider updating it. more https://root.cern/doc/master/classTHistPainter.html#HP140b ViolinPre->Draw("violinX(03100300)"); Violin->Draw("violinX(03100300) SAME"); - if(File2 != NULL) + if(File2 != nullptr) { Violin2->GetXaxis()->SetRangeUser(CrossSectionParameters, nBins); Violin2->GetYaxis()->SetRangeUser(-3.4, 3.4); @@ -1047,11 +1041,11 @@ void GetViolinPlots(std::string FileName1 = "", std::string FileName2 = "") delete canv; delete ViolinPre; delete Violin; - if(Violin2 != NULL) delete Violin2; + if(Violin2 != nullptr) delete Violin2; delete Postfit; delete PostGraph; File1->Close(); - if(File2 != NULL) + if(File2 != nullptr) { File2->Close(); } @@ -1059,45 +1053,45 @@ void GetViolinPlots(std::string FileName1 = "", std::string FileName2 = "") int main(int argc, char *argv[]) { - SetMaCh3LoggerFormat(); - - man = new MaCh3Plotting::PlottingManager(); - man->parseInputs(argc, argv); - std::cout << std::endl << std::endl << "====================" << std::endl; - man->input().getFile(0).file->ls(); - - man->setExec("GetPostfitParamPlots"); + SetMaCh3LoggerFormat(); - man->style().setPalette(man->getOption("colorPalette")); + man = new MaCh3Plotting::PlottingManager(); + man->parseInputs(argc, argv); + std::cout << std::endl << std::endl << "====================" << std::endl; + man->input().getFile(0).file->ls(); - GetPostfitParamPlots(); + man->setExec("GetPostfitParamPlots"); - if (argc != 2 && argc != 3 && argc !=4) - { - MACH3LOG_CRITICAL("Invalid command line options specified"); - MACH3LOG_CRITICAL("How to use: {} .root", argv[0]); 
- MACH3LOG_CRITICAL("You can add up to 3 different files"); - throw MaCh3Exception(__FILE__, __LINE__); - } - - if (argc == 2) - { - std::string filename = argv[1]; - GetViolinPlots(filename); - } - else if (argc == 3) - { - std::string filename1 = argv[1]; - std::string filename2 = argv[2]; - GetViolinPlots(filename1, filename2); - } - else if (argc == 4) - { - std::string filename1 = argv[1]; - std::string filename2 = argv[2]; - std::string filename3 = argv[3]; - //KS: Violin plot currently not supported by three file veriosn although it should be super easy to adapt - } + man->style().setPalette(man->getOption("colorPalette")); + + GetPostfitParamPlots(); + + if (argc != 2 && argc != 3 && argc !=4) + { + MACH3LOG_CRITICAL("Invalid command line options specified"); + MACH3LOG_CRITICAL("How to use: {} .root", argv[0]); + MACH3LOG_CRITICAL("You can add up to 3 different files"); + throw MaCh3Exception(__FILE__, __LINE__); + } + + if (argc == 2) + { + std::string filename = argv[1]; + GetViolinPlots(filename); + } + else if (argc == 3) + { + std::string filename1 = argv[1]; + std::string filename2 = argv[2]; + GetViolinPlots(filename1, filename2); + } + else if (argc == 4) + { + std::string filename1 = argv[1]; + std::string filename2 = argv[2]; + std::string filename3 = argv[3]; + //KS: Violin plot currently not supported by three file version although it should be super easy to adapt + } return 0; } diff --git a/plotting/MatrixPlotter.cpp b/plotting/MatrixPlotter.cpp index 9556feefc..711757a6e 100755 --- a/plotting/MatrixPlotter.cpp +++ b/plotting/MatrixPlotter.cpp @@ -12,6 +12,10 @@ #include "plottingUtils/plottingUtils.h" #include "plottingUtils/plottingManager.h" +//this file has lots of usage of the ROOT plotting interface that only takes floats, turn this warning off for this CU for now +#pragma GCC diagnostic ignored "-Wfloat-conversion" +#pragma GCC diagnostic ignored "-Wconversion" + TH2D* GetSubMatrix(TH2D *MatrixFull, const std::string& Title, const
std::vector& Params) { std::vector ParamIndex(Params.size(), -999); diff --git a/plotting/PlotLLH.cpp b/plotting/PlotLLH.cpp index 82785a4dd..e1e1a90b3 100644 --- a/plotting/PlotLLH.cpp +++ b/plotting/PlotLLH.cpp @@ -5,6 +5,10 @@ // MACH3 PLOTTING #include "plottingUtils/plottingManager.h" +//this file has lots of usage of the ROOT plotting interface that only takes floats, turn this warning off for this CU for now +#pragma GCC diagnostic ignored "-Wfloat-conversion" +#pragma GCC diagnostic ignored "-Wconversion" + // some options for the plots double ratioPlotSplit; double yTitleOffset; @@ -22,7 +26,6 @@ void getSplitSampleStack(int fileIdx, std::string parameterName, TH1D LLH_allSam THStack *sampleStack, TLegend *splitSamplesLegend, float baselineLLH_main = 0.00001) { - std::vector sampNames = man->input().getTaggedSamples(man->getOption>("sampleTags")); size_t nSamples = sampNames.size(); @@ -57,9 +60,9 @@ void getSplitSampleStack(int fileIdx, std::string parameterName, TH1D LLH_allSam LLH_indivSam->SetStats(0); LLH_indivSam->SetLineColor(TColor::GetColorPalette( - floor((float)i * TColor::GetNumberOfColors() / (float)nSamples))); + floor(static_cast(i) * TColor::GetNumberOfColors() / static_cast(nSamples)))); LLH_indivSam->SetFillColor(TColor::GetColorPalette( - floor((float)i * TColor::GetNumberOfColors() / (float)nSamples))); + floor(static_cast(i) * TColor::GetNumberOfColors() / static_cast(nSamples)))); sampleStack->Add(LLH_indivSam); splitSamplesLegend->AddEntry(LLH_indivSam, man->style().prettifySampleName(sampName).c_str(), "lf"); @@ -81,13 +84,9 @@ void getSplitSampleStack(int fileIdx, std::string parameterName, TH1D LLH_allSam { drawLabel[i] = false; } - MACH3LOG_DEBUG(" drawLabel = {}", drawLabel.back()); MACH3LOG_DEBUG(""); - } - - return; } // handy dandy helper function for drawing and nicely formatting a stack of ratio plots @@ -124,7 +123,7 @@ void makeLLHScanComparisons(std::string paramName, std::string LLHType, std::str // will use these to 
make comparisons of likelihoods THStack *compStack = new THStack("LLH_Stack", ""); THStack *ratioCompStack = new THStack("LLH_Ratio_Stack", ""); - TLegend *legend = new TLegend(0.3, 0.6, 0.7, 0.8); + auto legend = std::make_unique(0.3, 0.6, 0.7, 0.8); // get the sample reweight hist from the main file TH1D LLH_main = man->input().getLLHScan_TH1D(0, paramName, LLHType); @@ -137,9 +136,8 @@ void makeLLHScanComparisons(std::string paramName, std::string LLHType, std::str int nBins = LLH_main.GetNbinsX(); // go through the other files - for (int extraFileIdx = 1; extraFileIdx < man->input().getNInputFiles(); extraFileIdx++) + for (unsigned int extraFileIdx = 1; extraFileIdx < man->input().getNInputFiles(); extraFileIdx++) { - TH1D *compHist = new TH1D(man->input().getLLHScan_TH1D(extraFileIdx, paramName, LLHType)); compHist->SetBit(kCanDelete); // <- will allow this to be deleted by root once done plotting if (compHist->GetNbinsX() == 0) @@ -147,12 +145,12 @@ void makeLLHScanComparisons(std::string paramName, std::string LLHType, std::str // make them look different to each other compHist->SetLineColor( - TColor::GetColorPalette(floor((float)extraFileIdx * TColor::GetNumberOfColors() / - (float)man->input().getNInputFiles()))); + TColor::GetColorPalette(floor(static_cast(extraFileIdx) * TColor::GetNumberOfColors() / + static_cast(man->input().getNInputFiles())))); compHist->SetLineStyle(2 + extraFileIdx % 9); compHist->SetLineWidth(lineWidth); - TH1D *divHist = (TH1D *)compHist->Clone(Form("RatioHist_%i", extraFileIdx)); + TH1D *divHist = static_cast(compHist->Clone(Form("RatioHist_%i", extraFileIdx))); divHist->SetBit(kCanDelete); if (man->getPlotRatios()) @@ -206,12 +204,10 @@ void makeLLHScanComparisons(std::string paramName, std::string LLHType, std::str // will use these to make comparisons of likelihoods delete compStack; delete ratioCompStack; - delete legend; } void makeSplitSampleLLHScanComparisons(std::string paramName, std::string outputFileName, TCanvas 
*canv, TPad *LLHPad, TPad *ratioPad) { - MACH3LOG_DEBUG(" Making split sample LLH comparison"); canv->Clear(); canv->Draw(); @@ -276,7 +272,7 @@ void makeSplitSampleLLHScanComparisons(std::string paramName, std::string output } // now we plot the comparisson file plots - for (int extraFileIdx = 1; extraFileIdx < man->getNFiles(); extraFileIdx++) + for (unsigned int extraFileIdx = 1; extraFileIdx < man->getNFiles(); extraFileIdx++) { MACH3LOG_DEBUG(" - Adding plot for additional file {}", extraFileIdx); canv->cd(1 + extraFileIdx); @@ -342,7 +338,6 @@ void makeSplitSampleLLHScanComparisons(std::string paramName, std::string output if (man->getPlotRatios()) { - THStack *splitSamplesStackRatios = new THStack(paramName.c_str(), ""); TList *baselineHistList = baseSplitSamplesStack->GetHists(); @@ -356,10 +351,10 @@ void makeSplitSampleLLHScanComparisons(std::string paramName, std::string output Form("%s_%s_splitDiv", paramName.c_str(), man->getFileLabel(extraFileIdx).c_str()), compLLH_main.GetNbinsX(), compLLH_main.GetBinLowEdge(1), compLLH_main.GetBinLowEdge(compLLH_main.GetNbinsX() + 1)); - divHist->Divide((TH1D *)compHistList->At(sampleIdx), - (TH1D *)baselineHistList->At(sampleIdx)); + divHist->Divide(static_cast(compHistList->At(sampleIdx)), + static_cast(baselineHistList->At(sampleIdx))); splitSamplesStackRatios->Add(divHist); - divHist->SetLineColor(((TH1D *)compHistList->At(sampleIdx))->GetLineColor()); + divHist->SetLineColor((static_cast(compHistList->At(sampleIdx))->GetLineColor())); } canv->cd(2 + extraFileIdx); diff --git a/plotting/plottingUtils/CMakeLists.txt b/plotting/plottingUtils/CMakeLists.txt index dceb7be43..87710f3e3 100644 --- a/plotting/plottingUtils/CMakeLists.txt +++ b/plotting/plottingUtils/CMakeLists.txt @@ -17,7 +17,8 @@ set_target_properties( Plotting PROPERTIES EXPORT_NAME Plotting ) -target_link_libraries( Plotting MaCh3::Manager MaCh3::MCMC ) +target_link_libraries(Plotting PUBLIC MaCh3::Manager MaCh3::MCMC) 
+target_link_libraries(Plotting PRIVATE MaCh3Warnings) ## to be compiled into python module needs to be compiled as position independent library if( MaCh3_PYTHON_ENABLED ) diff --git a/plotting/plottingUtils/inputManager.cpp b/plotting/plottingUtils/inputManager.cpp index a2102d398..8d23b8c07 100644 --- a/plotting/plottingUtils/inputManager.cpp +++ b/plotting/plottingUtils/inputManager.cpp @@ -108,7 +108,7 @@ void InputManager::print(const std::string &printLevel) const { MACH3LOG_INFO(""); } -float InputManager::getPostFitError(int fileNum, const std::string ¶mName, +double InputManager::getPostFitError(int fileNum, const std::string ¶mName, std::string errorType) const { const InputFile &inputFileDef = getFile(fileNum); @@ -137,7 +137,7 @@ float InputManager::getPostFitError(int fileNum, const std::string ¶mName, return _BAD_DOUBLE_; } -float InputManager::getPostFitValue(int fileNum, const std::string ¶mName, +double InputManager::getPostFitValue(int fileNum, const std::string ¶mName, std::string errorType) const { const InputFile &inputFileDef = getFile(fileNum); @@ -307,11 +307,11 @@ std::shared_ptr InputManager::findRootObject(const InputFile &fileDef, // match with the end of the objects in the file else if (locationVec.size() == 2) { - TDirectoryFile *directory = (TDirectoryFile*)fileDef.file->Get(locationVec[0].c_str()); + TDirectoryFile *directory = fileDef.file->Get(locationVec[0].c_str()); size_t nMatchingObjects = 0; // let's make sure that the directory itself exists - if (directory == NULL) + if (directory == nullptr) { object = nullptr; } @@ -321,7 +321,7 @@ std::shared_ptr InputManager::findRootObject(const InputFile &fileDef, // loop through the keys in the directory and find objects whose name matches the specified // pattern TIter next(directory->GetListOfKeys()); - while (TKey *key = (TKey*)next()) + while (TKey *key = static_cast(next())) { if (strEndsWith(std::string(key->GetName()), locationVec[1])) { @@ -391,10 +391,10 @@ bool 
InputManager::findBySampleLLH(InputFile &inputFileDef, const std::string &p if (LLHObjType == "TH1D") { - LLHGraph = std::make_shared((TH1D*)LLHObj.get()); + LLHGraph = std::make_shared(static_cast(LLHObj.get())); } else if (LLHObjType == "TGraph") { - LLHGraph = std::shared_ptr((TGraph*)LLHObj->Clone()); + LLHGraph = std::shared_ptr(static_cast(LLHObj->Clone())); } else { throw MaCh3Exception(__FILE__ , __LINE__, "uknown type of LLH object specified: " + LLHObjType); @@ -704,16 +704,15 @@ void InputManager::fillFileInfo(InputFile &inputFileDef, bool printThoughts) { MACH3LOG_DEBUG("Initialising MCMCProcessor for the input file"); std::vector posteriorTreeRawLocations = thisFitterSpec_config["MCMCsteps"]["location"].as>(); - TTree *postTree = NULL; + TTree *postTree = nullptr; for ( const std::string &rawLoc: posteriorTreeRawLocations ) { MACH3LOG_DEBUG(" - Looking for MCMC chain parameter values at: {}", rawLoc); - TObject *postTreeObj = inputFileDef.file->Get(rawLoc.c_str()); + postTree = inputFileDef.file->Get(rawLoc.c_str()); - if ( postTreeObj != NULL ) + if ( postTree != nullptr ) { - postTree = (TTree *) postTreeObj; inputFileDef.mcmcProc = new MCMCProcessor(inputFileDef.fileName); inputFileDef.mcmcProc->Initialise(); @@ -722,10 +721,10 @@ void InputManager::fillFileInfo(InputFile &inputFileDef, bool printThoughts) { } } - if ( postTree != NULL ) + if ( postTree != nullptr ) { inputFileDef.posteriorTree = postTree; - inputFileDef.nMCMCentries = postTree->GetEntries(); + inputFileDef.nMCMCentries = int(postTree->GetEntries()); } } @@ -861,12 +860,12 @@ void InputManager::fillFileData(InputFile &inputFileDef, bool printThoughts) { // it to?? 
if (LLHObjType == "TH1D") { - LLHGraph = std::make_shared((TH1D*)LLHObj.get()); + LLHGraph = std::make_shared(static_cast(LLHObj.get())); } else if (LLHObjType == "TGraph") { - LLHGraph = std::shared_ptr((TGraph*)LLHObj.get()->Clone()); + LLHGraph = std::shared_ptr(static_cast(LLHObj->Clone())); } else diff --git a/plotting/plottingUtils/inputManager.h b/plotting/plottingUtils/inputManager.h index 46446e1a0..28bec1e12 100644 --- a/plotting/plottingUtils/inputManager.h +++ b/plotting/plottingUtils/inputManager.h @@ -282,12 +282,12 @@ class InputManager { /// @param paramName The parameter you want the information about. /// @param LLHType The type of log likelihood scan you want (e.g. total, penalty, etc.) /// @return A vector of vectors containing the LLH scan data. First entry is x axis, 2nd is y axis - std::vector> getLLHScan(int fileNum, std::string paramName, std::string LLHType) const { + std::vector> getLLHScan(int fileNum, std::string paramName, std::string LLHType) const { if (!getEnabledLLH(fileNum, paramName, LLHType)) { MACH3LOG_WARN("file at index {} does not have LLH scan for parameter {}", fileNum, paramName); MACH3LOG_WARN("am returning an empty vector"); - return std::vector>(2); + return std::vector>(2); } return TGraphToVector(*_fileVec[fileNum].LLHScans_map.at(LLHType).at(paramName)); } @@ -331,12 +331,12 @@ class InputManager { /// @param fileNum The index of the file you want the data from. /// @param paramName The parameter you want the information about. /// @return A vector of vectors containing the posterior data. First entry is x axis (i.e. 
the parameter values), 2nd is y axis - std::vector> get1dPosterior(int fileNum, std::string paramName) const { + std::vector> get1dPosterior(int fileNum, std::string paramName) const { if (!getEnabled1dPosteriors(fileNum, paramName)) { MACH3LOG_WARN("file at index {} does not have a 1d posterior for parameter {}", fileNum, paramName); MACH3LOG_WARN("am returning an empty vector"); - return std::vector>(2); + return std::vector>(2); } return TGraphToVector(*_fileVec[fileNum].posteriors1d_map.at(paramName)); } @@ -380,12 +380,12 @@ class InputManager { /// @param paramName The name of the parameter whose LLH scan you would like. /// @param sample The sample that you would like the LLH scan for. /// @return A vector of vectors containing the LLH scan data. First entry is x axis, 2nd is y axis. - std::vector> getSampleSpecificLLHScan(int fileNum, std::string paramName, std::string sample) const { + std::vector> getSampleSpecificLLHScan(int fileNum, std::string paramName, std::string sample) const { if (!getEnabledLLHBySample(fileNum, paramName, sample)) { MACH3LOG_WARN("file at index {} does not have LLH scan for sample {} for parameter {}", fileNum, sample, paramName); MACH3LOG_WARN("am returning an empty vector"); - return std::vector>(2); + return std::vector>(2); } return TGraphToVector(*_fileVec[fileNum].LLHScansBySample_map.at(sample).at(paramName)); } @@ -455,7 +455,7 @@ class InputManager { /// possible types will be fitter dependent, e.g. "gauss" or "hpd" for MaCh3. If not specified, /// will use the default one, as specified in the fitter definition config. /// @return The error on the specified parameter. - float getPostFitError(int fileNum, const std::string ¶mName, std::string errorType = "") const; + double getPostFitError(int fileNum, const std::string ¶mName, std::string errorType = "") const; /// @brief Get the post fit value for a particular parameter from a particular input file. 
/// @param fileNum The index of the file that you would like to get the value from. @@ -464,13 +464,13 @@ class InputManager { /// possible types will be fitter dependent, e.g. "gauss" or "hpd" for MaCh3. If not specified, /// will use the default one, as specified in the fitter definition config. /// @return The value of the specified parameter. - float getPostFitValue(int fileNum, const std::string ¶mName, std::string errorType = "") const; + double getPostFitValue(int fileNum, const std::string ¶mName, std::string errorType = "") const; /// @name General Getters /// @{ inline const std::vector &getKnownParameters() const { return _knownParameters; } inline const std::vector &getKnownSamples() const { return _knownSamples; } - inline int getNInputFiles() const { return _fileVec.size(); } + inline size_t getNInputFiles() const { return _fileVec.size(); } /// @brief Get all parameters which have some set of tags /// @param tags The tags to check for @@ -636,7 +636,7 @@ class InputManager { // helper fn to test if string "str" ends with other string "ending" inline bool strEndsWith(std::string str, std::string ending) const { - uint pos = str.find(ending); + size_t pos = str.find(ending); return (pos == str.length() - ending.length()); } diff --git a/plotting/plottingUtils/plottingManager.cpp b/plotting/plottingUtils/plottingManager.cpp index dba1969d7..e99d6e4cd 100644 --- a/plotting/plottingUtils/plottingManager.cpp +++ b/plotting/plottingUtils/plottingManager.cpp @@ -89,7 +89,7 @@ void PlottingManager::initialise() { /// @endcode /// @todo make this able to return any un-parsed arguments so that user can specify their own /// arguments for use in their plotting scripts -void PlottingManager::parseInputs(int argc, char **argv) { +void PlottingManager::parseInputs(int argc, char * const *argv) { // parse the inputs int c; while ((c = getopt(argc, argv, "o:l:d:c:srgh")) != -1) @@ -221,10 +221,10 @@ void PlottingManager::setOutFileName(std::string saveName) { /// 
separate files: can specify suffix "_PriotLLH" will return OutputName_PriorLLH.ext /// @todo Make this support .root files too const std::string PlottingManager::getOutputName(const std::string &suffix) { - std::string ext = std::string(_outputName); - std::string name = std::string(_outputName); + std::string ext = _outputName; + std::string name = _outputName; - int dotPos = 0; + size_t dotPos = 0; while (ext.find(".") != std::string::npos) { dotPos += ext.find("."); @@ -248,8 +248,8 @@ void PlottingManager::parseFileLabels(std::string labelString, std::vector argv) { - std::vector charVec; + std::vector charVec; MACH3LOG_DEBUG("Parsing Inputs :: was given vector:"); for( const std::string &arg : argv ) { - charVec.push_back( (char*)arg.c_str() ); + charVec.push_back( const_cast(arg.c_str()) ); MACH3LOG_DEBUG(" - {}", arg ); } - parseInputs(argv.size(), charVec.data()); + parseInputs(int(argv.size()), charVec.data()); } /// @brief Describe an option you want to add to the PlottingManager which can be read in from the @@ -146,7 +146,7 @@ class PlottingManager { const std::vector getFileLabels() { return _fileLabels; } - int getNFiles() { return (int)_fileNames.size(); } + size_t getNFiles() { return _fileNames.size(); } bool getSplitBySample() { return _splitBySample; } diff --git a/plotting/plottingUtils/plottingUtils.cpp b/plotting/plottingUtils/plottingUtils.cpp index 1ba4a151e..95b553515 100644 --- a/plotting/plottingUtils/plottingUtils.cpp +++ b/plotting/plottingUtils/plottingUtils.cpp @@ -22,6 +22,10 @@ TH1D TGraphToTH1D(TGraph graph, std::string newName, std::string newTitle) { title = newTitle; int nPoints = graph.GetN(); + if(nPoints < 2){ + MACH3LOG_ERROR("Too few points in the graph."); + throw MaCh3Exception(__FILE__,__LINE__); + } std::vector pointsX(nPoints); std::vector pointsY(nPoints); @@ -57,12 +61,12 @@ TH1D TGraphToTH1D(TGraph graph, std::string newName, std::string newTitle) { } -std::vector> TGraphToVector(TGraph graph) { +std::vector> 
TGraphToVector(TGraph graph) { int nPoints = graph.GetN(); - std::vector> ret(2); - std::vector pointsX(nPoints); - std::vector pointsY(nPoints); + std::vector> ret(2); + std::vector pointsX(nPoints); + std::vector pointsY(nPoints); // Get the points out Double_t x, y; @@ -81,13 +85,13 @@ std::vector> TGraphToVector(TGraph graph) { } -std::vector> TGraphToVector(TGraph2D graph) { +std::vector> TGraphToVector(TGraph2D graph) { int nPoints = graph.GetN(); - std::vector> ret(3); - std::vector pointsX(nPoints); - std::vector pointsY(nPoints); - std::vector pointsZ(nPoints); + std::vector> ret(3); + std::vector pointsX(nPoints); + std::vector pointsY(nPoints); + std::vector pointsZ(nPoints); // Get the points out Double_t x, y, z; diff --git a/plotting/plottingUtils/plottingUtils.h b/plotting/plottingUtils/plottingUtils.h index d9808dcd8..fff30fd86 100644 --- a/plotting/plottingUtils/plottingUtils.h +++ b/plotting/plottingUtils/plottingUtils.h @@ -46,13 +46,13 @@ TH1D TGraphToTH1D(TGraph graph, std::string newName = "", std::string newTitle = /// @brief This handy little function lets you interpret a TGraph as a vector containing the same data. /// @param graph The graph you want to convert. /// @return A vector of vectors containing the data from the initial graph. The first vector is the x axis, the 2nd the y axis -std::vector> TGraphToVector(TGraph graph); +std::vector> TGraphToVector(TGraph graph); /// @brief This handy little function lets you interpret a 2d TGraph as a vector containing the same data. /// @param graph The graph you want to convert. /// @return A vector of vectors containing the data from the initial graph. 
The first vector is the x axis, the 2nd the y axis, the 3rd is the z axis -std::vector> TGraphToVector(TGraph2D graph); +std::vector> TGraphToVector(TGraph2D graph); /// @} diff --git a/plotting/plottingUtils/styleManager.cpp b/plotting/plottingUtils/styleManager.cpp index a4a1f2888..49b6e4634 100644 --- a/plotting/plottingUtils/styleManager.cpp +++ b/plotting/plottingUtils/styleManager.cpp @@ -31,7 +31,7 @@ void StyleManager::setPalette(std::string configStyleName) const { std::vector> paletteDef = palettes[configStyleName].as>>(); - const Int_t NCont = (Int_t)(paletteDef[0][0]); + const Int_t NCont = Int_t(paletteDef[0][0]); std::vector stopVec = paletteDef[1]; std::vector redsVec = paletteDef[2]; @@ -39,16 +39,16 @@ void StyleManager::setPalette(std::string configStyleName) const { std::vector bluesVec = paletteDef[4]; // get the number of colour stops and check all vectors are same size - const Int_t NRGBs = stopVec.size(); - if ((Int_t)redsVec.size() != NRGBs || (Int_t)greensVec.size() != NRGBs || - (Int_t)bluesVec.size() != NRGBs) + const size_t NRGBs = stopVec.size(); + if (redsVec.size() != NRGBs || greensVec.size() != NRGBs || + bluesVec.size() != NRGBs) { MACH3LOG_ERROR("invalid colour palettet defined in style config file: {}"); MACH3LOG_ERROR("RGB arrays dont all have the same size, please fix that"); } // now actually set the palette - TColor::CreateGradientColorTable(NRGBs, stopVec.data(), redsVec.data(), greensVec.data(), bluesVec.data(), NCont); + TColor::CreateGradientColorTable(int(NRGBs), stopVec.data(), redsVec.data(), greensVec.data(), bluesVec.data(), NCont); gStyle->SetNumberContours(NCont); } @@ -59,27 +59,27 @@ void StyleManager::setTH1Style(TH1 *hist, std::string styleName) const { if (styleDef["MarkerColor"]) { - hist->SetMarkerColor(styleDef["MarkerColor"].as()); + hist->SetMarkerColor(styleDef["MarkerColor"].as()); } if (styleDef["MarkerStyle"]) { - hist->SetMarkerStyle(styleDef["MarkerStyle"].as()); + 
hist->SetMarkerStyle(styleDef["MarkerStyle"].as()); } if (styleDef["FillColor"]) { - hist->SetFillColor(styleDef["FillColor"].as()); + hist->SetFillColor(styleDef["FillColor"].as()); } if (styleDef["FillStyle"]) { - hist->SetFillStyle(styleDef["FillStyle"].as()); + hist->SetFillStyle(styleDef["FillStyle"].as()); } if (styleDef["LineColor"]) { - hist->SetLineColor(styleDef["LineColor"].as()); + hist->SetLineColor(styleDef["LineColor"].as()); } if (styleDef["LineStyle"]) { - hist->SetLineStyle(styleDef["LineStyle"].as()); + hist->SetLineStyle(styleDef["LineStyle"].as()); } } diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index ff275f674..5bd30f7f5 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -2,7 +2,7 @@ ################################# pybind11 stuff ################################## ## EM: make a module target out of all the python*Module.cpp files (currently just one...) pybind11_add_module( - pyMaCh3 MODULE + _pyMaCh3 MODULE pyMaCh3.cpp plotting.cpp fitter.cpp @@ -13,6 +13,7 @@ pybind11_add_module( ) ## EM: only works with code compiled with -fPIC enabled.. I think this flag can make things slightly slower ## so would be good to find a way around this. 
-set_property( TARGET pyMaCh3 PROPERTY POSITION_INDEPENDENT_CODE ON ) -target_link_libraries( pyMaCh3 PUBLIC MaCh3::All ) -install( TARGETS pyMaCh3 DESTINATION pyMaCh3/) +set_property( TARGET _pyMaCh3 PROPERTY POSITION_INDEPENDENT_CODE ON ) +target_link_libraries( _pyMaCh3 PRIVATE MaCh3::All NuOscillator yaml-cpp::yaml-cpp MaCh3Warnings ) +install( DIRECTORY pyMaCh3 DESTINATION ./ ) +install( TARGETS _pyMaCh3 DESTINATION pyMaCh3/ ) \ No newline at end of file diff --git a/python/pyMaCh3.cpp b/python/pyMaCh3.cpp index 97e225538..072c7e6b3 100644 --- a/python/pyMaCh3.cpp +++ b/python/pyMaCh3.cpp @@ -10,7 +10,7 @@ void initManager(py::module &); // <- defined in python/manager.cpp void initCovariance(py::module &); // <- defined in python/covariance.cpp void initSplines(py::module &); // <- defined in python/splines.cpp -PYBIND11_MODULE( pyMaCh3, m ) { +PYBIND11_MODULE( _pyMaCh3, m ) { initPlotting(m); initFitter(m); initSamplePDF(m); diff --git a/python/pyMaCh3/__init__.py b/python/pyMaCh3/__init__.py new file mode 100644 index 000000000..9590b0f16 --- /dev/null +++ b/python/pyMaCh3/__init__.py @@ -0,0 +1,3 @@ +from ._pyMaCh3 import __doc__, fitter, manager, plotting, sample_pdf, splines, covariance + +__all__ = ["__doc__", "fitter", "manager", "plotting", "sample_pdf", "splines", "covariance"] diff --git a/python/samplePDF.cpp b/python/samplePDF.cpp index 9088c2884..1d154acbc 100644 --- a/python/samplePDF.cpp +++ b/python/samplePDF.cpp @@ -2,9 +2,7 @@ #include #include -#include "samplePDF/samplePDFBase.h" #include "samplePDF/samplePDFFDBase.h" -#include "samplePDF/FDMCStruct.h" namespace py = pybind11; @@ -254,7 +252,7 @@ void initSamplePDF(py::module &m){ .def( "get_bin_LLH", - py::overload_cast(&samplePDFBase::getTestStatLLH), + py::overload_cast(&samplePDFBase::getTestStatLLH, py::const_), "Get the LLH for a bin by comparing the data and MC. 
The result depends on having previously set the test statistic using :py:meth:`pyMaCh3.sample_pdf.SamplePDFFDBase.set_test_stat` \n\ :param data: The data content of the bin. \n\ :param mc: The mc content of the bin \n\ @@ -319,4 +317,4 @@ void initSamplePDF(py::module &m){ ) ; */ -} \ No newline at end of file +} diff --git a/python/splines.cpp b/python/splines.cpp index d9bba95b9..8706e90d3 100644 --- a/python/splines.cpp +++ b/python/splines.cpp @@ -10,6 +10,9 @@ // ROOT includes #include "TSpline.h" +#pragma GCC diagnostic ignored "-Wuseless-cast" +#pragma GCC diagnostic ignored "-Wfloat-conversion" + namespace py = pybind11; // As SplineBase is an abstract base class we have to do some gymnastics to get it to get it into python @@ -104,30 +107,27 @@ void initSplines(py::module &m){ throw MaCh3Exception(__FILE__, __LINE__, "Different number of x values and y values!"); } - int length = xVals.size(); + int length = int(xVals.size()); if (length == 1) { - _float_ xKnot = xVals[0]; - _float_ yKnot = yVals[0]; + M3::float_t xKnot = M3::float_t(xVals[0]); + M3::float_t yKnot = M3::float_t(yVals[0]); - std::vector<_float_ *> pars; + std::vector pars; pars.resize(3); - pars[0] = new _float_(0.0); - pars[1] = new _float_(0.0); - pars[2] = new _float_(0.0); - - return new TSpline3_red(&xKnot, &yKnot, 1, pars.data()); - + pars[0] = new M3::float_t(0.0); + pars[1] = new M3::float_t(0.0); + pars[2] = new M3::float_t(0.0); delete pars[0]; delete pars[1]; delete pars[2]; + + return new TSpline3_red(&xKnot, &yKnot, 1, pars.data()); } - else - { - TSpline3 *splineTmp = new TSpline3( "spline_tmp", xVals.data(), yVals.data(), length ); - return new TSpline3_red(splineTmp, interpType); - } + + TSpline3 *splineTmp = new TSpline3( "spline_tmp", xVals.data(), yVals.data(), length ); + return new TSpline3_red(splineTmp, interpType); } ) ) diff --git a/samplePDF/CMakeLists.txt b/samplePDF/CMakeLists.txt index 06059add4..37b9c11f2 100644 --- a/samplePDF/CMakeLists.txt +++ 
b/samplePDF/CMakeLists.txt @@ -2,7 +2,7 @@ set(HEADERS samplePDFBase.h samplePDFFDBase.h Structs.h - FDMCStruct.h + FarDetectorCoreInfoStruct.h ) add_library(SamplePDF SHARED @@ -11,7 +11,8 @@ add_library(SamplePDF SHARED Structs.cpp ) -target_link_libraries(SamplePDF Splines NuOscillator Covariance MaCh3CompilerOptions) +target_link_libraries(SamplePDF PUBLIC Splines NuOscillator Covariance) +target_link_libraries(SamplePDF PRIVATE MaCh3Warnings) target_include_directories(SamplePDF PUBLIC $ diff --git a/samplePDF/FDMCStruct.h b/samplePDF/FDMCStruct.h deleted file mode 100644 index 285a1916b..000000000 --- a/samplePDF/FDMCStruct.h +++ /dev/null @@ -1,61 +0,0 @@ -#pragma once - -/// @brief constructors are same for all three so put in here -struct fdmc_base { - int nutype; // 2 = numu/signue | -2 = numub | 1 = nue | -1 = nueb - int oscnutype; - int nupdg; - int nupdgUnosc; - bool signal; // true if signue - int nEvents; // how many MC events are there - double ChannelIndex; - std::string flavourName; - - int **Target; // target the interaction was on - - int SampleDetID; - - //THe x_var and y_vars that you're binning in - const double** x_var; - const double** y_var; - const double **rw_etru; - const double **rw_truecz = NULL; - - /// xsec bins - std::list< int > *xsec_norms_bins; - - /// DB Speedup bits - double Unity; - float Unity_F; - int Unity_Int; - double dummy_value = -999; - - int* nxsec_norm_pointers; - const double*** xsec_norm_pointers; - - int* nxsec_spline_pointers; - const double*** xsec_spline_pointers; - - int* ntotal_weight_pointers; - const double*** total_weight_pointers; - double* total_w; - - int* XBin; - int* YBin; - int* NomXBin; - int* NomYBin; - - bool *isNC; - - // histo pdf bins - double *rw_lower_xbinedge; // lower to check if Eb has moved the erec bin - double *rw_lower_lower_xbinedge; // lower to check if Eb has moved the erec bin - double *rw_upper_xbinedge; // upper to check if Eb has moved the erec bin - double 
*rw_upper_upper_xbinedge; // upper to check if Eb has moved the erec bin - - double **mode; - - const _float_ **osc_w_pointer; - double *xsec_w; - splineFDBase *splineFile; -}; diff --git a/samplePDF/FarDetectorCoreInfoStruct.h b/samplePDF/FarDetectorCoreInfoStruct.h new file mode 100644 index 000000000..10b360054 --- /dev/null +++ b/samplePDF/FarDetectorCoreInfoStruct.h @@ -0,0 +1,68 @@ +#pragma once + +/// @brief constructors are same for all three so put in here +struct FarDetectorCoreInfo { + FarDetectorCoreInfo() : isNC{nullptr} {} + FarDetectorCoreInfo(FarDetectorCoreInfo const &other) = delete; + FarDetectorCoreInfo(FarDetectorCoreInfo &&other) = default; + FarDetectorCoreInfo& operator=(FarDetectorCoreInfo const &other) = delete; + FarDetectorCoreInfo& operator=(FarDetectorCoreInfo &&other) = delete; + + ~FarDetectorCoreInfo(){ delete [] isNC; } + + int nutype; // 2 = numu/signue | -2 = numub | 1 = nue | -1 = nueb + int oscnutype; + int nupdg; + int nupdgUnosc; + bool signal; // true if signue + int nEvents; // how many MC events are there + double ChannelIndex; + std::string flavourName; + + std::vector Target; // target the interaction was on + + int SampleDetID; + + //THe x_var and y_vars that you're binning in + std::vector x_var; + std::vector y_var; + std::vector rw_etru; + std::vector rw_truecz; + + /// xsec bins + std::vector< std::vector< int > > xsec_norms_bins; + + /// DB Speedup bits + double Unity; + float Unity_F; + int Unity_Int; + double dummy_value = -999; + + std::vector nxsec_norm_pointers; + std::vector> xsec_norm_pointers; + + std::vector nxsec_spline_pointers; + std::vector> xsec_spline_pointers; + + std::vector ntotal_weight_pointers; + std::vector> total_weight_pointers; + std::vector total_w; + + std::vector XBin; + std::vector YBin; + std::vector NomXBin; + std::vector NomYBin; + + bool *isNC; + + // histo pdf bins + std::vector rw_lower_xbinedge; // lower to check if Eb has moved the erec bin + std::vector rw_lower_lower_xbinedge; 
// lower to check if Eb has moved the erec bin + std::vector rw_upper_xbinedge; // upper to check if Eb has moved the erec bin + std::vector rw_upper_upper_xbinedge; // upper to check if Eb has moved the erec bin + + std::vector mode; + + std::vector osc_w_pointer; + std::vector xsec_w; +}; diff --git a/samplePDF/Structs.cpp b/samplePDF/Structs.cpp index 3253ac5d3..ca0ac272b 100644 --- a/samplePDF/Structs.cpp +++ b/samplePDF/Structs.cpp @@ -86,7 +86,7 @@ namespace MaCh3Utils { {7,512}, //Atm MultiGeV mu-like }); - int nKnownDetIDs = KnownDetIDsMap.size(); + int nKnownDetIDs = int(KnownDetIDsMap.size()); } @@ -147,7 +147,7 @@ double NoOverflowIntegral(TH2Poly* poly) { //WP: Helper function for projecting TH2Poly onto the X axis TH1D* PolyProjectionX(TObject* poly, std::string TempName, const std::vector& xbins, const bool computeErrors) { // ************************************************** - TH1D* hProjX = new TH1D((TempName+"_x").c_str(),(TempName+"_x").c_str(), xbins.size()-1, &xbins[0]); + TH1D* hProjX = new TH1D((TempName+"_x").c_str(),(TempName+"_x").c_str(), int(xbins.size()-1), &xbins[0]); //KS: Temp Histogram to store error, use double as this is thread safe std::vector hProjX_Error(hProjX->GetXaxis()->GetNbins() + 1, 0.0); @@ -216,7 +216,7 @@ TH1D* PolyProjectionX(TObject* poly, std::string TempName, const std::vector& ybins, const bool computeErrors) { // ************************************************** - TH1D* hProjY = new TH1D((TempName+"_y").c_str(),(TempName+"_y").c_str(),ybins.size()-1,&ybins[0]); + TH1D* hProjY = new TH1D((TempName+"_y").c_str(),(TempName+"_y").c_str(),int(ybins.size()-1),&ybins[0]); //KS: Temp Histogram to store error, use double as this is thread safe std::vector hProjY_Error(hProjY->GetXaxis()->GetNbins() + 1, 0.0); double ylow, yup, frac = 0.; @@ -300,7 +300,7 @@ TH2D* ConvertTH2PolyToTH2D(TH2Poly *poly, TH2D *h2dhist) { HistTempName += "_"; //make the th2d - TH2D *hist = (TH2D*) h2dhist->Clone(); + TH2D *hist = 
static_cast(h2dhist->Clone()); hist->SetNameTitle(HistTempName.c_str(), HistTempName.c_str()); for(int ix = 0; ix < hist->GetNbinsX() + 2; ix++) { @@ -310,19 +310,19 @@ TH2D* ConvertTH2PolyToTH2D(TH2Poly *poly, TH2D *h2dhist) { } //Loop over poly bins, find the corresponding th2d and setbincontent! for(int i = 0; i< poly->GetNumberOfBins(); i++){ - TH2PolyBin* polybin = (TH2PolyBin*) (poly->GetBins()->At(i)->Clone()); - xlow = polybin->GetXMin(); - xup = polybin->GetXMax(); - ylow = polybin->GetYMin(); - yup = polybin->GetYMax(); + TH2PolyBin & polybin = static_cast(*poly->GetBins()->At(i)); + xlow = polybin.GetXMin(); + xup = polybin.GetXMax(); + ylow = polybin.GetYMin(); + yup = polybin.GetYMax(); int xbin, ybin; xbin = hist->GetXaxis()->FindBin(xlow+(xup-xlow)/2); ybin = hist->GetYaxis()->FindBin(ylow+(yup-ylow)/2); MACH3LOG_TRACE("Poly bin {}, xlow: {}, xup: {}, ylow: {}, yup: {}. Finding bin for ({}, {}). Found Bin ({}, {}) with content {}. But Poly content: {}", - i, xlow, xup, ylow, yup, (xlow + (xup - xlow) / 2), (ylow + (yup - ylow) / 2), xbin, ybin, polybin->GetContent(), poly->GetBinContent(i)); - hist->SetBinContent(xbin, ybin, polybin->GetContent()); + i, xlow, xup, ylow, yup, (xlow + (xup - xlow) / 2), (ylow + (yup - ylow) / 2), xbin, ybin, polybin.GetContent(), poly->GetBinContent(i)); + hist->SetBinContent(xbin, ybin, polybin.GetContent()); } return hist; } @@ -378,7 +378,7 @@ TH2Poly* ConvertTH2DToTH2Poly(TH2D* hist) { //WP: Scale a TH2Poly and divide by bin width TH2Poly* PolyScaleWidth(TH2Poly *Histogram, double scale) { // **************** - TH2Poly* HistCopy = (TH2Poly*)(Histogram->Clone()); + TH2Poly* HistCopy = static_cast(Histogram->Clone()); double xlow, xup, ylow, yup, area; for(int i = 1; i < HistCopy->GetNumberOfBins()+1; i++) @@ -458,10 +458,10 @@ TGraphAsymmErrors* MakeAsymGraph(TH1D* sigmaArrayLeft, TH1D* sigmaArrayCentr, TH } // **************** -//DB Get the Cernekov momentum threshold in MeV +//DB Get the Cherenkov momentum 
threshold in MeV double returnCherenkovThresholdMomentum(int PDG) { // **************** - double refractiveIndex = 1.334; //DB From https://github.com/fiTQun/fiTQun/blob/646cf9c8ba3d4f7400bcbbde029d5ca15513a3bf/fiTQun_shared.cc#L757 + constexpr double refractiveIndex = 1.334; //DB From https://github.com/fiTQun/fiTQun/blob/646cf9c8ba3d4f7400bcbbde029d5ca15513a3bf/fiTQun_shared.cc#L757 double mass = MaCh3Utils::GetMassFromPDG(PDG)*1e3; double momentumThreshold = mass/sqrt(refractiveIndex*refractiveIndex-1.); return momentumThreshold; @@ -471,7 +471,7 @@ double returnCherenkovThresholdMomentum(int PDG) { // Recalculate Q^2 after Eb shift. Takes in shifted lepton momentum, lepton angle, and true neutrino energy double CalculateQ2(double PLep, double PUpd, double EnuTrue, double InitialQ2){ // *************************************************************************** - const double MLep = 0.10565837; + constexpr double MLep = 0.10565837; // Caluclate muon energy double ELep = sqrt((MLep*MLep)+(PLep*PLep)); diff --git a/samplePDF/Structs.h b/samplePDF/Structs.h index 02efe3659..9fd181c9a 100644 --- a/samplePDF/Structs.h +++ b/samplePDF/Structs.h @@ -3,18 +3,20 @@ /// Run low or high memory versions of structs /// N.B. for 64 bit systems sizeof(float) == sizeof(double) so not a huge effect /// KS: Need more testing on FD +namespace M3 { #ifdef _LOW_MEMORY_STRUCTS_ /// Custom floating point (float or double) -#define _float_ float +using float_t = float; /// Custom integer (int or short int) -#define _int_ short int +using int_t = short; /// Custom unsigned integer (unsigned short int or unsigned int) -#define _unsigned_int_ unsigned short int +using uint_t = unsigned short; #else -#define _float_ double -#define _int_ int -#define _unsigned_int_ unsigned int +using float_t = double; +using int_t = int; +using uint_t = unsigned; #endif +} /// KS: noexcept can help with performance but is terrible for debugging, this is meant to help easy way of of turning it on or off. 
In near future move this to struct or other central class. //#define SafeException @@ -34,11 +36,10 @@ /// Number of overflow bins in TH2Poly, #define _TH2PolyOverflowBins_ 9 -/// Include some healthy defines for constructors -#define _BAD_DOUBLE_ -999.99 -#define _BAD_INT_ -999 +constexpr static const double _BAD_DOUBLE_ = -999.99; +constexpr static const int _BAD_INT_ = -999; -#define _DEFAULT_RETURN_VAL_ -999999.123456 +constexpr static const double _DEFAULT_RETURN_VAL_ = -999999.123456; // C++ includes #include diff --git a/samplePDF/samplePDFBase.cpp b/samplePDF/samplePDFBase.cpp index d6a2fc2b6..8cd6f8995 100644 --- a/samplePDF/samplePDFBase.cpp +++ b/samplePDF/samplePDFBase.cpp @@ -99,9 +99,13 @@ void samplePDFBase::addData(TH2D* binneddata) std::vector samplePDFBase::generate() { std::vector data; - TH1D *pdf = (TH1D*)get1DHist(); + TH1D *pdf = get1DHist(); double evrate = getEventRate(); + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" int num = rnd->Poisson(evrate); +#pragma GCC diagnostic pop std::cout << std::endl << "sampling " << num << " events from " << evrate << std::endl; // rejection sampling @@ -136,7 +140,7 @@ std::vector samplePDFBase::generate() std::vector< std::vector > samplePDFBase::generate2D(TH2D* pdf) { std::vector< std::vector > data; - if(!pdf) pdf = (TH2D*)get2DHist(); + if(!pdf) pdf = get2DHist(); if(MCthrow) { @@ -150,7 +154,10 @@ std::vector< std::vector > samplePDFBase::generate2D(TH2D* pdf) } double evrate = pdf->Integral(); +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" int num = rnd->Poisson(evrate); +#pragma GCC diagnostic pop std::cout << "sampling " << num << " events from " << evrate << std::endl; std::vector var1; @@ -193,7 +200,7 @@ double samplePDFBase::getEventRate() // *************************************************************************** // Poisson likelihood calc for data and MC event rates -double samplePDFBase::getTestStatLLH(const double data, const 
double mc) { +double samplePDFBase::getTestStatLLH(const double data, const double mc) const { // *************************************************************************** // Need some MC if(mc == 0) return 0.; @@ -211,7 +218,7 @@ double samplePDFBase::getTestStatLLH(const double data, const double mc) { // ************************* // data is data, mc is mc, w2 is Sum(w_{i}^2) (sum of weights squared), which is sigma^2_{MC stats} -double samplePDFBase::getTestStatLLH(const double data, const double mc, const double w2) { +double samplePDFBase::getTestStatLLH(const double data, const double mc, const double w2) const { // ************************* // Need some MC @@ -316,7 +323,7 @@ double samplePDFBase::getTestStatLLH(const double data, const double mc, const d const long double a = mc*b+1; const long double k = data; // Use C99's implementation of log of gamma function to not be C++11 dependent - stat = -1*(a * logl(b) + lgammal(k+a) - lgammal(k+(long double)1) - ((k+a)*log1pl(b)) - lgammal(a)); + stat = double(-1*(a * logl(b) + lgammal(k+a) - lgammal(k+1) - ((k+a)*log1pl(b)) - lgammal(a))); // Return the statistical contribution and penalty return stat; diff --git a/samplePDF/samplePDFBase.h b/samplePDF/samplePDFBase.h index de9d3e525..d9a23a143 100644 --- a/samplePDF/samplePDFBase.h +++ b/samplePDF/samplePDFBase.h @@ -25,7 +25,7 @@ class samplePDFBase /// @brief destructor virtual ~samplePDFBase(); - virtual inline _int_ GetNsamples(){ return nSamples; }; + virtual inline M3::int_t GetNsamples(){ return nSamples; }; virtual inline std::string GetName()const {return "samplePDF";}; virtual std::string GetSampleName(int Sample); virtual inline double getSampleLikelihood(const int isample){(void) isample; return GetLikelihood();}; @@ -65,7 +65,7 @@ class samplePDFBase virtual void setMCMCBranches(TTree *outtree) {(void)outtree;}; // WARNING KS: Needed for sigma var - virtual void SetupBinning(const _int_ Selection, std::vector &BinningX, std::vector &BinningY){ + 
virtual void SetupBinning(const M3::int_t Selection, std::vector &BinningX, std::vector &BinningY){ (void) Selection; (void) BinningX; (void) BinningY; throw MaCh3Exception(__FILE__ , __LINE__ , "Not implemented");} virtual TH1* getData(const int Selection) { (void) Selection; throw MaCh3Exception(__FILE__ , __LINE__ , "Not implemented"); } virtual TH2Poly* getW2(const int Selection){ (void) Selection; throw MaCh3Exception(__FILE__ , __LINE__ , "Not implemented");} @@ -75,12 +75,12 @@ class samplePDFBase virtual inline std::string GetKinVarLabel(const int sample, const int Dimension) { (void) sample; (void) Dimension; throw MaCh3Exception(__FILE__ , __LINE__ , "Not implemented"); }; - double getTestStatLLH(double data, double mc); + double getTestStatLLH(double data, double mc) const; /// @brief Calculate test statistic for a single bin. Calculation depends on setting of fTestStatistic /// @param data is data /// @param mc is mc /// @param w2 is is Sum(w_{i}^2) (sum of weights squared), which is sigma^2_{MC stats} - double getTestStatLLH(const double data, const double mc, const double w2); + double getTestStatLLH(const double data, const double mc, const double w2) const; /// @brief Set the test statistic to be used when calculating the binned likelihoods /// @param testStat The test statistic to use. 
inline void SetTestStatistic(TestStatistic testStat){ fTestStatistic = testStat; } @@ -110,7 +110,7 @@ class samplePDFBase std::vector< std::vector >* dataSample2D; /// Contains how many samples we've got - _int_ nSamples; + M3::int_t nSamples; /// KS: number of dimension for this sample int nDims; /// Name of Sample diff --git a/samplePDF/samplePDFFDBase.cpp b/samplePDF/samplePDFFDBase.cpp index 2f69dca5d..8e8ef0965 100644 --- a/samplePDF/samplePDFFDBase.cpp +++ b/samplePDF/samplePDFFDBase.cpp @@ -1,7 +1,10 @@ #include "samplePDFFDBase.h" +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wfloat-conversion" #include "Oscillator/OscillatorFactory.h" #include "Constants/OscillatorConstants.h" +#pragma GCC diagnostic pop #include @@ -10,9 +13,9 @@ samplePDFFDBase::samplePDFFDBase(std::string ConfigFileName, covarianceXsec* xse MACH3LOG_INFO("-------------------------------------------------------------------"); MACH3LOG_INFO("Ceating SamplePDFFDBase object"); - //ETA - safety feature so you can't pass a NULL xsec_cov - if(xsec_cov == NULL){ - MACH3LOG_ERROR("You've passed me a NULL xsec covariance matrix... I need this to setup splines!"); + //ETA - safety feature so you can't pass a nullptr xsec_cov + if(xsec_cov == nullptr){ + MACH3LOG_ERROR("You've passed me a nullptr xsec covariance matrix... 
I need this to setup splines!"); throw MaCh3Exception(__FILE__, __LINE__); } SetXsecCov(xsec_cov); @@ -47,38 +50,30 @@ samplePDFFDBase::~samplePDFFDBase() void samplePDFFDBase::ReadSampleConfig() { - if (CheckNodeExists(SampleManager->raw(), "SampleName")) { - samplename = SampleManager->raw()["SampleName"].as(); - } else{ + if (!CheckNodeExists(SampleManager->raw(), "SampleName")) { MACH3LOG_ERROR("SampleName not defined in {}, please add this!", SampleManager->GetFileName()); throw MaCh3Exception(__FILE__, __LINE__); } + samplename = SampleManager->raw()["SampleName"].as(); - if (CheckNodeExists(SampleManager->raw(), "NSubSamples")) { - nSamples = SampleManager->raw()["NSubSamples"].as(); - } else{ + if (!CheckNodeExists(SampleManager->raw(), "NSubSamples")) { MACH3LOG_ERROR("NSubSamples not defined in {}, please add this!", SampleManager->GetFileName()); throw MaCh3Exception(__FILE__, __LINE__); } + nSamples = SampleManager->raw()["NSubSamples"].as(); - if (CheckNodeExists(SampleManager->raw(), "DetID")) { - SampleDetID = SampleManager->raw()["DetID"].as(); - } else{ + if (!CheckNodeExists(SampleManager->raw(), "DetID")) { MACH3LOG_ERROR("ID not defined in {}, please add this!", SampleManager->GetFileName()); throw MaCh3Exception(__FILE__, __LINE__); } + SampleDetID = SampleManager->raw()["DetID"].as(); - if (CheckNodeExists(SampleManager->raw(), "NuOsc", "NuOscConfigFile")) { - NuOscillatorConfigFile = SampleManager->raw()["NuOsc"]["NuOscConfigFile"].as(); - } else { + if (!CheckNodeExists(SampleManager->raw(), "NuOsc", "NuOscConfigFile")) { MACH3LOG_ERROR("NuOsc::NuOscConfigFile is not defined in {}, please add this!", SampleManager->GetFileName()); throw MaCh3Exception(__FILE__, __LINE__); } - - for (int i=0;iraw()["NuOsc"]["NuOscConfigFile"].as(); + MCSamples.resize(nSamples); //Default TestStatistic is kPoisson //ETA: this can be configured with samplePDFBase::SetTestStatistic() @@ -160,7 +155,7 @@ void samplePDFFDBase::ReadSampleConfig() SelectionVec = 
{KinematicParamter, low_bound, up_bound}; StoredSelection.push_back(SelectionVec); } - NSelections = SelectionStr.size(); + NSelections = int(SelectionStr.size()); return; } @@ -174,7 +169,7 @@ void samplePDFFDBase::Initialise() { Init(); int TotalMCEvents = 0; - for(_int_ iSample=0 ; iSample < nSamples ; iSample++){ + for(M3::int_t iSample=0 ; iSample < nSamples ; iSample++){ MACH3LOG_INFO("============================================="); MACH3LOG_INFO("Initialising sample: {}/{}", iSample, nSamples); MCSamples[iSample].nEvents = setupExperimentMC(iSample); @@ -352,11 +347,14 @@ void samplePDFFDBase::reweight() // Reweight function - Depending on Osc Calcula //KS: Reset the histograms before reweight ResetHistograms(); - std::vector<_float_> OscVec(OscCov->GetNumParams()); + std::vector OscVec(OscCov->GetNumParams()); for (int iPar=0;iParGetNumParams();iPar++) { - OscVec[iPar] = OscCov->getParProp(iPar); + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wuseless-cast" + OscVec[iPar] = M3::float_t(OscCov->getParProp(iPar)); + #pragma GCC diagnostic pop } - for (int iSample=0;iSample<(int)MCSamples.size();iSample++) { + for (int iSample=0;iSampleCalculateProbabilities(OscVec); } @@ -384,8 +382,8 @@ void samplePDFFDBase::fillArray() { #else //ETA we should probably store this in samplePDFFDBase - int nXBins = XBinEdges.size()-1; - int nYBins = YBinEdges.size()-1; + int nXBins = int(XBinEdges.size()-1); + int nYBins = int(YBinEdges.size()-1); //DB Reset values stored in PDF array to 0. 
for (int yBin=0;yBinEvaluate(); + if(SplineHandler){ + SplineHandler->Evaluate(); } for (unsigned int iSample=0;iSample= MCSamples[iSample].rw_lower_xbinedge[iEvent]) { - XBinToFill = MCSamples[iSample].NomXBin[iEvent]; + XBinToFill = MCSamples[iSample].NomXBin[iEvent]; } //DB - Second, check to see if the event is outside of the binning range and skip event if it is //ETA- note that nXBins is XBinEdges.size() - 1 else if (XVar < XBinEdges[0] || XVar >= XBinEdges[nXBins]) { - continue; + continue; } //DB - Thirdly, check the adjacent bins first as Eb+CC+EScale shifts aren't likely to move an Erec more than 1bin width //Shifted down one bin from the event bin at nominal else if (XVar < MCSamples[iSample].rw_lower_xbinedge[iEvent] && XVar >= MCSamples[iSample].rw_lower_lower_xbinedge[iEvent]) { - XBinToFill = MCSamples[iSample].NomXBin[iEvent]-1; + XBinToFill = MCSamples[iSample].NomXBin[iEvent]-1; } //Shifted up one bin from the event bin at nominal else if (XVar < MCSamples[iSample].rw_upper_upper_xbinedge[iEvent] && XVar >= MCSamples[iSample].rw_upper_xbinedge[iEvent]) { - XBinToFill = MCSamples[iSample].NomXBin[iEvent]+1; + XBinToFill = MCSamples[iSample].NomXBin[iEvent]+1; } //DB - If we end up in this loop, the event has been shifted outside of its nominal bin, but is still within the allowed binning range else { - for (unsigned int iBin=0;iBin<(XBinEdges.size()-1);iBin++) { - if (XVar >= XBinEdges[iBin] && XVar < XBinEdges[iBin+1]) { - XBinToFill = iBin; - } - } + for (unsigned int iBin=0;iBin<(XBinEdges.size()-1);iBin++) + { + if (XVar >= XBinEdges[iBin] && XVar < XBinEdges[iBin+1]) { + XBinToFill = iBin; + } + } } //DB Fill relevant part of thread array @@ -500,12 +499,12 @@ void samplePDFFDBase::fillArray() { // ************************************************ void samplePDFFDBase::fillArray_MP() { - int nXBins = XBinEdges.size()-1; - int nYBins = YBinEdges.size()-1; + size_t nXBins = int(XBinEdges.size()-1); + size_t nYBins = int(YBinEdges.size()-1); //DB 
Reset values stored in PDF array to 0. - for (int yBin=0;yBinEvaluate(); + if(SplineHandler){ + SplineHandler->Evaluate(); } for (unsigned int iSample=0;iSample::iterator lit = MCSamples[iSample].xsec_norms_bins[iEvent].begin();lit!=MCSamples[iSample].xsec_norms_bins[iEvent].end();lit++) { - MCSamples[iSample].xsec_norm_pointers[iEvent][counter] = XsecCov->retPointer(*lit); + for(auto const & norm_bin: MCSamples[iSample].xsec_norms_bins[iEvent]) { + MCSamples[iSample].xsec_norm_pointers[iEvent][counter] = XsecCov->retPointer(norm_bin); counter += 1; } @@ -785,10 +784,10 @@ void samplePDFFDBase::SetupNormParameters(){ //A way to check whether a normalisation parameter applies to an event or not void samplePDFFDBase::CalcXsecNormsBins(int iSample){ - fdmc_base *fdobj = &MCSamples[iSample]; + FarDetectorCoreInfo *fdobj = &MCSamples[iSample]; for(int iEvent=0; iEvent < fdobj->nEvents; ++iEvent){ - std::list< int > XsecBins = {}; + std::vector< int > XsecBins = {}; if (XsecCov) { for (std::vector::iterator it = xsec_norms.begin(); it != xsec_norms.end(); ++it) { // Skip oscillated NC events @@ -892,8 +891,8 @@ void samplePDFFDBase::CalcXsecNormsBins(int iSample){ void samplePDFFDBase::set1DBinning(std::vector &XVec){ _hPDF1D->Reset(); - _hPDF1D->SetBins(XVec.size()-1, XVec.data()); - dathist->SetBins(XVec.size()-1, XVec.data()); + _hPDF1D->SetBins(int(XVec.size()-1), XVec.data()); + dathist->SetBins(int(XVec.size()-1), XVec.data()); //This will overwrite XBinEdges with whatever you pass this function XBinEdges = XVec; @@ -902,11 +901,11 @@ void samplePDFFDBase::set1DBinning(std::vector &XVec){ YBinEdges[1] = 1e8; _hPDF2D->Reset(); - _hPDF2D ->SetBins(XVec.size()-1, XVec.data(), YBinEdges.size()-1, YBinEdges.data()); - dathist2d->SetBins(XVec.size()-1, XVec.data(), YBinEdges.size()-1, YBinEdges.data()); + _hPDF2D ->SetBins(int(XVec.size()-1), XVec.data(), int(YBinEdges.size()-1), YBinEdges.data()); + dathist2d->SetBins(int(XVec.size()-1), XVec.data(), 
int(YBinEdges.size()-1), YBinEdges.data()); - int nXBins = XBinEdges.size()-1; - int nYBins = YBinEdges.size()-1; + int nXBins = int(XBinEdges.size()-1); + int nYBins = int(YBinEdges.size()-1); samplePDFFD_array = new double*[nYBins]; samplePDFFD_array_w2 = new double*[nYBins]; @@ -926,19 +925,19 @@ void samplePDFFDBase::set1DBinning(std::vector &XVec){ void samplePDFFDBase::set2DBinning(std::vector &XVec, std::vector &YVec) { _hPDF1D->Reset(); - _hPDF1D->SetBins(XVec.size()-1, XVec.data()); - dathist->SetBins(XVec.size()-1, XVec.data()); + _hPDF1D->SetBins(int(XVec.size()-1), XVec.data()); + dathist->SetBins(int(XVec.size()-1), XVec.data()); _hPDF2D->Reset(); - _hPDF2D->SetBins(XVec.size()-1, XVec.data(), YVec.size()-1, YVec.data()); - dathist2d->SetBins(XVec.size()-1, XVec.data(), YVec.size()-1, YVec.data()); + _hPDF2D->SetBins(int(XVec.size()-1), XVec.data(), int(YVec.size()-1), YVec.data()); + dathist2d->SetBins(int(XVec.size()-1), XVec.data(), int(YVec.size()-1), YVec.data()); //XBinEdges = XVec; //YBinEdges = YVec; //ETA - maybe need to be careful here - int nXBins = XVec.size()-1; - int nYBins = YVec.size()-1; + int nXBins = int(XVec.size()-1); + int nYBins = int(YVec.size()-1); samplePDFFD_array = new double*[nYBins]; samplePDFFD_array_w2 = new double*[nYBins]; @@ -981,8 +980,8 @@ void samplePDFFDBase::set1DBinning(int nbins, double* boundaries) _hPDF2D->SetBins(nbins,boundaries,1,YBinEdges_Arr); dathist2d->SetBins(nbins,boundaries,1,YBinEdges_Arr); - int nXBins = XBinEdges.size()-1; - int nYBins = YBinEdges.size()-1; + int nXBins = int(XBinEdges.size()-1); + int nYBins = int(YBinEdges.size()-1); samplePDFFD_array = new double*[nYBins]; samplePDFFD_array_w2 = new double*[nYBins]; @@ -1016,8 +1015,8 @@ void samplePDFFDBase::set1DBinning(int nbins, double low, double high) _hPDF2D->SetBins(nbins,low,high,1,YBinEdges[0],YBinEdges[1]); dathist2d->SetBins(nbins,low,high,1,YBinEdges[0],YBinEdges[1]); - int nXBins = XBinEdges.size()-1; - int nYBins = 
YBinEdges.size()-1; + int nXBins = int(XBinEdges.size()-1); + int nYBins = int(YBinEdges.size()-1); samplePDFFD_array = new double*[nYBins]; samplePDFFD_array_w2 = new double*[nYBins]; @@ -1036,7 +1035,7 @@ void samplePDFFDBase::set1DBinning(int nbins, double low, double high) void samplePDFFDBase::FindNominalBinAndEdges1D() { //Set rw_pdf_bin and rw_upper_xbinedge and rw_lower_xbinedge for each skmc_base - for(int mc_i = 0 ; mc_i < (int)MCSamples.size() ; mc_i++){ + for(int mc_i = 0 ; mc_i < int(MCSamples.size()) ; mc_i++){ for(int event_i = 0 ; event_i < MCSamples[mc_i].nEvents ; event_i++){ //Set x_var and y_var values based on XVarStr and YVarStr @@ -1063,14 +1062,14 @@ void samplePDFFDBase::FindNominalBinAndEdges1D() { } if ((bin-1) >= 0 && (bin-1) < int(XBinEdges.size()-1)) { - MCSamples[mc_i].NomXBin[event_i] = bin-1; - } else { - MCSamples[mc_i].NomXBin[event_i] = -1; - low_edge = _DEFAULT_RETURN_VAL_; - upper_edge = _DEFAULT_RETURN_VAL_; - low_lower_edge = _DEFAULT_RETURN_VAL_; - upper_upper_edge = _DEFAULT_RETURN_VAL_; - } + MCSamples[mc_i].NomXBin[event_i] = bin-1; + } else { + MCSamples[mc_i].NomXBin[event_i] = -1; + low_edge = _DEFAULT_RETURN_VAL_; + upper_edge = _DEFAULT_RETURN_VAL_; + low_lower_edge = _DEFAULT_RETURN_VAL_; + upper_upper_edge = _DEFAULT_RETURN_VAL_; + } MCSamples[mc_i].NomYBin[event_i] = 0; MCSamples[mc_i].rw_lower_xbinedge[event_i] = low_edge; @@ -1101,8 +1100,8 @@ void samplePDFFDBase::set2DBinning(int nbins1, double* boundaries1, int nbins2, YBinEdges[i] = _hPDF2D->GetYaxis()->GetBinLowEdge(i+1); } - int nXBins = XBinEdges.size()-1; - int nYBins = YBinEdges.size()-1; + int nXBins = int(XBinEdges.size()-1); + int nYBins = int(YBinEdges.size()-1); samplePDFFD_array = new double*[nYBins]; samplePDFFD_array_w2 = new double*[nYBins]; @@ -1137,8 +1136,8 @@ void samplePDFFDBase::set2DBinning(int nbins1, double low1, double high1, int nb YBinEdges[i] = _hPDF2D->GetYaxis()->GetBinLowEdge(i+1); } - int nXBins = XBinEdges.size()-1; - int 
nYBins = YBinEdges.size()-1; + int nXBins = int(XBinEdges.size()-1); + int nYBins = int(YBinEdges.size()-1); samplePDFFD_array = new double*[nYBins]; samplePDFFD_array_w2 = new double*[nYBins]; @@ -1157,7 +1156,7 @@ void samplePDFFDBase::set2DBinning(int nbins1, double low1, double high1, int nb void samplePDFFDBase::FindNominalBinAndEdges2D() { //Set rw_pdf_bin and rw_upper_xbinedge and rw_lower_xbinedge for each skmc_base - for(int mc_i = 0 ; mc_i < (int)MCSamples.size() ; mc_i++){ + for(int mc_i = 0 ; mc_i < int(MCSamples.size()) ; mc_i++){ for(int event_i = 0 ; event_i < MCSamples[mc_i].nEvents ; event_i++){ //Set x_var and y_var values based on XVarStr and YVarStr @@ -1214,8 +1213,8 @@ void samplePDFFDBase::FindNominalBinAndEdges2D() { void samplePDFFDBase::addData(std::vector &data) { dataSample = new std::vector(data); - dataSample2D = NULL; - dathist2d = NULL; + dataSample2D = nullptr; + dathist2d = nullptr; dathist->Reset(); if (GetNDim()!=1) { @@ -1228,8 +1227,8 @@ void samplePDFFDBase::addData(std::vector &data) { dathist->Fill(dataSample->at(i)); } - int nXBins = XBinEdges.size()-1; - int nYBins = YBinEdges.size()-1; + int nXBins = int(XBinEdges.size()-1); + int nYBins = int(YBinEdges.size()-1); samplePDFFD_data = new double*[nYBins]; for (int yBin=0;yBin &data) { void samplePDFFDBase::addData(std::vector< std::vector > &data) { dataSample2D = new std::vector< std::vector >(data); - dataSample = NULL; - dathist = NULL; + dataSample = nullptr; + dathist = nullptr; dathist2d->Reset(); if (GetNDim()!=2) { @@ -1258,8 +1257,8 @@ void samplePDFFDBase::addData(std::vector< std::vector > &data) { dathist2d->Fill(dataSample2D->at(0)[i],dataSample2D->at(1)[i]); } - int nXBins = XBinEdges.size()-1; - int nYBins = YBinEdges.size()-1; + int nXBins = int(XBinEdges.size()-1); + int nYBins = int(YBinEdges.size()-1); samplePDFFD_data = new double*[nYBins]; for (int yBin=0;yBin > &data) { void samplePDFFDBase::addData(TH1D* Data) { MACH3LOG_INFO("Adding 1D data histogram 
: {} with {} events", Data->GetName(), Data->Integral()); - dathist2d = NULL; + dathist2d = nullptr; dathist = Data; - dataSample = NULL; - dataSample2D = NULL; + dataSample = nullptr; + dataSample2D = nullptr; if (GetNDim()!=1) { MACH3LOG_ERROR("Trying to set a 1D 'data' histogram in a 2D sample - Quitting"); throw MaCh3Exception(__FILE__ , __LINE__ );} - int nXBins = XBinEdges.size()-1; - int nYBins = YBinEdges.size()-1; + int nXBins = int(XBinEdges.size()-1); + int nYBins = int(YBinEdges.size()-1); samplePDFFD_data = new double*[nYBins]; for (int yBin=0;yBinGetName(), Data->Integral()); dathist2d = Data; - dathist = NULL; - dataSample = NULL; - dataSample2D = NULL; + dathist = nullptr; + dataSample = nullptr; + dataSample2D = nullptr; if (GetNDim()!=2) { MACH3LOG_ERROR("Trying to set a 2D 'data' histogram in a 1D sample - Quitting"); throw MaCh3Exception(__FILE__ , __LINE__ );} - int nXBins = XBinEdges.size()-1; - int nYBins = YBinEdges.size()-1; + int nXBins = int(XBinEdges.size()-1); + int nYBins = int(YBinEdges.size()-1); samplePDFFD_data = new double*[nYBins]; for (int yBin=0;yBin((int)MCSamples.size()); - for (int iSample=0;iSample<(int)MCSamples.size();iSample++) { + NuOscProbCalcers = std::vector(int(MCSamples.size())); + for (size_t iSample=0;iSampleCreateOscillator(NuOscillatorConfigFile); if (!NuOscProbCalcers[iSample]->EvalPointsSetInConstructor()) { - std::vector<_float_> EnergyArray; - for (int iEvent=0;iEvent<(int)MCSamples[iSample].nEvents;iEvent++) { + std::vector EnergyArray; + for (int iEvent=0;iEvent CosineZArray; - for (int iEvent=0;iEvent<(int)MCSamples[iSample].nEvents;iEvent++) { + if (MCSamples[iSample].rw_truecz.size() > 0 && int(MCSamples[iSample].rw_truecz.size()) == MCSamples[iSample].nEvents) { //Can only happen if truecz has been initialised within the experiment specific code + std::vector CosineZArray; + for (int iEvent=0;iEventSetup(); - for (int iEvent=0;iEvent<(int)MCSamples[iSample].nEvents;iEvent++) { + for (int 
iEvent=0;iEvent 0) { //Can only happen if truecz has been initialised within the experiment specific code //Atmospherics - MCSamples[iSample].osc_w_pointer[iEvent] = NuOscProbCalcers[iSample]->ReturnWeightPointer(InitFlav,FinalFlav,*(MCSamples[iSample].rw_etru[iEvent]),*(MCSamples[iSample].rw_truecz[iEvent])); + MCSamples[iSample].osc_w_pointer[iEvent] = NuOscProbCalcers[iSample]->ReturnWeightPointer(InitFlav,FinalFlav,FLOAT_T(*(MCSamples[iSample].rw_etru[iEvent])),FLOAT_T(*(MCSamples[iSample].rw_truecz[iEvent]))); } else { //Beam - MCSamples[iSample].osc_w_pointer[iEvent] = NuOscProbCalcers[iSample]->ReturnWeightPointer(InitFlav,FinalFlav,*(MCSamples[iSample].rw_etru[iEvent])); + MCSamples[iSample].osc_w_pointer[iEvent] = NuOscProbCalcers[iSample]->ReturnWeightPointer(InitFlav,FinalFlav,FLOAT_T(*(MCSamples[iSample].rw_etru[iEvent]))); } } // end if NC } // end loop over events @@ -1431,6 +1430,9 @@ void samplePDFFDBase::SetupNuOscillator() { double samplePDFFDBase::GetEventWeight(int iSample, int iEntry) { double totalweight = 1.0; + #ifdef MULTITHREAD + #pragma omp simd + #endif for (int iParam=0;iParam > EventSplines; switch(nDimensions){ - case 1: - EventSplines = splineFile->GetEventSplines(GetName(), i, *(MCSamples[i].mode[j]), *(MCSamples[i].rw_etru[j]), *(MCSamples[i].x_var[j]), 0.); - break; - case 2: - EventSplines = splineFile->GetEventSplines(GetName(), i, *(MCSamples[i].mode[j]), *(MCSamples[i].rw_etru[j]), *(MCSamples[i].x_var[j]), *(MCSamples[i].y_var[j])); - break; - default: - MACH3LOG_ERROR("Error in assigning spline bins because nDimensions = {}", nDimensions); - MACH3LOG_ERROR("MaCh3 only supports splines binned in Etrue + the sample binning"); - MACH3LOG_ERROR("Please check the sample binning you specified in your sample config "); - break; + case 1: + EventSplines = SplineHandler->GetEventSplines(GetName(), i, int(*(MCSamples[i].mode[j])), *(MCSamples[i].rw_etru[j]), *(MCSamples[i].x_var[j]), 0.); + break; + case 2: + EventSplines = 
SplineHandler->GetEventSplines(GetName(), i, int(*(MCSamples[i].mode[j])), *(MCSamples[i].rw_etru[j]), *(MCSamples[i].x_var[j]), *(MCSamples[i].y_var[j])); + break; + default: + MACH3LOG_ERROR("Error in assigning spline bins because nDimensions = {}", nDimensions); + MACH3LOG_ERROR("MaCh3 only supports splines binned in Etrue + the sample binning"); + MACH3LOG_ERROR("Please check the sample binning you specified in your sample config "); + break; } - MCSamples[i].nxsec_spline_pointers[j] = EventSplines.size(); + MCSamples[i].nxsec_spline_pointers[j] = int(EventSplines.size()); if(MCSamples[i].nxsec_spline_pointers[j] < 0){ - throw MaCh3Exception(__FILE__, __LINE__); + throw MaCh3Exception(__FILE__, __LINE__); } - MCSamples[i].xsec_spline_pointers[j] = new const double*[MCSamples[i].nxsec_spline_pointers[j]]; + MCSamples[i].xsec_spline_pointers[j].resize(MCSamples[i].nxsec_spline_pointers[j]); for(int spline=0; splineretPointer(EventSplines[spline][0], EventSplines[spline][1], EventSplines[spline][2], - EventSplines[spline][3], EventSplines[spline][4], EventSplines[spline][5], EventSplines[spline][6]); + //Event Splines indexed as: sample name, oscillation channel, syst, mode, etrue, var1, var2 (var2 is a dummy 0 for 1D splines) + MCSamples[i].xsec_spline_pointers[j][spline] = SplineHandler->retPointer(EventSplines[spline][0], EventSplines[spline][1], EventSplines[spline][2], + EventSplines[spline][3], EventSplines[spline][4], EventSplines[spline][5], EventSplines[spline][6]); } } } - + return; } double samplePDFFDBase::GetLikelihood() { - if (samplePDFFD_data == NULL) { + if (samplePDFFD_data == nullptr) { MACH3LOG_ERROR("Data sample is empty! 
Can't calculate a likelihood!"); throw MaCh3Exception(__FILE__, __LINE__); } //This can be done only once and stored - int nXBins = XBinEdges.size()-1; - int nYBins = YBinEdges.size()-1; + int nXBins = int(XBinEdges.size()-1); + int nYBins = int(YBinEdges.size()-1); int xBin; int yBin; @@ -1514,8 +1516,7 @@ void samplePDFFDBase::InitialiseSingleFDMCObject(int iSample, int nEvents_) { throw MaCh3Exception(__FILE__, __LINE__); } - MCSamples[iSample] = fdmc_base(); - fdmc_base *fdobj = &MCSamples[iSample]; + FarDetectorCoreInfo *fdobj = &MCSamples[iSample]; fdobj->nEvents = nEvents_; fdobj->nutype = -9; @@ -1524,59 +1525,41 @@ void samplePDFFDBase::InitialiseSingleFDMCObject(int iSample, int nEvents_) { fdobj->Unity = 1.; fdobj->Unity_Int = 1.; - fdobj->x_var = new const double*[fdobj->nEvents]; - fdobj->y_var = new const double*[fdobj->nEvents]; - fdobj->rw_etru = new const double*[fdobj->nEvents]; - fdobj->XBin = new int[fdobj->nEvents]; - fdobj->YBin = new int[fdobj->nEvents]; - fdobj->NomXBin = new int[fdobj->nEvents]; - fdobj->NomYBin = new int[fdobj->nEvents]; - fdobj->XBin = new int [fdobj->nEvents]; - fdobj->YBin = new int [fdobj->nEvents];; - fdobj->rw_lower_xbinedge = new double [fdobj->nEvents]; - fdobj->rw_lower_lower_xbinedge = new double [fdobj->nEvents]; - fdobj->rw_upper_xbinedge = new double [fdobj->nEvents]; - fdobj->rw_upper_upper_xbinedge = new double [fdobj->nEvents]; - fdobj->mode = new double*[fdobj->nEvents]; - fdobj->nxsec_norm_pointers = new int[fdobj->nEvents]; - fdobj->xsec_norm_pointers = new const double**[fdobj->nEvents]; - fdobj->xsec_norms_bins = new std::list< int >[fdobj->nEvents]; - fdobj->xsec_w = new double[fdobj->nEvents]; - fdobj->isNC = new bool[fdobj->nEvents]; - fdobj->nxsec_spline_pointers = new int[fdobj->nEvents]; - fdobj->xsec_spline_pointers = new const double**[fdobj->nEvents]; - fdobj->ntotal_weight_pointers = new int[fdobj->nEvents]; - fdobj->total_weight_pointers = new const double**[fdobj->nEvents]; - fdobj->Target 
= new int*[fdobj->nEvents]; - fdobj->osc_w_pointer = new const _float_*[fdobj->nEvents]; - //fdobj->rw_truecz = new const double*[fdobj->nEvents]; - - for(int iEvent = 0 ;iEvent < fdobj->nEvents ; ++iEvent){ - fdobj->rw_etru[iEvent] = &fdobj->Unity; - //fdobj->rw_truecz[iEvent] = &fdobj->Unity; - fdobj->mode[iEvent] = &fdobj->Unity; - fdobj->Target[iEvent] = 0; - fdobj->NomXBin[iEvent] = -1; - fdobj->NomYBin[iEvent] = -1; - fdobj->XBin[iEvent] = -1; - fdobj->YBin[iEvent] = -1; - fdobj->rw_lower_xbinedge[iEvent] = -1; - fdobj->rw_lower_lower_xbinedge[iEvent] = -1; - fdobj->rw_upper_xbinedge[iEvent] = -1; - fdobj->rw_upper_upper_xbinedge[iEvent] = -1; - fdobj->xsec_w[iEvent] = 1.0; - fdobj->isNC[iEvent] = false; - fdobj->SampleDetID = -1; - #ifdef _LOW_MEMORY_STRUCTS_ - fdobj->osc_w_pointer[iEvent] = &(fdobj->Unity_F); - #else - fdobj->osc_w_pointer[iEvent] = &(fdobj->Unity); - #endif + int nEvents = fdobj->nEvents; + fdobj->x_var.resize(nEvents, &fdobj->Unity); + fdobj->y_var.resize(nEvents, &fdobj->Unity); + fdobj->rw_etru.resize(nEvents, &fdobj->Unity); + fdobj->XBin.resize(nEvents, -1); + fdobj->YBin.resize(nEvents, -1); + fdobj->NomXBin.resize(nEvents, -1); + fdobj->NomYBin.resize(nEvents, -1); + fdobj->rw_lower_xbinedge.resize(nEvents, -1); + fdobj->rw_lower_lower_xbinedge.resize(nEvents, -1); + fdobj->rw_upper_xbinedge.resize(nEvents, -1); + fdobj->rw_upper_upper_xbinedge.resize(nEvents, -1); + fdobj->mode.resize(nEvents, &fdobj->Unity); + fdobj->nxsec_norm_pointers.resize(nEvents); + fdobj->xsec_norm_pointers.resize(nEvents); + fdobj->xsec_norms_bins.resize(nEvents); + fdobj->xsec_w.resize(nEvents, 1.0); + fdobj->isNC = new bool[nEvents]; + fdobj->nxsec_spline_pointers.resize(nEvents); + fdobj->xsec_spline_pointers.resize(nEvents); + fdobj->ntotal_weight_pointers.resize(nEvents); + fdobj->total_weight_pointers.resize(nEvents); + fdobj->Target.resize(nEvents, 0); +#ifdef _LOW_MEMORY_STRUCTS_ + fdobj->osc_w_pointer.resize(nEvents, &fdobj->Unity_F); +#else + 
fdobj->osc_w_pointer.resize(nEvents, &fdobj->Unity); +#endif + fdobj->SampleDetID = -1; - fdobj->x_var[iEvent] = &fdobj->Unity; - fdobj->y_var[iEvent] = &fdobj->Unity; + for(int iEvent = 0 ; iEvent < fdobj->nEvents ; ++iEvent){ + fdobj->isNC[iEvent] = false; } + return; } void samplePDFFDBase::InitialiseSplineObject() { @@ -1594,72 +1577,13 @@ void samplePDFFDBase::InitialiseSplineObject() { SplineVarNames.push_back(YVarStr); } - splineFile->AddSample(samplename, SampleDetID, spline_filepaths, SplineVarNames); - splineFile->PrintArrayDimension(); - splineFile->CountNumberOfLoadedSplines(false, 1); - splineFile->TransferToMonolith(); + SplineHandler->AddSample(samplename, SampleDetID, spline_filepaths, SplineVarNames); + SplineHandler->PrintArrayDimension(); + SplineHandler->CountNumberOfLoadedSplines(false, 1); + SplineHandler->TransferToMonolith(); MACH3LOG_INFO("--------------------------------"); MACH3LOG_INFO("Setup Far Detector splines"); fillSplineBins(); } - -TH1* samplePDFFDBase::get1DVarHist(std::string ProjectionVar_Str, std::vector< std::vector > SelectionVec, int WeightStyle, TAxis* Axis) { - //DB Grab the associated enum with the argument string - int ProjectionVar_Int = ReturnKinematicParameterFromString(ProjectionVar_Str); - - //DB Need to overwrite the Selection member variable so that IsEventSelected function operates correctly. 
- // Consequently, store the selection cuts already saved in the sample, overwrite the Selection variable, then reset - std::vector< std::vector > tmp_Selection = Selection; - - std::vector< std::vector > SelectionVecToApply; - - //DB Add all the predefined selections to the selection vector which will be applied - for (int iSelec=0;iSelecGetNbins(),Axis->GetXbins()->GetArray()); - } else { - std::vector xBinEdges = ReturnKinematicParameterBinning(ProjectionVar_Str); - double xbin_edges[xBinEdges.size()]; - for (unsigned int i=0;iFill(Var,Weight); - } - } - } - - //DB Reset the saved selection - Selection = tmp_Selection; - - return _h1DVar; -} diff --git a/samplePDF/samplePDFFDBase.h b/samplePDF/samplePDFFDBase.h index 1daae01e1..b11a58815 100644 --- a/samplePDF/samplePDFFDBase.h +++ b/samplePDF/samplePDFFDBase.h @@ -8,16 +8,16 @@ #include "TLegend.h" //MaCh3 includes -#include "OscProbCalcer/OscProbCalcerBase.h" -#include "Oscillator/OscillatorBase.h" - #include "splines/splineFDBase.h" #include "covariance/covarianceXsec.h" #include "covariance/covarianceOsc.h" #include "samplePDF/samplePDFBase.h" -#include "samplePDF/FDMCStruct.h" +#include "samplePDF/FarDetectorCoreInfoStruct.h" + +//forward declare so we don't bleed NuOscillator headers +class OscillatorBase; /// @brief Class responsible for handling implementation of samples used in analysis, reweighting and returning LLH class samplePDFFDBase : public samplePDFBase @@ -30,7 +30,7 @@ class samplePDFFDBase : public samplePDFBase virtual ~samplePDFFDBase(); int GetNDim(){return nDimensions;} //DB Function to differentiate 1D or 2D binning - std::string GetName(){return samplename;} + std::string GetName() const {return samplename;} //=============================================================================== // DB Reweighting and Likelihood functions @@ -57,10 +57,11 @@ class samplePDFFDBase : public samplePDFBase /// @brief including Dan's magic NuOscillator void SetupNuOscillator(); - virtual void 
setupSplines(fdmc_base *FDObj, const char *SplineFileName, int nutype, int signal){}; + virtual void setupSplines(FarDetectorCoreInfo *, const char *, int , int ){}; + void ReadSampleConfig(); - int getNMCSamples() {return MCSamples.size();} + int getNMCSamples() {return int(MCSamples.size());} int getNEventsInSample(int iSample) { if (iSample < 0 || iSample > getNMCSamples()) { @@ -81,8 +82,8 @@ class samplePDFFDBase : public samplePDFBase TH1* get1DVarHist(std::string ProjectionVar, std::vector< std::vector > SelectionVec = std::vector< std::vector >(), int WeightStyle=0, TAxis* Axis=nullptr); //ETA - new function to generically convert a string from xsec cov to a kinematic type - virtual inline int ReturnKinematicParameterFromString(std::string KinematicStr) = 0; - virtual inline std::string ReturnStringFromKinematicParameter(int KinematicVariable) = 0; + virtual int ReturnKinematicParameterFromString(std::string KinematicStr) = 0; + virtual std::string ReturnStringFromKinematicParameter(int KinematicVariable) = 0; protected: /// @brief DB Function to determine which weights apply to which types of samples pure virtual!! 
@@ -105,7 +106,8 @@ class samplePDFFDBase : public samplePDFBase /// @brief Function which does a lot of the lifting regarding the workflow in creating different MC objects void Initialise(); - splineFDBase *splineFile; + /// @brief Contains all your binned splines and handles the setup and the returning of weights from spline evaluations + std::unique_ptr SplineHandler; //=============================================================================== void fillSplineBins(); @@ -192,7 +194,7 @@ class samplePDFFDBase : public samplePDFBase //=============================================================================== //MC variables - std::vector MCSamples; + std::vector MCSamples; //=============================================================================== //=============================================================================== diff --git a/splines/CMakeLists.txt b/splines/CMakeLists.txt index 65c5c2e81..4b6f2b689 100644 --- a/splines/CMakeLists.txt +++ b/splines/CMakeLists.txt @@ -1,3 +1,8 @@ +ROOT_GENERATE_DICTIONARY(MaCh3SplineDict + ${CMAKE_CURRENT_SOURCE_DIR}/SplineCommon.h + LINKDEF ${CMAKE_CURRENT_SOURCE_DIR}/MaCh3SplinesLinkDef.h + OPTIONS -p) + set(HEADERS SplineBase.h splineFDBase.h @@ -11,14 +16,10 @@ add_library(Splines SHARED SplineBase.cpp splineFDBase.cpp SplineMonolith.cpp + #MaCh3SplineDict.cxx #KS: This will have to be sperate library because root like C-style casting too much $<$>:gpuSplineUtils.cu> ) -ROOT_GENERATE_DICTIONARY(MaCh3SplineDict - ${CMAKE_CURRENT_SOURCE_DIR}/SplineCommon.h - LINKDEF ${CMAKE_CURRENT_SOURCE_DIR}/MaCh3SplinesLinkDef.h - OPTIONS -p) - if(NOT CPU_ONLY) set_target_properties(Splines PROPERTIES CUDA_SEPARABLE_COMPILATION ON) #KS: In cmake 3.18 this is no longer needed https://cmake.org/cmake/help/latest/variable/CMAKE_CUDA_ARCHITECTURES.html#cmake-cuda-architectures @@ -29,13 +30,17 @@ set_target_properties(Splines PROPERTIES PUBLIC_HEADER "${HEADERS}" EXPORT_NAME Splines) -target_link_libraries(Splines 
Manager MaCh3CompilerOptions) +target_link_libraries(Splines PUBLIC Manager) +target_link_libraries(Splines PRIVATE MaCh3Warnings) target_include_directories(Splines PUBLIC $ $ ) +#install(FILES ${CMAKE_CURRENT_BINARY_DIR}/libMaCh3SplineDict_rdict.pcm DESTINATION lib/) +#install(FILES ${CMAKE_CURRENT_BINARY_DIR}/libMaCh3SplineDict.rootmap DESTINATION lib/) + install(TARGETS Splines EXPORT MaCh3-targets LIBRARY DESTINATION lib/ diff --git a/splines/SplineBase.cpp b/splines/SplineBase.cpp index a839487a2..157ad53ec 100644 --- a/splines/SplineBase.cpp +++ b/splines/SplineBase.cpp @@ -1,5 +1,7 @@ #include "splines/SplineBase.h" +#pragma GCC diagnostic ignored "-Wuseless-cast" +#pragma GCC diagnostic ignored "-Wfloat-conversion" // ***************************************** SplineBase::SplineBase() { @@ -39,7 +41,7 @@ void SplineBase::getTF1Coeff(TF1_red* &spl, int &nPoints, float *& coeffs) { // TSpline3 can only take doubles, not floats // But our GPU is slow with doubles, so need to cast to float for (int i = 0; i < nPoints; i++) { - coeffs[i] = spl->GetParameter(i); + coeffs[i] = float(spl->GetParameter(M3::int_t(i))); } // The structure is now coeffs = {a,b,c,d,e} } diff --git a/splines/SplineCommon.h b/splines/SplineCommon.h index 2a8814870..54f94bb21 100644 --- a/splines/SplineCommon.h +++ b/splines/SplineCommon.h @@ -12,7 +12,7 @@ #endif /// KS: We store coefficients {y,b,c,d} in one array one by one, this is only to define it once rather then insert "4" all over the code -#define _nCoeff_ 4 +#define _nCoeff_ 4 /// KS: For TF1 we store at most 5 coefficients, we could make it more flexible but for now define it here to make future changes easier to track #define _nTF1Coeff_ 2 diff --git a/splines/SplineMonolith.cpp b/splines/SplineMonolith.cpp index ea9e44c7c..81f9df932 100644 --- a/splines/SplineMonolith.cpp +++ b/splines/SplineMonolith.cpp @@ -4,11 +4,13 @@ #include "splines/gpuSplineUtils.cuh" #endif +#pragma GCC diagnostic ignored "-Wuseless-cast" +#pragma 
GCC diagnostic ignored "-Wfloat-conversion" + // ***************************************** //Set everything to NULL or 0 void SMonolith::Initialise() { // ***************************************** - #ifdef CUDA MACH3LOG_INFO("Using GPU version event by event monolith"); gpu_spline_handler = nullptr; @@ -26,19 +28,14 @@ void SMonolith::Initialise() { NTF1_valid = 0; NSplines_total_large = 0; - index_cpu = nullptr; - index_TF1_cpu = nullptr; - cpu_weights_var = nullptr; + cpu_weights_spline_var = nullptr; cpu_weights = nullptr; cpu_weights_tf1_var = nullptr; cpu_total_weights = nullptr; - SplineInfoArray = nullptr; - segments = NULL; - vals = NULL; - - return; + segments = nullptr; + ParamValues = nullptr; } // ***************************************** @@ -87,16 +84,16 @@ void SMonolith::PrepareForGPU(std::vector > //KS: Since we are going to copy it each step use fancy CUDA memory allocation #ifdef CUDA gpu_spline_handler->InitGPU_Segments(&segments); - gpu_spline_handler->InitGPU_Vals(&vals); + gpu_spline_handler->InitGPU_Vals(&ParamValues); #else segments = new short int[nParams](); - vals = new float[nParams](); + ParamValues = new float[nParams](); #endif - for (_int_ j = 0; j < nParams; j++) + for (M3::int_t j = 0; j < nParams; j++) { segments[j] = 0; - vals[j] = -999; + ParamValues[j] = -999; } // Number of objects we have in total if each event has *EVERY* spline. 
Needed for some arrays @@ -128,14 +125,14 @@ void SMonolith::PrepareForGPU(std::vector > #ifdef Weight_On_SplineBySpline_Basis // This holds the index of each spline - index_cpu = new int[NSplines_total_large]; - index_TF1_cpu = new int[NSplines_total_large]; + index_spline_cpu.resize(NSplines_total_large); + index_TF1_cpu.resize(NSplines_total_large); #ifdef MULTITHREAD #pragma omp parallel for #endif for (unsigned int j = 0; j < NSplines_total_large; j++) { - index_cpu[j] = -1; + index_spline_cpu[j] = -1; index_TF1_cpu[j] = -1; } // This holds the total CPU weights that gets read in samplePDFND @@ -207,14 +204,14 @@ void SMonolith::PrepareForGPU(std::vector > } } // Set the parameter number for this spline - cpu_spline_handler->paramNo_arr[NSplinesCounter] = ParamNumber; + cpu_spline_handler->paramNo_arr[NSplinesCounter] = short(ParamNumber); //KS: Fill map when each spline starts cpu_spline_handler->nKnots_arr[NSplinesCounter] = KnotCounter; KnotCounter += nPoints_tmp; #ifdef Weight_On_SplineBySpline_Basis // Set the index of the spline so we can tell apart from flat splines - index_cpu[EventCounter*nParams + ParamNumber] = NSplinesCounter; + index_spline_cpu[EventCounter*nParams + ParamNumber] = NSplinesCounter; #else ++ParamCounter; #endif @@ -234,11 +231,11 @@ void SMonolith::PrepareForGPU(std::vector > cpu_coeff_TF1_many[TF1PointsCounter+j] = temp_coeffs[j]; } // Save the number of points for this spline - cpu_nPoints_arr[TF1sCounter] = nPoints_tmp; + cpu_nPoints_arr[TF1sCounter] = short(nPoints_tmp); TF1PointsCounter += nPoints_tmp; // Set the parameter number for this spline - cpu_paramNo_TF1_arr[TF1sCounter] = ParamNumber; + cpu_paramNo_TF1_arr[TF1sCounter] = short(ParamNumber); #ifdef Weight_On_SplineBySpline_Basis // Set the index of the spline so we can tell apart from flat splines index_TF1_cpu[EventCounter*nParams + ParamNumber] = TF1sCounter; @@ -287,13 +284,13 @@ void SMonolith::PrepareForGPU(std::vector > MACH3LOG_WARN("Found in total {} BAD X", 
BadXCounter); #ifdef Weight_On_SplineBySpline_Basis // Make the array that holds all the returned weights from the GPU to pass to the CPU - cpu_weights_var = new float[NSplines_valid](); + cpu_weights_spline_var = new float[NSplines_valid](); cpu_weights_tf1_var = new float[NTF1_valid](); #else //KS: This is tricky as this variable use both by CPU and GPU, however if use CUDA we use cudaMallocHost #ifndef CUDA cpu_total_weights = new float[NEvents](); - cpu_weights_var = new float[NSplines_valid](); + cpu_weights_spline_var = new float[NSplines_valid](); cpu_weights_tf1_var = new float[NTF1_valid](); #endif #endif @@ -387,7 +384,6 @@ void SMonolith::MoveToGPU() { cpu_spline_handler = nullptr; MACH3LOG_INFO("Good GPU loading"); #endif - return; } // Need to specify template functions in header @@ -395,7 +391,7 @@ void SMonolith::MoveToGPU() { // Scan the master spline to get the maximum number of knots in any of the TSpline3* void SMonolith::ScanMasterSpline(std::vector > & MasterSpline, unsigned int &nEvents, - int &MaxPoints, + short int &MaxPoints, short int &numParams, int &nSplines, unsigned int &NSplinesValid, @@ -417,21 +413,21 @@ void SMonolith::ScanMasterSpline(std::vector nTF1_coeff = 0; // Check the number of events - nEvents = MasterSpline.size(); + nEvents = int(MasterSpline.size()); // Maximum number of splines one event can have (scan through and find this number) int nMaxSplines_PerEvent = 0; //KS: We later check that each event has the same number of splines so this is fine - numParams = MasterSpline[0].size(); + numParams = short(MasterSpline[0].size()); // Initialise - SplineInfoArray = new FastSplineInfo[numParams]; + SplineInfoArray.resize(numParams); // Loop over each parameter for(unsigned int EventCounter = 0; EventCounter < MasterSpline.size(); ++EventCounter) { // Check that each event has each spline saved if (numParams > 0) { - int TempSize = MasterSpline[EventCounter].size(); + int TempSize = int(MasterSpline[EventCounter].size()); if 
(TempSize != numParams) { MACH3LOG_ERROR("Found {} parameters for event {}", TempSize, EventCounter); MACH3LOG_ERROR("but was expecting {} since that's what I found for the previous event", numParams); @@ -439,7 +435,7 @@ void SMonolith::ScanMasterSpline(std::vector throw MaCh3Exception(__FILE__ , __LINE__ ); } } - numParams = MasterSpline[EventCounter].size(); + numParams = short(MasterSpline[EventCounter].size()); int nSplines_SingleEvent = 0; // Loop over each pointer @@ -453,7 +449,7 @@ void SMonolith::ScanMasterSpline(std::vector TSpline3_red* CurrSpline = dynamic_cast(TespFunc); int nPoints = CurrSpline->GetNp(); if (nPoints > MaxPoints) { - MaxPoints = nPoints; + MaxPoints = static_cast(nPoints); } numKnots += nPoints; nSplines_SingleEvent++; @@ -465,11 +461,11 @@ void SMonolith::ScanMasterSpline(std::vector SplineInfoArray[ParamNumber].nPts = CurrSpline->GetNp(); // Fill the x points - SplineInfoArray[ParamNumber].xPts = new _float_[SplineInfoArray[ParamNumber].nPts]; - for (_int_ k = 0; k < SplineInfoArray[ParamNumber].nPts; ++k) + SplineInfoArray[ParamNumber].xPts = new M3::float_t[SplineInfoArray[ParamNumber].nPts]; + for (M3::int_t k = 0; k < SplineInfoArray[ParamNumber].nPts; ++k) { - _float_ xtemp = -999.99; - _float_ ytemp = -999.99; + M3::float_t xtemp = M3::float_t(-999.99); + M3::float_t ytemp = M3::float_t(-999.99); CurrSpline->GetKnot(k, xtemp, ytemp); SplineInfoArray[ParamNumber].xPts[k] = xtemp; } @@ -491,13 +487,13 @@ void SMonolith::ScanMasterSpline(std::vector int Counter = 0; //KS: Sanity check that everything was set correctly - for (_int_ i = 0; i < numParams; ++i) + for (M3::int_t i = 0; i < numParams; ++i) { // KS: We don't find segment for TF1, so ignore this if (SplineType[i] == kTF1_red) continue; - const _int_ nPoints = SplineInfoArray[i].nPts; - const _float_* xArray = SplineInfoArray[i].xPts; + const M3::int_t nPoints = SplineInfoArray[i].nPts; + const M3::float_t* xArray = SplineInfoArray[i].xPts; if (nPoints == -999 || xArray 
== NULL) { Counter++; if(Counter < 5) { @@ -530,18 +526,18 @@ void SMonolith::LoadSplineFile(std::string FileName) { throw MaCh3Exception(__FILE__ , __LINE__ ); #endif - if (std::getenv("MACH3") != NULL) { + if (std::getenv("MACH3") != nullptr) { FileName.insert(0, std::string(std::getenv("MACH3"))+"/"); } TFile *SplineFile = new TFile(FileName.c_str(), "OPEN"); - TTree *Settings = (TTree*)SplineFile->Get("Settings"); - TTree *Monolith = (TTree*)SplineFile->Get("Monolith"); - TTree *Monolith_TF1 = (TTree*)SplineFile->Get("Monolith_TF1"); - TTree *ParamInfo = (TTree*)SplineFile->Get("ParamInfo"); - TTree *XKnots = (TTree*)SplineFile->Get("XKnots"); - TTree *EventInfo = (TTree*)SplineFile->Get("EventInfo"); - TTree *FastSplineInfoTree = (TTree*)SplineFile->Get("FastSplineInfoTree"); + TTree *Settings = SplineFile->Get("Settings"); + TTree *Monolith = SplineFile->Get("Monolith"); + TTree *Monolith_TF1 = SplineFile->Get("Monolith_TF1"); + TTree *ParamInfo = SplineFile->Get("ParamInfo"); + TTree *XKnots = SplineFile->Get("XKnots"); + TTree *EventInfo = SplineFile->Get("EventInfo"); + TTree *FastSplineInfoTree = SplineFile->Get("FastSplineInfoTree"); unsigned int NEvents_temp; short int nParams_temp; @@ -563,7 +559,7 @@ void SMonolith::LoadSplineFile(std::string FileName) { NEvents = NEvents_temp; nParams = nParams_temp; - _max_knots = _max_knots_temp; + _max_knots = static_cast(_max_knots_temp); nKnots = nKnots_temp; NSplines_valid = NSplines_valid_temp; NTF1_valid = nTF1Valid_temp; @@ -573,10 +569,10 @@ void SMonolith::LoadSplineFile(std::string FileName) { //KS: Since we are going to copy it each step use fancy CUDA memory allocation #ifdef CUDA gpu_spline_handler->InitGPU_Segments(&segments); - gpu_spline_handler->InitGPU_Vals(&vals); + gpu_spline_handler->InitGPU_Vals(&ParamValues); #else segments = new short int[nParams](); - vals = new float[nParams](); + ParamValues = new float[nParams](); #endif cpu_nParamPerEvent.resize(2*NEvents); @@ -593,7 +589,7 @@ void 
SMonolith::LoadSplineFile(std::string FileName) { //KS: This is tricky as this variable use both by CPU and GPU, however if use CUDA we use cudaMallocHost #ifndef CUDA cpu_total_weights = new float[NEvents](); - cpu_weights_var = new float[NSplines_valid](); + cpu_weights_spline_var = new float[NSplines_valid](); cpu_weights_tf1_var = new float[NTF1_valid](); #endif @@ -644,20 +640,20 @@ void SMonolith::LoadSplineFile(std::string FileName) { cpu_nParamPerEvent_tf1[i] = nParamPerEvent_tf1; } - _int_ nPoints = 0; + M3::int_t nPoints = 0; float xtemp[20]; FastSplineInfoTree->SetBranchAddress("nPts", &nPoints); FastSplineInfoTree->SetBranchAddress("xPts", &xtemp); - SplineInfoArray = new FastSplineInfo[nParams]; - for (_int_ i = 0; i < nParams; ++i) { + SplineInfoArray.resize(nParams); + for (M3::int_t i = 0; i < nParams; ++i) { FastSplineInfoTree->GetEntry(i); // Fill the number of points SplineInfoArray[i].nPts = nPoints; if(nPoints == -999) continue; - SplineInfoArray[i].xPts = new _float_[SplineInfoArray[i].nPts]; - for (_int_ k = 0; k < SplineInfoArray[i].nPts; ++k) + SplineInfoArray[i].xPts = new M3::float_t[SplineInfoArray[i].nPts]; + for (M3::int_t k = 0; k < SplineInfoArray[i].nPts; ++k) { SplineInfoArray[i].xPts[k] = xtemp[k]; } @@ -677,7 +673,7 @@ void SMonolith::LoadSplineFile(std::string FileName) { void SMonolith::PrepareSplineFile() { // ***************************************** std::string FileName = "SplineFile.root"; - if (std::getenv("MACH3") != NULL) { + if (std::getenv("MACH3") != nullptr) { FileName.insert(0, std::string(std::getenv("MACH3"))+"/"); } @@ -773,18 +769,18 @@ void SMonolith::PrepareSplineFile() { SplineFile->cd(); EventInfo->Write(); - _int_ nPoints = 0; + M3::int_t nPoints = 0; float xtemp[20]; FastSplineInfoTree->Branch("nPts", &nPoints, "nPts/I"); FastSplineInfoTree->Branch("xPts", xtemp, "xPts[nPts]/F"); - for (_int_ i = 0; i < nParams; ++i) + for (M3::int_t i = 0; i < nParams; ++i) { nPoints = SplineInfoArray[i].nPts; - for 
(_int_ k = 0; k < SplineInfoArray[i].nPts; ++k) + for (M3::int_t k = 0; k < SplineInfoArray[i].nPts; ++k) { - xtemp[k] = SplineInfoArray[i].xPts[k]; + xtemp[k] = float(SplineInfoArray[i].xPts[k]); } FastSplineInfoTree->Fill(); } @@ -817,21 +813,18 @@ SMonolith::~SMonolith() { ); //KS: Since we declared them using CUDA alloc we have to free memory using also cuda functions - gpu_spline_handler->CleanupGPU_Segments(segments, vals); + gpu_spline_handler->CleanupGPU_Segments(segments, ParamValues); delete gpu_spline_handler; #else if(segments != nullptr) delete[] segments; - if(vals != nullptr) delete[] vals; + if(ParamValues != nullptr) delete[] ParamValues; if(cpu_total_weights != nullptr) delete[] cpu_total_weights; #endif - if(SplineInfoArray != nullptr) delete[] SplineInfoArray; if(cpu_weights != nullptr) delete[] cpu_weights; - if(cpu_weights_var != nullptr) delete[] cpu_weights_var; + if(cpu_weights_spline_var != nullptr) delete[] cpu_weights_spline_var; if(cpu_weights_tf1_var != nullptr) delete[] cpu_weights_tf1_var; - if(index_cpu != nullptr) delete[] index_cpu; - if(index_TF1_cpu != nullptr) delete[] index_TF1_cpu; //KS: Those might be deleted or not depending on GPU/CPU TSpline3/TF1 DEBUG or not hence we check if not NULL if(cpu_spline_handler != nullptr) @@ -888,7 +881,7 @@ void SMonolith::getSplineCoeff_SepMany(TSpline3_red* &spl, int &nPoints, float * } // The coefficients we're writing to - _float_ x, y, b, c, d; + M3::float_t x, y, b, c, d; // TSpline3 can only take doubles, not floats // But our GPU is slow with doubles, so need to cast to float for(int i = 0; i < Np; i++) { @@ -928,12 +921,12 @@ void SMonolith::Evaluate() { // The main call to the GPU gpu_spline_handler->RunGPU_SplineMonolith( #ifdef Weight_On_SplineBySpline_Basis - cpu_weights_var, + cpu_weights_spline_var, cpu_weights_tf1_var, #else cpu_total_weights, #endif - vals, + ParamValues, segments, NSplines_valid, NTF1_valid); @@ -956,8 +949,6 @@ void SMonolith::Evaluate() { //KS: Huge MP 
loop over all events calculating total weight ModifyWeights(); - - return; } #endif @@ -969,14 +960,14 @@ void SMonolith::FindSplineSegment() { // ************************* // Loop over the splines //KS: Tried multithreading here with 48 splines and it is faster with one thread, maybe in future multithreading will be worth revisiting - for (_int_ i = 0; i < nParams; ++i) + for (M3::int_t i = 0; i < nParams; ++i) { - const _int_ nPoints = SplineInfoArray[i].nPts; - const _float_* xArray = SplineInfoArray[i].xPts; + const M3::int_t nPoints = SplineInfoArray[i].nPts; + const M3::float_t* xArray = SplineInfoArray[i].xPts; // Get the variation for this reconfigure for the ith parameter - const _float_ xvar = *SplineInfoArray[i].splineParsPointer; - vals[i] = xvar; + const float xvar = float(*SplineInfoArray[i].splineParsPointer); + ParamValues[i] = xvar; // EM: if we have a parameter that has no response for any event (i.e. all splines have just one knot), then skip it and avoid a seg fault here // In principle, such parameters shouldn't really be included in the first place, but with new det syst splines this @@ -984,10 +975,10 @@ void SMonolith::FindSplineSegment() { if(xArray == NULL) continue; // The segment we're interested in (klow in ROOT code) - _int_ segment = 0; - _int_ kHigh = nPoints-1; + M3::int_t segment = 0; + M3::int_t kHigh = nPoints-1; //KS: We expect new segment is very close to previous - const _int_ PreviousSegment = SplineInfoArray[i].CurrSegment; + const M3::int_t PreviousSegment = SplineInfoArray[i].CurrSegment; // If the variation is below the lowest saved spline point if (xvar <= xArray[0]) { @@ -1002,12 +993,12 @@ void SMonolith::FindSplineSegment() { // If the variation is between the maximum and minimum, perform a binary search } else { // The top point we've got - _int_ kHalf = 0; + M3::int_t kHalf = 0; // While there is still a difference in the points (we haven't yet found the segment) // This is a binary search, incrementing segment and 
decrementing kHalf until we've found the segment while (kHigh - segment > 1) { // Increment the half-step - kHalf = (segment + kHigh)/2; + kHalf = M3::int_t((segment + kHigh)/2); // If our variation is above the kHalf, set the segment to kHalf if (xvar > xArray[kHalf]) { segment = kHalf; @@ -1024,7 +1015,7 @@ void SMonolith::FindSplineSegment() { // and literally just multiply lots of numbers together on the GPU without any algorithm // Update the values and which segment it belongs to SplineInfoArray[i].CurrSegment = segment; - segments[i] = SplineInfoArray[i].CurrSegment; + segments[i] = short(SplineInfoArray[i].CurrSegment); #ifdef DEBUG if (SplineInfoArray[i].xPts[segment] > xvar && segment != 0) { @@ -1034,7 +1025,7 @@ void SMonolith::FindSplineSegment() { MACH3LOG_ERROR("Found segment = {}", segment); MACH3LOG_ERROR("Doing variation = {}", xvar); MACH3LOG_ERROR("x in spline = {}", SplineInfoArray[i].xPts[segment]); - for (_int_ j = 0; j < SplineInfoArray[j].nPts; ++j) { + for (M3::int_t j = 0; j < SplineInfoArray[j].nPts; ++j) { MACH3LOG_ERROR(" {} = {}", j, SplineInfoArray[i].xPts[j]); } throw MaCh3Exception(__FILE__ , __LINE__ ); @@ -1064,7 +1055,7 @@ void SMonolith::CalcSplineWeights() { const short int segment = segments[Param]; //KS: Segment for coeff_x is simply parameter*max knots + segment as each parameters has the same spacing - const short int segment_X = Param*_max_knots+segment; + const short int segment_X = short(Param*_max_knots+segment); //KS: Find knot position in out monolithical structure const unsigned int CurrentKnotPos = cpu_spline_handler->nKnots_arr[splineNum]*_nCoeff_+segment*_nCoeff_; @@ -1076,12 +1067,12 @@ void SMonolith::CalcSplineWeights() { const float fC = cpu_spline_handler->coeff_many[CurrentKnotPos+2]; const float fD = cpu_spline_handler->coeff_many[CurrentKnotPos+3]; // The is the variation itself (needed to evaluate variation - stored spline point = dx) - const float dx = vals[Param] - 
cpu_spline_handler->coeff_x[segment_X]; + const float dx = ParamValues[Param] - cpu_spline_handler->coeff_x[segment_X]; //CW: Wooow, let's use some fancy intrinsic and pull down the processing time by <1% from normal multiplication! HURRAY - cpu_weights_var[splineNum] = fmaf(dx, fmaf(dx, fmaf(dx, fD, fC), fB), fY); + cpu_weights_spline_var[splineNum] = fmaf(dx, fmaf(dx, fmaf(dx, fD, fC), fB), fY); // Or for the more "easy to read" version: - //cpu_weights_var[splineNum] = (fY+dx*(fB+dx*(fC+dx*fD))); + //cpu_weights_spline_var[splineNum] = (fY+dx*(fB+dx*(fC+dx*fD))); } #ifdef MULTITHREAD @@ -1090,7 +1081,7 @@ void SMonolith::CalcSplineWeights() { for (unsigned int tf1Num = 0; tf1Num < NTF1_valid; ++tf1Num) { // The is the variation itself (needed to evaluate variation - stored spline point = dx) - const float x = vals[cpu_paramNo_TF1_arr[tf1Num]]; + const float x = ParamValues[cpu_paramNo_TF1_arr[tf1Num]]; // Read the coefficients const float a = cpu_coeff_TF1_many[tf1Num*_nTF1Coeff_]; @@ -1105,7 +1096,6 @@ void SMonolith::CalcSplineWeights() { //KS: End parallel region } #endif - return; } //********************************************************* @@ -1129,7 +1119,7 @@ void SMonolith::ModifyWeights(){ #pragma omp simd #endif for (unsigned int id = 0; id < numParams; ++id) { - totalWeight *= cpu_weights_var[startIndex + id]; + totalWeight *= cpu_weights_spline_var[startIndex + id]; } //Now TF1 // Extract the parameters for the current event @@ -1151,7 +1141,6 @@ void SMonolith::ModifyWeights(){ //KS: Name is confusing but what it does it make a nice mapping used for debugging ModifyWeights_GPU(); #endif - return; } //********************************************************* @@ -1164,8 +1153,8 @@ void SMonolith::ModifyWeights_GPU(){ #pragma omp parallel for #endif for (unsigned int i = 0; i < NSplines_total_large; ++i) { - if (index_cpu[i] >= 0) { - cpu_weights[i] = cpu_weights_var[index_cpu[i]]; + if (index_spline_cpu[i] >= 0) { + cpu_weights[i] = 
cpu_weights_spline_var[index_spline_cpu[i]]; } else if (index_TF1_cpu[i] >= 0) { cpu_weights[i] = cpu_weights_tf1_var[index_TF1_cpu[i]]; } else { @@ -1173,7 +1162,6 @@ void SMonolith::ModifyWeights_GPU(){ } } #endif - return; } //********************************************************* @@ -1191,8 +1179,6 @@ void SMonolith::PrintInitialsiation() { MACH3LOG_INFO("On average {:.2f} TF1 per event ({}/{})", float(NTF1_valid)/float(NEvents), NTF1_valid, NEvents); MACH3LOG_INFO("Size of TF1 coefficient (a,b,c,d,e) array = {:.2f} MB", double(sizeof(float)*NTF1_valid*_nTF1Coeff_)/1.E6); - - return; } //********************************************************* @@ -1202,5 +1188,4 @@ void SMonolith::SynchroniseMemTransfer() { #ifdef CUDA SynchroniseSplines(); #endif - return; } diff --git a/splines/SplineMonolith.h b/splines/SplineMonolith.h index f7a08b51c..5eb1ee46b 100644 --- a/splines/SplineMonolith.h +++ b/splines/SplineMonolith.h @@ -43,7 +43,7 @@ class SMonolith : public SplineBase { /// @param spline_ParsPointers Vector of pointers to spline params inline void setSplinePointers(std::vector< const double* > spline_ParsPointers) { splineParsPointer = spline_ParsPointers; - for (_int_ i = 0; i < nParams; ++i) SplineInfoArray[i].splineParsPointer = spline_ParsPointers[i]; + for (M3::int_t i = 0; i < nParams; ++i) SplineInfoArray[i].splineParsPointer = spline_ParsPointers[i]; }; /// The returned gpu weights, read by the GPU @@ -66,7 +66,7 @@ class SMonolith : public SplineBase { /// @param nTF1Valid Total number of valid (not null) TF1 inline void ScanMasterSpline(std::vector > & MasterSpline, unsigned int &nEvents, - int &MaxPoints, + short int &MaxPoints, short int &numParams, int &nSplines, unsigned int &NSplinesValid, @@ -108,11 +108,11 @@ class SMonolith : public SplineBase { /// Array of FastSplineInfo structs: keeps information on each xsec spline for fast evaluation /// Method identical to TSpline3::Eval(double) but faster because less operations - FastSplineInfo 
*SplineInfoArray; + std::vector SplineInfoArray; /// Store currently found segment they are not in FastSplineInfo as in case of GPU we need to copy paste it to GPU short int *segments; /// Store parameter values they are not in FastSplineInfo as in case of GPU we need to copy paste it to GPU - float *vals; + float *ParamValues; /// This holds pointer to parameter position which we later copy paste it to GPU std::vector< const double* > splineParsPointer; @@ -121,11 +121,11 @@ class SMonolith : public SplineBase { /// Number of NIWG parameters that have splines short int nParams; /// Max knots for production - int _max_knots; + short int _max_knots; /// holds the index for good splines; don't do unsigned since starts with negative value! - int *index_cpu; + std::vector index_spline_cpu; /// holds the index for good TF1; don't do unsigned since starts with negative value! - int *index_TF1_cpu; + std::vector index_TF1_cpu; /// Number of valid splines unsigned int NSplines_valid; @@ -141,8 +141,7 @@ class SMonolith : public SplineBase { unsigned int nTF1coeff; /// CPU arrays to hold weight for each spline - float *cpu_weights_var; - + float *cpu_weights_spline_var; /// CPU arrays to hold weight for each TF1 float *cpu_weights_tf1_var; diff --git a/splines/SplineStructs.h b/splines/SplineStructs.h index 42845c719..1584f0a13 100644 --- a/splines/SplineStructs.h +++ b/splines/SplineStructs.h @@ -4,6 +4,16 @@ #include "covariance/covarianceXsec.h" #include "samplePDF/Structs.h" +#include + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wuseless-cast" +#pragma GCC diagnostic ignored "-Wfloat-conversion" + + +/// @file SplineStructs.h +/// @brief Contains structures and helper functions for handling spline representations of systematic parameters in the MaCh3. 
+ // ******************* /// @brief CW: Add a struct to hold info about the splinified xsec parameters and help with FindSplineSegment struct FastSplineInfo { @@ -23,83 +33,18 @@ struct FastSplineInfo { } /// Number of points in spline - _int_ nPts; + M3::int_t nPts; /// Array of the knots positions - _float_ *xPts; + M3::float_t *xPts; /// Array of what segment of spline we're currently interested in. Gets updated once per MCMC iteration - _int_ CurrSegment; + M3::int_t CurrSegment; /// Array of the knots positions const double* splineParsPointer; }; -// ******************************************** -/// @brief CW: Generic xsec class. Can use TF1 or TSpline3 or TSpline5 here, tjoho -template -class XSecStruct { -// ******************************************** -public: - /// @brief CW: The light constructor - XSecStruct(_int_ NumberOfSplines) { - nParams = NumberOfSplines; - Func.reserve(nParams); - for (int i = 0; i < nParams; ++i) { - Func[i] = NULL; - } - } - - /// @brief CW: The empty constructor - XSecStruct() { - nParams = 0; - Func = NULL; - }; - - /// @brief CW: The light destructor - ~XSecStruct() { - for (int i = 0; i < nParams; ++i) { - if (Func[i]) delete Func[i]; - } - } - - /// @brief CW: Get number of splines - inline _int_ GetNumberOfParams() { return nParams; } - - /// @brief CW: The Printer - inline void Print() { - MACH3LOG_INFO(" Splines:"); - for (int i = 0; i < nParams; ++i) { - if (!Func[i]) continue; - MACH3LOG_INFO(" {:<25}", Func[i]->GetName()); - } - } - - /// @brief CW: Set the number of splines for this event - inline void SetSplineNumber(const _int_ NumberOfSplines) { - nParams = NumberOfSplines; - Func = new T[nParams]; - } - - /// @brief CW: Get the function for the nth spline - inline T GetFunc(const _int_ nSpline) { return Func[nSpline]; } - /// @brief CW: Set the function for the nth spline - inline void SetFunc(const _int_ nSpline, T Function) { Func[nSpline] = Function; } - /// @brief CW: Eval the current variation - inline 
double Eval(const _int_ nSpline, const _float_ variation) { - // Some will be NULL, check this - if (Func[nSpline]) { - return Func[nSpline]->Eval(variation); - } else { - return 1.0; - } - } -private: - /// Number of parameters - _int_ nParams; - /// The function - T* Func; -}; // *************************************************************************** /// @brief EM: Apply capping to knot weight for specified spline parameter. param graph needs to have been set in xsecgraph array first @@ -166,7 +111,7 @@ class TResponseFunction_red { /// @brief KS: Printer virtual void Print()=0; /// @brief DL: Get number of points - virtual _int_ GetNp()=0; + virtual M3::int_t GetNp()=0; }; // ************************ @@ -189,7 +134,7 @@ class TF1_red: public TResponseFunction_red { } /// @brief The useful constructor with deep copy - TF1_red(_int_ nSize, _float_* Array) : TResponseFunction_red() { + TF1_red(M3::int_t nSize, M3::float_t* Array) : TResponseFunction_red() { length = nSize; for (int i = 0; i < length; ++i) { Par[i] = Array[i]; @@ -204,11 +149,11 @@ class TF1_red: public TResponseFunction_red { /// @brief Set the function inline void SetFunc(TF1* &Func) { - length = Func->GetNpar(); + length = M3::int_t(Func->GetNpar()); if (Par != NULL) delete[] Par; - Par = new _float_[length]; + Par = new M3::float_t[length]; for (int i = 0; i < length; ++i) { - Par[i] = Func->GetParameter(i); + Par[i] = M3::float_t(Func->GetParameter(i)); } delete Func; Func = NULL; @@ -238,12 +183,12 @@ class TF1_red: public TResponseFunction_red { } /// @brief Set a parameter to a value - inline void SetParameter(_int_ Parameter, _float_ Value) { + inline void SetParameter(M3::int_t Parameter, M3::float_t Value) { Par[Parameter] = Value; } /// @brief Get a parameter value - double GetParameter(_int_ Parameter) { + double GetParameter(M3::int_t Parameter) { if (Parameter > length) { MACH3LOG_ERROR("You requested parameter number {} but length is {} parameters", Parameter, length); throw 
MaCh3Exception(__FILE__ , __LINE__ ); @@ -253,9 +198,9 @@ class TF1_red: public TResponseFunction_red { } /// @brief Set the size - inline void SetSize(_int_ nSpline) { + inline void SetSize(M3::int_t nSpline) { length = nSpline; - Par = new _float_[length]; + Par = new M3::float_t[length]; } /// @brief Get the size inline int GetSize() { return length; } @@ -278,12 +223,12 @@ class TF1_red: public TResponseFunction_red { } /// @brief DL: Get number of points - inline _int_ GetNp() override { return length; } + inline M3::int_t GetNp() override { return length; } private: /// The parameters - _float_* Par; - _int_ length; + M3::float_t* Par; + M3::int_t length; }; // ************************ @@ -308,16 +253,16 @@ class TSpline3_red: public TResponseFunction_red { } /// @brief constructor taking parameters - TSpline3_red(_float_ *X, _float_ *Y, _int_ N, _float_ **P) : TResponseFunction_red() { + TSpline3_red(M3::float_t *X, M3::float_t *Y, M3::int_t N, M3::float_t **P) : TResponseFunction_red() { nPoints = N; // Save the parameters for each knot - Par = new _float_*[nPoints]; + Par = new M3::float_t*[nPoints]; // Save the positions of the knots - XPos = new _float_[nPoints]; + XPos = new M3::float_t[nPoints]; // Save the y response at each knot - YResp = new _float_[nPoints]; + YResp = new M3::float_t[nPoints]; for(int j = 0; j < N; ++j){ - Par[j] = new _float_[3]; + Par[j] = new M3::float_t[3]; Par[j][0] = P[j][0]; Par[j][1] = P[j][1]; Par[j][2] = P[j][2]; @@ -334,7 +279,7 @@ class TSpline3_red: public TResponseFunction_red { } /// @brief Set the function inline void SetFunc(TSpline3* &spline, SplineInterpolation InterPolation = kTSpline3) { - nPoints = spline->GetNp(); + nPoints = M3::int_t(spline->GetNp()); if (Par != NULL) { for (int i = 0; i < nPoints; ++i) { delete[] Par[i]; @@ -346,25 +291,25 @@ class TSpline3_red: public TResponseFunction_red { if (XPos != NULL) delete[] XPos; if (YResp != NULL) delete[] YResp; // Save the parameters for each knot - Par = 
new _float_*[nPoints]; + Par = new M3::float_t*[nPoints]; // Save the positions of the knots - XPos = new _float_[nPoints]; + XPos = new M3::float_t[nPoints]; // Save the y response at each knot - YResp = new _float_[nPoints]; + YResp = new M3::float_t[nPoints]; //KS: Default TSpline3 ROOT implementation if(InterPolation == kTSpline3) { for (int i = 0; i < nPoints; ++i) { // 3 is the size of the TSpline3 coefficients - Par[i] = new _float_[3]; + Par[i] = new M3::float_t[3]; double x = -999.99, y = -999.99, b = -999.99, c = -999.99, d = -999.99; spline->GetCoeff(i, x, y, b, c, d); - XPos[i] = x; - YResp[i] = y; - Par[i][0] = b; - Par[i][1] = c; - Par[i][2] = d; + XPos[i] = M3::float_t(x); + YResp[i] = M3::float_t(y); + Par[i][0] = M3::float_t(b); + Par[i][1] = M3::float_t(c); + Par[i][2] = M3::float_t(d); } } //CW: Reduce to use linear spline interpolation for certain parameters @@ -377,17 +322,17 @@ class TSpline3_red: public TResponseFunction_red { { for (int k = 0; k < nPoints; ++k) { // 3 is the size of the TSpline3 coefficients - Par[k] = new _float_[3]; + Par[k] = new M3::float_t[3]; Double_t x1, y1, b1, c1, d1, x2, y2, b2, c2, d2 = 0; spline->GetCoeff(k, x1, y1, b1, c1, d1); spline->GetCoeff(k+1, x2, y2, b2, c2, d2); double tempb = (y2-y1)/(x2-x1); - XPos[k] = x1; - YResp[k] = y1; - Par[k][0] = tempb; - Par[k][1] = 0; - Par[k][2] = 0; + XPos[k] = M3::float_t(x1); + YResp[k] = M3::float_t(y1); + Par[k][0] = M3::float_t(tempb); + Par[k][1] = M3::float_t(0); + Par[k][2] = M3::float_t(0); } } //EM: Akima spline is similar to regular cubic spline but is allowed to be discontinuous in 2nd derivative and coefficients in any segment @@ -397,32 +342,32 @@ class TSpline3_red: public TResponseFunction_red { // get the knot values for the spline for (int i = 0; i < nPoints; ++i) { // 3 is the size of the TSpline3 coefficients - Par[i] = new _float_[3]; + Par[i] = new M3::float_t[3]; double x = -999.99, y = -999.99; spline->GetKnot(i, x, y); - XPos[i] = x; - YResp[i] = y; 
+ XPos[i] = M3::float_t(x); + YResp[i] = M3::float_t(y); } - _float_* mvals = new _float_[nPoints + 3]; - _float_* svals = new _float_[nPoints + 1]; + M3::float_t* mvals = new M3::float_t[nPoints + 3]; + M3::float_t* svals = new M3::float_t[nPoints + 1]; for (int i = -2; i <= nPoints; ++i) { // if segment is first or last or 2nd to first or last, needs to be dealt with slightly differently; // need to estimate the values for additinal points which would lie outside of the spline if(i ==-2){ - mvals[i+2] = 3.0 * (YResp[1] - YResp[0]) / (XPos[1] - XPos[0]) - 2.0*(YResp[2] - YResp[1]) / (XPos[2] - XPos[1]); + mvals[i+2] = M3::float_t(3.0 * (YResp[1] - YResp[0]) / (XPos[1] - XPos[0]) - 2.0*(YResp[2] - YResp[1]) / (XPos[2] - XPos[1])); } else if(i==-1){ - mvals[i+2] = 2.0 * (YResp[1] - YResp[0]) / (XPos[1] - XPos[0]) - (YResp[2] - YResp[1]) / (XPos[2] - XPos[1]); + mvals[i+2] = M3::float_t(2.0 * (YResp[1] - YResp[0]) / (XPos[1] - XPos[0]) - (YResp[2] - YResp[1]) / (XPos[2] - XPos[1])); } else if(i==nPoints){ - mvals[i+2] = 3.0 * (YResp[nPoints-1] - YResp[nPoints-2]) / (XPos[nPoints-1] - XPos[nPoints-2]) - 2.0*(YResp[nPoints-2] - YResp[nPoints-3]) / (XPos[nPoints-2] - XPos[nPoints-3]); + mvals[i+2] = M3::float_t(3.0 * (YResp[nPoints-1] - YResp[nPoints-2]) / (XPos[nPoints-1] - XPos[nPoints-2]) - 2.0*(YResp[nPoints-2] - YResp[nPoints-3]) / (XPos[nPoints-2] - XPos[nPoints-3])); } else if(i == nPoints - 1){ - mvals[i+2] = 2.0 * (YResp[nPoints-1] - YResp[nPoints-2]) / (XPos[nPoints-1] - XPos[nPoints-2]) - (YResp[nPoints-2] - YResp[nPoints-3]) / (XPos[nPoints-2] - XPos[nPoints-3]); + mvals[i+2] = M3::float_t(2.0 * (YResp[nPoints-1] - YResp[nPoints-2]) / (XPos[nPoints-1] - XPos[nPoints-2]) - (YResp[nPoints-2] - YResp[nPoints-3]) / (XPos[nPoints-2] - XPos[nPoints-3])); } //standard internal segment else{ @@ -431,19 +376,19 @@ class TSpline3_red: public TResponseFunction_red { } for(int i =2; i<=nPoints+2; i++){ - if (abs(mvals[i+1] - mvals[i]) + abs(mvals[i-1] - mvals[i-2]) != 
0.0){ - svals[i-2] = (abs(mvals[i+1] - mvals[i]) * mvals[i-1] + abs(mvals[i-1] - mvals[i-2]) *mvals[i]) / (abs(mvals[i+1] - mvals[i]) + abs(mvals[i-1] - mvals[i-2])); + if (std::abs(mvals[i+1] - mvals[i]) + std::abs(mvals[i-1] - mvals[i-2]) != 0.0){ + svals[i-2] = (std::abs(mvals[i+1] - mvals[i]) * mvals[i-1] + std::abs(mvals[i-1] - mvals[i-2]) *mvals[i]) / (std::abs(mvals[i+1] - mvals[i]) + std::abs(mvals[i-1] - mvals[i-2])); } else{svals[i-2] = mvals[i];} } // calculate the coefficients for the spline for(int i = 0; i GetCoeff(i, x, y, b, c, d); if((c == 0.0 && d == 0.0)){ - Par[i][0] = b; - Par[i][1] = 0.0; - Par[i][2] = 0.0; + Par[i][0] = M3::float_t(b); + Par[i][1] = M3::float_t(0.0); + Par[i][2] = M3::float_t(0.0); } } delete[] mvals; @@ -470,20 +415,20 @@ class TSpline3_red: public TResponseFunction_red { else if(InterPolation == kMonotonic) { // values of the secants at each point (for calculating monotone spline) - _float_ * Secants = new _float_[nPoints -1]; + M3::float_t * Secants = new M3::float_t[nPoints -1]; // values of the tangens at each point (for calculating monotone spline) - _float_ * Tangents = new _float_[nPoints]; + M3::float_t * Tangents = new M3::float_t[nPoints]; // get the knot values for the spline for (int i = 0; i < nPoints; ++i) { // 3 is the size of the TSpline3 coefficients - Par[i] = new _float_[3]; + Par[i] = new M3::float_t[3]; double x = -999.99, y = -999.99; spline->GetKnot(i, x, y); - XPos[i] = x; - YResp[i] = y; + XPos[i] = M3::float_t(x); + YResp[i] = M3::float_t(y); Tangents[i] = 0.0; } @@ -510,13 +455,13 @@ class TSpline3_red: public TResponseFunction_red { Tangents[0] = Secants[0]; Tangents[nPoints-1] = Secants[nPoints -2]; - _float_ alpha; - _float_ beta; + M3::float_t alpha; + M3::float_t beta; // second pass over knots to calculate tangents for (int i = 1; i < nPoints-1; ++i) { if ((Secants[i-1] >= 0.0 && Secants[i] >= 0.0) | (Secants[i-1] < 0.0 && Secants[i] < 0.0)){ //check for same sign - Tangents[i] = 
(Secants[i-1] + Secants[i]) /2.0; + Tangents[i] = M3::float_t((Secants[i-1] + Secants[i]) /2.0); } } @@ -539,21 +484,21 @@ class TSpline3_red: public TResponseFunction_red { } if (alpha * alpha + beta * beta >9.0){ - _float_ tau = 3.0 / sqrt(alpha * alpha + beta * beta); + M3::float_t tau = M3::float_t(3.0 / std::sqrt(alpha * alpha + beta * beta)); Tangents[i] = tau * alpha * Secants[i]; Tangents[i+1] = tau * beta * Secants[i]; } } } // finished rescaling tangents // fourth pass over knots to calculate the coefficients for the spline - _float_ dx; + M3::float_t dx; for(int i = 0; i GetCoeff(i, x, y, b, c, d); if((c == 0.0 && d == 0.0)){ - Par[i][0] = b; + Par[i][0] = M3::float_t(b); Par[i][1] = 0.0; Par[i][2] = 0.0; } @@ -656,7 +601,7 @@ class TSpline3_red: public TResponseFunction_red { // Get the segment for this variation int segment = FindX(var); // The get the coefficients for this variation - _float_ x = -999.99, y = -999.99, b = -999.99, c = -999.99, d = -999.99; + M3::float_t x = M3::float_t(-999.99), y = M3::float_t(-999.99), b = M3::float_t(-999.99), c = M3::float_t(-999.99), d = M3::float_t(-999.99); GetCoeff(segment, x, y, b, c, d); double dx = var - x; // Evaluate the third order polynomial @@ -665,15 +610,15 @@ class TSpline3_red: public TResponseFunction_red { } /// @brief CW: Get the number of points - inline _int_ GetNp() override { return nPoints; } + inline M3::int_t GetNp() override { return nPoints; } // Get the ith knot's x and y position - inline void GetKnot(int i, _float_ &xtmp, _float_ &ytmp) { + inline void GetKnot(int i, M3::float_t &xtmp, M3::float_t &ytmp) { xtmp = XPos[i]; ytmp = YResp[i]; } /// @brief CW: Get the coefficient of a given segment - inline void GetCoeff(int segment, _float_ &x, _float_ &y, _float_ &b, _float_ &c, _float_ &d) { + inline void GetCoeff(int segment, M3::float_t &x, M3::float_t &y, M3::float_t &b, M3::float_t &c, M3::float_t &d) { b = Par[segment][0]; c = Par[segment][1]; d = Par[segment][2]; @@ -710,13 
+655,13 @@ class TSpline3_red: public TResponseFunction_red { protected: //changed to protected from private so can be accessed by derived classes /// Number of points/knot in TSpline3 - _int_ nPoints; + M3::int_t nPoints; /// Always uses a third order polynomial, so hard-code the number of coefficients in implementation - _float_ **Par; + M3::float_t **Par; /// Positions of each x for each knot - _float_ *XPos; + M3::float_t *XPos; /// y-value for each knot - _float_ *YResp; + M3::float_t *YResp; }; // ***************************************** @@ -725,7 +670,7 @@ class TSpline3_red: public TResponseFunction_red { inline bool isFlat(TSpline3_red* &spl) { // ***************************************** int Np = spl->GetNp(); - _float_ x, y, b, c, d; + M3::float_t x, y, b, c, d; // Go through spline segment parameters, // Get y values for each spline knot, // Every knot must evaluate to 1.0 to create a flat spline @@ -813,3 +758,6 @@ inline std::vector > ReduceTF1(std::vectorGetNp(); //Now to fill up our coefficient arrayss - _float_* tmpXCoeffArr = new _float_[splineKnots]; - _float_* tmpManyCoeffArr = new _float_[splineKnots*4]; + M3::float_t* tmpXCoeffArr = new M3::float_t[splineKnots]; + M3::float_t* tmpManyCoeffArr = new M3::float_t[splineKnots*4]; int iCoeff=coeffindexvec[splineindex]; getSplineCoeff_SepMany(splineindex, tmpXCoeffArr, tmpManyCoeffArr); for(int i = 0; i < splineKnots; i++){ - if(tmpXCoeffArr[i]==-999){ - MACH3LOG_ERROR("looks like we've got a bad X, index = {}", i); - throw MaCh3Exception(__FILE__ , __LINE__ ); - } xcoeff_arr[iCoeff+i]=tmpXCoeffArr[i]; for(int j=0; j<4; j++){ - if(tmpManyCoeffArr[i*4+j]==-999){ - MACH3LOG_ERROR("Bad ybcd, index: {}, {}", i, j); - MACH3LOG_ERROR("Param Values: {}, {}, {}, {}", - tmpManyCoeffArr[i*4], tmpManyCoeffArr[i*4+1], tmpManyCoeffArr[i*4+2], tmpManyCoeffArr[i*4+3]); - throw MaCh3Exception(__FILE__ , __LINE__ ); - } - manycoeff_arr[(iCoeff+i)*4+j]=tmpManyCoeffArr[i*4+j]; + 
manycoeff_arr[(iCoeff+i)*4+j]=tmpManyCoeffArr[i*4+j]; } } delete tmpXCoeffArr; @@ -237,20 +230,20 @@ void splineFDBase::FindSplineSegment() for (int iSyst = 0; iSyst < nUniqueSysts; iSyst++) { int nPoints = UniqueSystNKnots[iSyst]; - std::vector<_float_> xArray = UniqueSystXPts[iSyst]; + std::vector<M3::float_t> xArray = UniqueSystXPts[iSyst]; // Get the variation for this reconfigure for the ith parameter int GlobalIndex = UniqueSystIndices[iSyst]; - _float_ xvar=_float_(xsec->getParProp(GlobalIndex)); + M3::float_t xvar = M3::float_t(xsec->getParProp(GlobalIndex)); xVarArray[iSyst]=xvar; - _int_ segment = 0; - _int_ kHigh = nPoints - 1; + M3::int_t segment = 0; + M3::int_t kHigh = M3::int_t(nPoints - 1); //KS: We expect new segment is very close to previous - const _int_ PreviousSegment = UniqueSystCurrSegment[iSyst]; + const M3::int_t PreviousSegment = M3::int_t(UniqueSystCurrSegment[iSyst]); //KS: It is quite probable the new segment is same as in previous step so try to avoid binary search if( xArray[PreviousSegment+1] > xvar && xvar >= xArray[PreviousSegment] ){segment = PreviousSegment;} // If the variation is below the lowest saved spline point @@ -263,12 +256,12 @@ void splineFDBase::FindSplineSegment() //KS: It is quite probable the new segment is same as in previous step so try to avoid binary search } else { // The top point we've got - _int_ kHalf = 0; + M3::int_t kHalf = 0; // While there is still a difference in the points (we haven't yet found the segment) // This is a binary search, incrementing segment and decrementing kHalf until we've found the segment while (kHigh - segment > 1) { // Increment the half-step - kHalf = (segment + kHigh)/2; + kHalf = M3::int_t((segment + kHigh)/2); // If our variation is above the kHalf, set the segment to kHalf if (xvar > xArray[kHalf]) { segment = kHalf; @@ -279,7 +272,7 @@ void splineFDBase::FindSplineSegment() } // End the while: we've now done our binary search } // End the else: we've now found our point - if (segment >=
nPoints-1 && nPoints > 1){segment = nPoints-2;} + if (segment >= nPoints-1 && nPoints > 1){segment = M3::int_t(nPoints-2);} UniqueSystCurrSegment[iSyst] = segment; //#ifdef DEBUG @@ -291,7 +284,7 @@ void splineFDBase::FindSplineSegment() // std::cerr << "Found segment = " << segment << std::endl; // std::cerr << "Doing variation = " << xvar << std::endl; // std::cerr << "x in spline = " << SplineInfoArray[i].xPts[segment] << std::endl; -// for (__int__ j = 0; j < SplineInfoArray[j].nPts; ++j) { +// for (_M3::int_t_ j = 0; j < SplineInfoArray[j].nPts; ++j) { // std::cerr << " " << j << " = " << SplineInfoArray[i].xPts[j] << std::endl; // } // std::cerr << __FILE__ << ":" << __LINE__ << std::endl; @@ -312,25 +305,30 @@ void splineFDBase::CalcSplineWeights() { int iSpline = uniquecoeffindices[iCoeff]; - short int uniqueIndex=uniquesplinevec_Monolith[iSpline]; - short int currentsegment=UniqueSystCurrSegment[uniqueIndex]; + short int uniqueIndex=short(uniquesplinevec_Monolith[iSpline]); + short int currentsegment=short(UniqueSystCurrSegment[uniqueIndex]); int segCoeff = coeffindexvec[iSpline]+currentsegment; // These are what we can extract from the TSpline3 - _float_ x = xcoeff_arr[segCoeff]; - _float_ y = manycoeff_arr[(segCoeff)*4+kCoeffY]; - _float_ b = manycoeff_arr[(segCoeff)*4+kCoeffB]; - _float_ c = manycoeff_arr[(segCoeff)*4+kCoeffC]; - _float_ d = manycoeff_arr[(segCoeff)*4+kCoeffD]; + M3::float_t x = xcoeff_arr[segCoeff]; + M3::float_t y = manycoeff_arr[(segCoeff)*4+kCoeffY]; + M3::float_t b = manycoeff_arr[(segCoeff)*4+kCoeffB]; + M3::float_t c = manycoeff_arr[(segCoeff)*4+kCoeffC]; + M3::float_t d = manycoeff_arr[(segCoeff)*4+kCoeffD]; // Get the variation for this reconfigure for the ith parameter - _float_ xvar = xVarArray[uniqueIndex]; + M3::float_t xvar = xVarArray[uniqueIndex]; // The Delta(x) - _float_ dx = xvar - x; + M3::float_t dx = xvar - x; //Speedy 1% time boost https://en.cppreference.com/w/c/numeric/math/fma (see ND code!) 
- _float_ weight = fmaf(dx, fmaf(dx, fmaf(dx, d, c), b), y); + M3::float_t weight = 0; +#ifdef _LOW_MEMORY_STRUCTS_ + weight = std::fmaf(dx, std::fmaf(dx, std::fmaf(dx, d, c), b), y); +#else + weight = std::fma(dx, std::fma(dx, std::fma(dx, d, c), b), y); +#endif //This is the speedy version of writing dx^3+b*dx^2+c*dx+d @@ -338,7 +336,11 @@ void splineFDBase::CalcSplineWeights() //possible with the fmaf line above? if(weight<0){weight=0;} //Stops is getting negative weights - weightvec_Monolith[iSpline]=double(weight); +// LP - ignore the diagnostic here as it is only useless if M3::float_t = double +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wuseless-cast" + weightvec_Monolith[iSpline] = double(weight); +#pragma GCC diagnostic pop } } @@ -358,7 +360,7 @@ void splineFDBase::BuildSampleIndexingArray(std::string SampleName) for (int iSyst = 0; iSyst < nSplineSysts; iSyst++) { // Loop over systematics std::vector<std::vector<std::vector<std::vector<int>>>> indexvec_Mode; - int nModesInSyst = SplineModeVecs[iSample][iSyst].size(); + int nModesInSyst = int(SplineModeVecs[iSample][iSyst].size()); for (int iMode = 0; iMode < nModesInSyst; iMode++) { // Loop over modes std::vector<std::vector<std::vector<int>>> indexvec_Var1; @@ -392,7 +394,10 @@ std::vector<TAxis*> splineFDBase::FindSplineBinning(std::string FileName, std:: std::vector<TAxis*> ReturnVec; int iSample=getSampleIndex(SampleName); - TFile *File = new TFile(FileName.c_str()); + TH2F* Hist2D = nullptr; + TH3F* Hist3D = nullptr; + + auto File = std::unique_ptr<TFile>(TFile::Open(FileName.c_str(), "READ")); if (!File || File->IsZombie()) { MACH3LOG_ERROR("File {} not found", FileName); @@ -406,21 +411,20 @@ std::vector<TAxis*> splineFDBase::FindSplineBinning(std::string FileName, std:: bool isHist2D = false; bool isHist3D = false; - TH2F *Hist2D = NULL; - TH3F *Hist3D = NULL; - TObject *Obj = File->Get("dev_tmp_0_0"); + //If you can't find dev_tmp_0_0 then this will cause a problem if (!Obj) { Obj = File->Get("dev_tmp.0.0"); if (!Obj) { - MACH3LOG_ERROR("Error: could not find dev_tmp_0_0 in
spline file. Spline binning will not be set!"); + MACH3LOG_ERROR("Error: could not find dev_tmp_0_0 in spline file. Spline binning cannot be set!"); MACH3LOG_ERROR("FileName: {}", FileName); throw MaCh3Exception(__FILE__ , __LINE__ ); } } + //Now check if dev_tmp_0_0 is a TH2 i.e. specifying the dimensions of the splines is 2D if (Obj->IsA() == TH2F::Class()) { isHist2D = true; @@ -445,44 +449,41 @@ std::vector<TAxis*> splineFDBase::FindSplineBinning(std::string FileName, std:: MACH3LOG_ERROR("Trying to load a 2D spline template when nDim={}", Dimensions[iSample]); throw MaCh3Exception(__FILE__, __LINE__); } - Hist2D = (TH2F *)File->Get("dev_tmp_0_0"); + Hist2D = File->Get<TH2F>("dev_tmp_0_0"); } if (isHist3D) { + Hist3D = File->Get<TH3F>(("dev_tmp_0_0")); if (Dimensions[iSample] != 3 && Hist3D->GetZaxis()->GetNbins() != 1) { MACH3LOG_ERROR("Trying to load a 3D spline template when nDim={}", Dimensions[iSample]); throw MaCh3Exception(__FILE__ , __LINE__ ); } - Hist3D = (TH3F *)Obj->Clone(); } - int nDummyBins = 1; - double *DummyEdges = new double[2]; - DummyEdges[0] = -1e15; - DummyEdges[1] = 1e15; - TAxis *DummyAxis = new TAxis(nDummyBins, DummyEdges); + double DummyEdges[2] = {-1e15, 1e15}; + auto DummyAxis = std::unique_ptr<TAxis>(new TAxis(1, DummyEdges)); if (Dimensions[iSample] == 2) { if(isHist2D){ - ReturnVec.push_back((TAxis *)(Hist2D->GetXaxis())->Clone()); - ReturnVec.push_back((TAxis *)(Hist2D->GetYaxis())->Clone()); - ReturnVec.push_back((TAxis *)(DummyAxis)->Clone()); + ReturnVec.push_back(static_cast<TAxis*>(Hist2D->GetXaxis()->Clone())); + ReturnVec.push_back(static_cast<TAxis*>(Hist2D->GetYaxis()->Clone())); + ReturnVec.push_back(static_cast<TAxis*>(DummyAxis->Clone())); } else if(isHist3D){ - ReturnVec.push_back((TAxis *)(Hist3D->GetXaxis())->Clone()); - ReturnVec.push_back((TAxis *)(Hist3D->GetYaxis())->Clone()); - ReturnVec.push_back((TAxis *)(DummyAxis)->Clone()); + ReturnVec.push_back(static_cast<TAxis*>(Hist3D->GetXaxis()->Clone())); + ReturnVec.push_back(static_cast<TAxis*>(Hist3D->GetYaxis()->Clone()));
+ ReturnVec.push_back(static_cast<TAxis*>(DummyAxis->Clone())); } } else if (Dimensions[iSample] == 3) { - ReturnVec.push_back((TAxis *)(Hist3D->GetXaxis())->Clone()); - ReturnVec.push_back((TAxis *)(Hist3D->GetYaxis())->Clone()); - ReturnVec.push_back((TAxis *)(Hist3D->GetZaxis())->Clone()); + ReturnVec.push_back(static_cast<TAxis*>(Hist3D->GetXaxis()->Clone())); + ReturnVec.push_back(static_cast<TAxis*>(Hist3D->GetYaxis()->Clone())); + ReturnVec.push_back(static_cast<TAxis*>(Hist3D->GetZaxis()->Clone())); } else { @@ -496,16 +497,11 @@ std::vector<TAxis*> splineFDBase::FindSplineBinning(std::string FileName, std:: } MACH3LOG_INFO("Left PrintBinning now tidying up"); - //This could be NULL if 2D - if(isHist2D){ - delete Hist2D; - } else { - delete Hist3D; - } + //Make sure these actually get deleted + delete Hist2D; + delete Hist3D; File->Close(); - delete File; - delete DummyAxis; return ReturnVec; } @@ -663,13 +659,13 @@ void splineFDBase::PrepForReweight() }//Syst loop end } - nUniqueSysts = UniqueSystSplines.size(); + nUniqueSysts = int(UniqueSystSplines.size()); // DB Find the number of splines knots which assumes each instance of the syst has the same number of knots UniqueSystNKnots.resize(nUniqueSysts); UniqueSystCurrSegment.resize(nUniqueSysts); UniqueSystXPts.resize(nUniqueSysts); - xVarArray=new _float_[nUniqueSysts]; + xVarArray=new M3::float_t[nUniqueSysts]; for (int iSpline = 0; iSpline < nUniqueSysts; iSpline++) { @@ -677,14 +673,9 @@ void splineFDBase::PrepForReweight() UniqueSystXPts[iSpline].resize(UniqueSystNKnots[iSpline]); for (int iKnot = 0; iKnot < UniqueSystNKnots[iSpline]; iKnot++) { - _float_ xPoint = -999; - _float_ yPoint = -999; + M3::float_t xPoint; + M3::float_t yPoint; UniqueSystSplines[iSpline]->GetKnot(iKnot, xPoint, yPoint); - if (xPoint == -999 || yPoint == -999) - { - MACH3LOG_ERROR("Something has gone wrong in the knot finding"); - throw MaCh3Exception(__FILE__ , __LINE__ ); - } UniqueSystXPts[iSpline][iKnot] = xPoint; } //ETA - let this just be set as the first
segment by default @@ -693,8 +684,7 @@ void splineFDBase::PrepForReweight() } - std::cout << "nUniqueSysts:" << nUniqueSysts << " -----------------" << std::endl; - std::cout << std::endl; + MACH3LOG_INFO("nUniqueSysts: {}", nUniqueSysts); std::cout << std::setw(15) << "Spline Index" << " | " << std::setw(20) << "Syst Name" @@ -748,8 +738,9 @@ void splineFDBase::PrepForReweight() //**************************************** // Rather work with spline coefficients in the splines, let's copy ND and use coefficient arrays -void splineFDBase::getSplineCoeff_SepMany(int splineindex, _float_* &xArray, _float_* &manyArray){ +void splineFDBase::getSplineCoeff_SepMany(int splineindex, M3::float_t* &xArray, M3::float_t* &manyArray){ //**************************************** + // Initialise all arrays to 1.0 int nPoints; //No point evaluating a flat spline @@ -764,32 +755,27 @@ void splineFDBase::getSplineCoeff_SepMany(int splineindex, _float_* &xArray, _fl for(int i=0; iGetCoeff(i, x, y, b, c, d); - //Let's save some memory and store them as floats! 
(It's a surprise tool that will help with GPU later) - xArray[i]=_float_(x); - - //Might as well copy ND here and - xArray[i] = _float_(x); - manyArray[i*4] = _float_(y); // 4 because manyArray stores y,b,c,d - manyArray[i*4+1] = _float_(b); - manyArray[i*4+2] = _float_(c); - manyArray[i*4+3] = _float_(d); - - if((xArray[i] == -999) | (manyArray[i*4] == -999) | (manyArray[i*4+1] == -999) | (manyArray[i*4+2] == -999) | (manyArray[i*4+3] == -999)){ - MACH3LOG_ERROR("*********** Bad params in getSplineCoeff_SepMany() ************"); - MACH3LOG_ERROR("pre cast to _float_ (x, y, b, c, d) = {}, {}, {}, {}, {}",x, y, b, c, d); - MACH3LOG_ERROR("post cast to float (x, y, b, c, d) = {}, {}, {}, {}, {}",xArray[i], manyArray[i*4], manyArray[i*4+1], manyArray[i*4+2], manyArray[i*4+3]); - throw MaCh3Exception(__FILE__ , __LINE__ ); - } + + // Store the coefficients for each knot contiguously in memory + // 4 because manyArray stores y,b,c,d + xArray[i] = x; + manyArray[i*4] = y; + manyArray[i*4+1] = b; + manyArray[i*4+2] = c; + manyArray[i*4+3] = d; } + //We now clean up the splines! 
delete splinevec_Monolith[splineindex]; - splinevec_Monolith[splineindex] = NULL; + splinevec_Monolith[splineindex] = nullptr; } //**************************************** @@ -844,12 +830,12 @@ void splineFDBase::PrintArrayDetails(std::string SampleName) //**************************************** { int iSample = getSampleIndex(SampleName); - int nOscChannels = indexvec[iSample].size(); + int nOscChannels = int(indexvec[iSample].size()); MACH3LOG_INFO("Sample {} has {} oscillation channels", iSample, nOscChannels); for (int iOscChan = 0; iOscChan < nOscChannels; iOscChan++) { - int nSysts = indexvec[iSample][iOscChan].size(); + int nSysts = int(indexvec[iSample][iOscChan].size()); MACH3LOG_INFO("Oscillation channel {} has {} systematics", iOscChan, nSysts); } } @@ -915,43 +901,43 @@ bool splineFDBase::isValidSplineIndex(std::string SampleName, int iOscChan, int int iSample=getSampleIndex(SampleName); bool isValid = true; - if (iSample < 0 || iSample >= (int)indexvec.size()) + if (iSample < 0 || iSample >= int(indexvec.size())) { MACH3LOG_ERROR("Sample index is invalid! 0 <= Index < {} ", indexvec.size()); isValid = false; } - if (iOscChan < 0 || iOscChan >= (int)indexvec[iSample].size()) + if (iOscChan < 0 || iOscChan >= int(indexvec[iSample].size())) { MACH3LOG_ERROR("OscChan index is invalid! 0 <= Index < {} ", indexvec[iSample].size()); isValid = false; } - if (iSyst < 0 || iSyst >= (int)indexvec[iSample][iOscChan].size()) + if (iSyst < 0 || iSyst >= int(indexvec[iSample][iOscChan].size())) { MACH3LOG_ERROR("Syst index is invalid! 0 <= Index < {} ", indexvec[iSample][iOscChan].size()); isValid = false; } - if (iMode < 0 || iMode >= (int)indexvec[iSample][iOscChan][iSyst].size()) + if (iMode < 0 || iMode >= int(indexvec[iSample][iOscChan][iSyst].size())) { MACH3LOG_ERROR("Mode index is invalid! 
0 <= Index < {} ", indexvec[iSample][iOscChan][iSyst].size()); isValid = false; } - if (iVar1 < 0 || iVar1 >= (int)indexvec[iSample][iOscChan][iSyst][iMode].size()) + if (iVar1 < 0 || iVar1 >= int(indexvec[iSample][iOscChan][iSyst][iMode].size())) { MACH3LOG_ERROR("Var1 index is invalid! 0 <= Index < {} ", indexvec[iSample][iOscChan][iSyst][iMode].size()); isValid = false; } - if (iVar2 < 0 || iVar2 >= (int)indexvec[iSample][iOscChan][iSyst][iMode][iVar1].size()) + if (iVar2 < 0 || iVar2 >= int(indexvec[iSample][iOscChan][iSyst][iMode][iVar1].size())) { MACH3LOG_ERROR("Var2 index is invalid! 0 <= Index < {} ", indexvec[iSample][iOscChan][iSyst][iMode][iVar1].size()); isValid = false; } - if (iVar3 < 0 || iVar3 >= (int)indexvec[iSample][iOscChan][iSyst][iMode][iVar1][iVar2].size()) + if (iVar3 < 0 || iVar3 >= int(indexvec[iSample][iOscChan][iSyst][iMode][iVar1][iVar2].size())) { MACH3LOG_ERROR("Var3 index is invalid! 0 <= Index < {} ", indexvec[iSample][iOscChan][iSyst][iMode][iVar1][iVar2].size()); isValid = false; diff --git a/splines/splineFDBase.h b/splines/splineFDBase.h index 8b99d4026..a49a5ca02 100644 --- a/splines/splineFDBase.h +++ b/splines/splineFDBase.h @@ -50,7 +50,7 @@ class splineFDBase : public SplineBase { void BuildSampleIndexingArray(std::string SampleName); void PrepForReweight(); - void getSplineCoeff_SepMany(int splineindex, _float_ *& xArray, _float_ *&manyArray); + void getSplineCoeff_SepMany(int splineindex, M3::float_t *& xArray, M3::float_t *&manyArray); void PrintBinning(TAxis* Axis); void PrintSampleDetails(std::string SampleName); void PrintArrayDetails(std::string SampleName); @@ -97,7 +97,7 @@ class splineFDBase : public SplineBase { std::vector<int> UniqueSystIndices; std::vector<int> UniqueSystNKnots; std::vector<int> UniqueSystCurrSegment; - std::vector< std::vector<_float_> > UniqueSystXPts; + std::vector< std::vector<M3::float_t> > UniqueSystXPts; /// @brief Variables related to determined which modes have splines and which piggy-back of other modes
std::vector< std::vector< std::vector< std::vector< std::vector< std::vector< std::vector< int > > > > > > > indexvec; @@ -111,10 +111,10 @@ class splineFDBase : public SplineBase { int CoeffIndex; //Probably need to clear these arrays up at some point - _float_ *xVarArray; + M3::float_t *xVarArray; bool *isflatarray; // Need to keep track of which splines are flat and which aren't - _float_ *xcoeff_arr; //x coefficients for each spline - _float_ *manycoeff_arr; //ybcd coefficients for each spline + M3::float_t *xcoeff_arr; //x coefficients for each spline + M3::float_t *manycoeff_arr; //ybcd coefficients for each spline std::vector<double> weightvec_Monolith; std::vector<int> uniquesplinevec_Monolith;