From 9d4aa868ea1556b8cfada22e79021eb2afc481d6 Mon Sep 17 00:00:00 2001 From: Joe Wallwork Date: Tue, 7 Jan 2025 11:43:38 +0000 Subject: [PATCH 01/11] Build example 3 if CUDA and MPI enabled --- examples/CMakeLists.txt | 4 +++- src/CMakeLists.txt | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 0bafb40e..827151cc 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,7 +1,9 @@ if(CMAKE_BUILD_TESTS) add_subdirectory(1_SimpleNet) add_subdirectory(2_ResNet18) - # add_subdirectory(3_MultiGPU) + if(ENABLE_CUDA AND ENABLE_MPI) + add_subdirectory(3_MultiGPU) + endif() add_subdirectory(4_MultiIO) # add_subdirectory(5_Looping) add_subdirectory(6_Autograd) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index cc79b58c..40f9d252 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -125,8 +125,10 @@ if(CMAKE_BUILD_TESTS) DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/test/examples) file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/../examples/2_ResNet18 DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/test/examples) - # file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/../examples/3_MultiGPU - # DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/test/examples) + if(ENABLE_CUDA AND ENABLE_MPI) + file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/../examples/3_MultiGPU + DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/test/examples) + endif() file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/../examples/4_MultiIO DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/test/examples) # file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/../examples/5_Looping From 5f4418aca1643cbd760d07b3579341339179842b Mon Sep 17 00:00:00 2001 From: Joe Wallwork Date: Mon, 13 Jan 2025 14:13:35 +0000 Subject: [PATCH 02/11] Put model on CUDA device in simplenet --- examples/3_MultiGPU/simplenet.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/3_MultiGPU/simplenet.py b/examples/3_MultiGPU/simplenet.py index 81f65cbc..b7c2e6b2 100644 --- 
a/examples/3_MultiGPU/simplenet.py +++ b/examples/3_MultiGPU/simplenet.py @@ -42,7 +42,7 @@ def forward(self, batch: torch.Tensor) -> torch.Tensor: if __name__ == "__main__": - model = SimpleNet() + model = SimpleNet().to(torch.device("cuda")) model.eval() input_tensor = torch.Tensor([0.0, 1.0, 2.0, 3.0, 4.0]) @@ -50,4 +50,5 @@ def forward(self, batch: torch.Tensor) -> torch.Tensor: print(f"SimpleNet forward pass on CUDA device {input_tensor_gpu.get_device()}") with torch.no_grad(): - print(model(input_tensor_gpu)) + output = model(input_tensor_gpu) + print(output) From b864b909ebfefc1ed3d48fcdc2a04de129690323 Mon Sep 17 00:00:00 2001 From: Joe Wallwork Date: Mon, 13 Jan 2025 14:15:16 +0000 Subject: [PATCH 03/11] Run example 3 if it's been built --- run_test_suite.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/run_test_suite.sh b/run_test_suite.sh index e61613c1..33e9943b 100755 --- a/run_test_suite.sh +++ b/run_test_suite.sh @@ -66,7 +66,11 @@ ctest "${CTEST_ARGS}" cd - # Run integration tests -EXAMPLES="1_SimpleNet 2_ResNet18 4_MultiIO 6_Autograd" +if [ -e "${BUILD_DIR}/test/examples/3_MultiGPU" ]; then + EXAMPLES="1_SimpleNet 2_ResNet18 3_MultiGPU 4_MultiIO 6_Autograd" +else + EXAMPLES="1_SimpleNet 2_ResNet18 4_MultiIO 6_Autograd" +fi for EXAMPLE in ${EXAMPLES}; do pip -q install -r examples/"${EXAMPLE}"/requirements.txt cd "${BUILD_DIR}"/test/examples/"${EXAMPLE}" From 911989b46b0056379bfb206e73dd1a6a97cef20a Mon Sep 17 00:00:00 2001 From: Joe Wallwork Date: Mon, 13 Jan 2025 14:24:44 +0000 Subject: [PATCH 04/11] Add missing imports for pt2ts --- examples/3_MultiGPU/pt2ts.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/3_MultiGPU/pt2ts.py b/examples/3_MultiGPU/pt2ts.py index 3bddf104..0ada5da4 100644 --- a/examples/3_MultiGPU/pt2ts.py +++ b/examples/3_MultiGPU/pt2ts.py @@ -1,5 +1,7 @@ """Load a PyTorch model and convert it to TorchScript.""" +import os +import sys from typing import Optional # FPTLIB-TODO From 
459132554e64fe41c42ad78563e03adcf8b069f8 Mon Sep 17 00:00:00 2001 From: Joe Wallwork Date: Mon, 13 Jan 2025 14:25:51 +0000 Subject: [PATCH 05/11] More helpful output for simplenet_infer_python --- examples/3_MultiGPU/simplenet_infer_python.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/3_MultiGPU/simplenet_infer_python.py b/examples/3_MultiGPU/simplenet_infer_python.py index 5bf6202c..83ee3cb6 100644 --- a/examples/3_MultiGPU/simplenet_infer_python.py +++ b/examples/3_MultiGPU/simplenet_infer_python.py @@ -60,4 +60,4 @@ def deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor: with torch.no_grad(): result = deploy(saved_model_file, device_to_run, batch_size_to_run) - print(f"{rank}: {result}") + print(f"Output on device {device_to_run}: {result}") From b4246c42d8d27c0fbb370ea9c2970d42e9e3c3c5 Mon Sep 17 00:00:00 2001 From: Joe Wallwork Date: Mon, 13 Jan 2025 14:37:04 +0000 Subject: [PATCH 06/11] Fix numbering in CMakeLists for examples --- examples/1_SimpleNet/CMakeLists.txt | 6 +++--- examples/2_ResNet18/CMakeLists.txt | 4 ++-- examples/4_MultiIO/CMakeLists.txt | 6 +++--- examples/6_Autograd/CMakeLists.txt | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/1_SimpleNet/CMakeLists.txt b/examples/1_SimpleNet/CMakeLists.txt index fd36a4a9..3a1a2b41 100644 --- a/examples/1_SimpleNet/CMakeLists.txt +++ b/examples/1_SimpleNet/CMakeLists.txt @@ -29,7 +29,7 @@ if(CMAKE_BUILD_TESTS) add_test(NAME simplenet COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/simplenet.py) - # 1. Check the model is saved to file in the expected location with the + # 2. Check the model is saved to file in the expected location with the # pt2ts.py script add_test( NAME pt2ts @@ -38,7 +38,7 @@ if(CMAKE_BUILD_TESTS) # the model WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) - # 1. Check the model can be loaded from file and run in Python and that its + # 3. 
Check the model can be loaded from file and run in Python and that its # outputs meet expectations add_test( NAME simplenet_infer_python @@ -47,7 +47,7 @@ if(CMAKE_BUILD_TESTS) # model WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) - # 1. Check the model can be loaded from file and run in Fortran and that its + # 4. Check the model can be loaded from file and run in Fortran and that its # outputs meet expectations add_test( NAME simplenet_infer_fortran diff --git a/examples/2_ResNet18/CMakeLists.txt b/examples/2_ResNet18/CMakeLists.txt index 9af36f7e..b68db963 100644 --- a/examples/2_ResNet18/CMakeLists.txt +++ b/examples/2_ResNet18/CMakeLists.txt @@ -31,7 +31,7 @@ if(CMAKE_BUILD_TESTS) COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/resnet18.py WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) - # 1. Check the model is saved to file in the expected location with the + # 2. Check the model is saved to file in the expected location with the # pt2ts.py script add_test( NAME pt2ts @@ -40,7 +40,7 @@ if(CMAKE_BUILD_TESTS) # the model WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) - # 1. Check the model can be loaded from file and run in Fortran and that its + # 3. Check the model can be loaded from file and run in Fortran and that its # outputs meet expectations add_test( NAME resnet_infer_fortran diff --git a/examples/4_MultiIO/CMakeLists.txt b/examples/4_MultiIO/CMakeLists.txt index 8169a8ad..2a4fdbdc 100644 --- a/examples/4_MultiIO/CMakeLists.txt +++ b/examples/4_MultiIO/CMakeLists.txt @@ -29,7 +29,7 @@ if(CMAKE_BUILD_TESTS) add_test(NAME multiionet COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/multiionet.py) - # 1. Check the model is saved to file in the expected location with the + # 2. Check the model is saved to file in the expected location with the # pt2ts.py script add_test( NAME pt2ts @@ -38,7 +38,7 @@ if(CMAKE_BUILD_TESTS) # the model WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) - # 1. Check the model can be loaded from file and run in Python and that its + # 3. 
Check the model can be loaded from file and run in Python and that its # outputs meet expectations add_test( NAME multiionet_infer_python @@ -47,7 +47,7 @@ if(CMAKE_BUILD_TESTS) ${PROJECT_BINARY_DIR} # Command line argument: filepath to find the model WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) - # 1. Check the model can be loaded from file and run in Fortran and that its + # 4. Check the model can be loaded from file and run in Fortran and that its # outputs meet expectations add_test( NAME multiionet_infer_fortran diff --git a/examples/6_Autograd/CMakeLists.txt b/examples/6_Autograd/CMakeLists.txt index bbe62b32..607493bf 100644 --- a/examples/6_Autograd/CMakeLists.txt +++ b/examples/6_Autograd/CMakeLists.txt @@ -29,7 +29,7 @@ if(CMAKE_BUILD_TESTS) add_test(NAME pyautograd COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/autograd.py) - # 1. Check the Fortran Autograd script runs successfully + # 2. Check the Fortran Autograd script runs successfully add_test( NAME fautograd COMMAND autograd From 28e37781f7ff8de2dd0be381471268e6256a31d9 Mon Sep 17 00:00:00 2001 From: Joe Wallwork Date: Mon, 13 Jan 2025 15:02:22 +0000 Subject: [PATCH 07/11] Renaming in MultiGPU example; set up unit testing --- examples/3_MultiGPU/CMakeLists.txt | 52 +++++++++++++++++-- .../3_MultiGPU/{simplenet.py => multigpu.py} | 4 +- ...fortran.f90 => multigpu_infer_fortran.f90} | 0 ...fer_python.py => multigpu_infer_python.py} | 15 ++++-- examples/3_MultiGPU/pt2ts.py | 6 +-- 5 files changed, 64 insertions(+), 13 deletions(-) rename examples/3_MultiGPU/{simplenet.py => multigpu.py} (94%) rename examples/3_MultiGPU/{simplenet_infer_fortran.f90 => multigpu_infer_fortran.f90} (100%) rename examples/3_MultiGPU/{simplenet_infer_python.py => multigpu_infer_python.py} (84%) diff --git a/examples/3_MultiGPU/CMakeLists.txt b/examples/3_MultiGPU/CMakeLists.txt index 9820c4bf..b4fdd5c4 100644 --- a/examples/3_MultiGPU/CMakeLists.txt +++ b/examples/3_MultiGPU/CMakeLists.txt @@ -18,7 +18,53 @@ 
find_package(FTorch) find_package(MPI REQUIRED) message(STATUS "Building with Fortran PyTorch coupling") +check_language(CUDA) +if(CMAKE_CUDA_COMPILER) + enable_language(CUDA) +else() + message(WARNING "No CUDA support") +endif() + # Fortran example -add_executable(simplenet_infer_fortran_gpu simplenet_infer_fortran.f90) -target_link_libraries(simplenet_infer_fortran_gpu PRIVATE FTorch::ftorch) -target_link_libraries(simplenet_infer_fortran_gpu PRIVATE MPI::MPI_Fortran) +add_executable(multigpu_infer_fortran multigpu_infer_fortran.f90) +target_link_libraries(multigpu_infer_fortran PRIVATE FTorch::ftorch) +target_link_libraries(multigpu_infer_fortran PRIVATE MPI::MPI_Fortran) + +# Integration testing +if (CMAKE_BUILD_TESTS) + include(CTest) + + # 1. Check the PyTorch model runs and its outputs meet expectations + add_test(NAME multigpu COMMAND ${Python_EXECUTABLE} + ${PROJECT_SOURCE_DIR}/multigpu.py) + + # 2. Check the model is saved to file in the expected location with the + # pt2ts.py script + add_test( + NAME pt2ts + COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/pt2ts.py + ${PROJECT_BINARY_DIR} # Command line argument: filepath for saving + # the model + WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) + + # 3. Check the model can be loaded from file and run in Python and that its + # outputs meet expectations + add_test( + NAME multigpu_infer_python + COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/multigpu_infer_python.py + ${PROJECT_BINARY_DIR} # Command line argument: filepath to find the + # model + WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) + + # 4. 
Check the model can be loaded from file and run in Fortran and that its + # outputs meet expectations + add_test( + NAME multigpu_infer_fortran + COMMAND + multigpu_infer_fortran ${PROJECT_BINARY_DIR}/saved_multigpu_model_cpu.pt + # Command line argument: model file + WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) + set_tests_properties( + multigpu_infer_fortran PROPERTIES PASS_REGULAR_EXPRESSION + "MultiGPU example ran successfully") +endif() diff --git a/examples/3_MultiGPU/simplenet.py b/examples/3_MultiGPU/multigpu.py similarity index 94% rename from examples/3_MultiGPU/simplenet.py rename to examples/3_MultiGPU/multigpu.py index b7c2e6b2..e2b70655 100644 --- a/examples/3_MultiGPU/simplenet.py +++ b/examples/3_MultiGPU/multigpu.py @@ -4,7 +4,7 @@ from torch import nn -class SimpleNet(nn.Module): +class MultiGPUNet(nn.Module): """PyTorch module multiplying an input vector by 2.""" def __init__( @@ -42,7 +42,7 @@ def forward(self, batch: torch.Tensor) -> torch.Tensor: if __name__ == "__main__": - model = SimpleNet().to(torch.device("cuda")) + model = MultiGPUNet().to(torch.device("cuda")) model.eval() input_tensor = torch.Tensor([0.0, 1.0, 2.0, 3.0, 4.0]) diff --git a/examples/3_MultiGPU/simplenet_infer_fortran.f90 b/examples/3_MultiGPU/multigpu_infer_fortran.f90 similarity index 100% rename from examples/3_MultiGPU/simplenet_infer_fortran.f90 rename to examples/3_MultiGPU/multigpu_infer_fortran.f90 diff --git a/examples/3_MultiGPU/simplenet_infer_python.py b/examples/3_MultiGPU/multigpu_infer_python.py similarity index 84% rename from examples/3_MultiGPU/simplenet_infer_python.py rename to examples/3_MultiGPU/multigpu_infer_python.py index 83ee3cb6..139f4b49 100644 --- a/examples/3_MultiGPU/simplenet_infer_python.py +++ b/examples/3_MultiGPU/multigpu_infer_python.py @@ -1,7 +1,13 @@ -"""Load saved SimpleNet to TorchScript and run inference example.""" +"""Load saved MultiGPUNet to TorchScript and run inference example.""" import torch -from mpi4py import MPI +try: + 
from mpi4py import MPI + rank = MPI.COMM_WORLD.rank +except ModuleNotFoundError: + from warnings import warn + warn("Running with rank 0 under the assumption that MPI is not being used.") + rank = 0 def deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor: @@ -25,7 +31,7 @@ def deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor: input_tensor = torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0]).repeat(batch_size, 1) # Add the rank (device index) to each tensor to make them differ - input_tensor += MPI.COMM_WORLD.rank + input_tensor += rank if device == "cpu": # Load saved TorchScript model @@ -50,9 +56,8 @@ def deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor: if __name__ == "__main__": - saved_model_file = "saved_simplenet_model_cuda.pt" + saved_model_file = "saved_multigpu_model_cuda.pt" - rank = MPI.COMM_WORLD.rank device_to_run = f"cuda:{rank}" batch_size_to_run = 1 diff --git a/examples/3_MultiGPU/pt2ts.py b/examples/3_MultiGPU/pt2ts.py index 0ada5da4..4517f24d 100644 --- a/examples/3_MultiGPU/pt2ts.py +++ b/examples/3_MultiGPU/pt2ts.py @@ -7,7 +7,7 @@ # FPTLIB-TODO # Add a module import with your model here: # This example assumes the model architecture is in an adjacent module `my_ml_model.py` -import simplenet +import multigpu import torch @@ -81,7 +81,7 @@ def load_torchscript(filename: Optional[str] = "saved_model.pt") -> torch.nn.Mod # Insert code here to load your model as `trained_model`. # This example assumes my_ml_model has a method `initialize` to load # architecture, weights, and place in inference mode - trained_model = simplenet.SimpleNet() + trained_model = multigpu.MultiGPUNet() # Switch off specific layers/parts of the model that behave # differently during training and inference. 
@@ -117,7 +117,7 @@ def load_torchscript(filename: Optional[str] = "saved_model.pt") -> torch.nn.Mod # FPTLIB-TODO # Set the name of the file you want to save the torchscript model to: - saved_ts_filename = "saved_simplenet_model_cuda.pt" + saved_ts_filename = "saved_multigpu_model_cuda.pt" # A filepath may also be provided. To do this, pass the filepath as an argument to # this script when it is run from the command line, i.e. `./pt2ts.py path/to/model`. From 5fa780190961b46a90059626b8b0285f0fcf52ba Mon Sep 17 00:00:00 2001 From: Joe Wallwork Date: Mon, 13 Jan 2025 15:08:32 +0000 Subject: [PATCH 08/11] Raise error if no CUDA in example 3 --- examples/3_MultiGPU/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/3_MultiGPU/CMakeLists.txt b/examples/3_MultiGPU/CMakeLists.txt index b4fdd5c4..464e37a5 100644 --- a/examples/3_MultiGPU/CMakeLists.txt +++ b/examples/3_MultiGPU/CMakeLists.txt @@ -22,7 +22,7 @@ check_language(CUDA) if(CMAKE_CUDA_COMPILER) enable_language(CUDA) else() - message(WARNING "No CUDA support") + message(FATAL_ERROR "No CUDA support") endif() # Fortran example From 8c27dc1ce7db3b801495222fc1f2b93936155e06 Mon Sep 17 00:00:00 2001 From: Joe Wallwork Date: Mon, 13 Jan 2025 15:08:38 +0000 Subject: [PATCH 09/11] Lint --- examples/3_MultiGPU/multigpu_infer_python.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/3_MultiGPU/multigpu_infer_python.py b/examples/3_MultiGPU/multigpu_infer_python.py index 139f4b49..27329f74 100644 --- a/examples/3_MultiGPU/multigpu_infer_python.py +++ b/examples/3_MultiGPU/multigpu_infer_python.py @@ -3,9 +3,11 @@ import torch try: from mpi4py import MPI + rank = MPI.COMM_WORLD.rank except ModuleNotFoundError: from warnings import warn + warn("Running with rank 0 under the assumption that MPI is not being used.") rank = 0 From 68f111ae9455912754623b3baa6fe05902717e5f Mon Sep 17 00:00:00 2001 From: Joe Wallwork Date: Mon, 13 Jan 2025 15:34:44 +0000 Subject: [PATCH 10/11] Fix 
model filename passed to fortran --- examples/3_MultiGPU/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/3_MultiGPU/CMakeLists.txt b/examples/3_MultiGPU/CMakeLists.txt index 464e37a5..99315d53 100644 --- a/examples/3_MultiGPU/CMakeLists.txt +++ b/examples/3_MultiGPU/CMakeLists.txt @@ -61,7 +61,7 @@ if (CMAKE_BUILD_TESTS) add_test( NAME multigpu_infer_fortran COMMAND - multigpu_infer_fortran ${PROJECT_BINARY_DIR}/saved_multigpu_model_cpu.pt + multigpu_infer_fortran ${PROJECT_BINARY_DIR}/saved_multigpu_model_cuda.pt # Command line argument: model file WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) set_tests_properties( From 3f3cd53cf18e162073a68d5f501418db75f44503 Mon Sep 17 00:00:00 2001 From: Joe Wallwork Date: Mon, 13 Jan 2025 15:46:33 +0000 Subject: [PATCH 11/11] Do require mpi4py in Python script --- examples/3_MultiGPU/multigpu_infer_python.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/examples/3_MultiGPU/multigpu_infer_python.py b/examples/3_MultiGPU/multigpu_infer_python.py index 27329f74..64a89174 100644 --- a/examples/3_MultiGPU/multigpu_infer_python.py +++ b/examples/3_MultiGPU/multigpu_infer_python.py @@ -1,15 +1,7 @@ """Load saved MultiGPUNet to TorchScript and run inference example.""" import torch -try: - from mpi4py import MPI - - rank = MPI.COMM_WORLD.rank -except ModuleNotFoundError: - from warnings import warn - - warn("Running with rank 0 under the assumption that MPI is not being used.") - rank = 0 +from mpi4py import MPI def deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor: @@ -33,7 +25,7 @@ def deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor: input_tensor = torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0]).repeat(batch_size, 1) # Add the rank (device index) to each tensor to make them differ - input_tensor += rank + input_tensor += MPI.COMM_WORLD.rank if device == "cpu": # Load saved TorchScript model @@ -60,7 +52,7 @@ def 
deploy(saved_model: str, device: str, batch_size: int = 1) -> torch.Tensor: if __name__ == "__main__": saved_model_file = "saved_multigpu_model_cuda.pt" - device_to_run = f"cuda:{rank}" + device_to_run = f"cuda:{MPI.COMM_WORLD.rank}" batch_size_to_run = 1