diff --git a/qa/L0_dlpack_multi_gpu/test.sh b/qa/L0_dlpack_multi_gpu/test.sh
index ae72daa7d0..a90169780c 100755
--- a/qa/L0_dlpack_multi_gpu/test.sh
+++ b/qa/L0_dlpack_multi_gpu/test.sh
@@ -41,7 +41,7 @@ source ../common/util.sh
 
 # Uninstall the non CUDA version of PyTorch
 pip3 uninstall -y torch
-pip3 install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html
 pip3 install tensorflow
 
 # Install CuPy for testing non_blocking compute streams
diff --git a/qa/L0_infer/test.sh b/qa/L0_infer/test.sh
index b8f8b1ebcf..2d54fd8965 100755
--- a/qa/L0_infer/test.sh
+++ b/qa/L0_infer/test.sh
@@ -144,9 +144,9 @@ export BATCH
 
 if [[ $BACKENDS == *"python_dlpack"* ]]; then
     if [[ "aarch64" != $(uname -m) ]] ; then
-        pip3 install torch==2.2.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
+        pip3 install torch==2.3.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
     else
-        pip3 install torch==2.2.0 -f https://download.pytorch.org/whl/torch_stable.html
+        pip3 install torch==2.3.1 -f https://download.pytorch.org/whl/torch_stable.html
     fi
 fi
 
@@ -352,9 +352,9 @@ if [ "$TEST_VALGRIND" -eq 1 ]; then
     TESTING_BACKENDS="python python_dlpack onnx"
     EXPECTED_NUM_TESTS=42
     if [[ "aarch64" != $(uname -m) ]] ; then
-        pip3 install torch==1.13.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
+        pip3 install torch==2.3.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
     else
-        pip3 install torch==1.13.0 -f https://download.pytorch.org/whl/torch_stable.html
+        pip3 install torch==2.3.1 -f https://download.pytorch.org/whl/torch_stable.html
     fi
 
     for BACKENDS in $TESTING_BACKENDS; do
diff --git a/qa/L0_io/test.sh b/qa/L0_io/test.sh
index 84ab4fb0c0..85d45ad7d7 100755
--- a/qa/L0_io/test.sh
+++ b/qa/L0_io/test.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright 2019-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
@@ -54,7 +54,7 @@ LD_LIBRARY_PATH=/opt/tritonserver/lib:$LD_LIBRARY_PATH
 rm -f $CLIENT_LOG*
 
 # PyTorch is required for the Python backend dlpack add sub models
-pip3 install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html
 
 RET=0
 # Prepare float32 models with basic config
diff --git a/qa/L0_libtorch_instance_group_kind_model/test.sh b/qa/L0_libtorch_instance_group_kind_model/test.sh
index 04d76bd036..9cbb6bf550 100755
--- a/qa/L0_libtorch_instance_group_kind_model/test.sh
+++ b/qa/L0_libtorch_instance_group_kind_model/test.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
@@ -39,7 +39,7 @@ if [ ! -z "$TEST_REPO_ARCH" ]; then
 fi
 
 pip3 uninstall -y torch
-pip3 install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html
 
 DATADIR=/data/inferenceserver/${REPO_VERSION}/qa_model_repository
 SERVER=/opt/tritonserver/bin/tritonserver
diff --git a/qa/L0_trace/opentelemetry_unittest.py b/qa/L0_trace/opentelemetry_unittest.py
index 34dc0bfd88..9fa0b7bb85 100644
--- a/qa/L0_trace/opentelemetry_unittest.py
+++ b/qa/L0_trace/opentelemetry_unittest.py
@@ -221,7 +221,7 @@ def _check_events(self, span_name, events, is_cancelled):
             self.assertFalse(
                 all(entry in events for entry in root_events_http + root_events_grpc)
             )
-            self.assertEquals(len(events), len(compute_events))
+            self.assertEqual(len(events), len(compute_events))
 
         elif span_name == self.root_span:
             # Check that root span has INFER_RESPONSE_COMPLETE, _RECV/_WAITREAD
@@ -233,12 +233,12 @@ def _check_events(self, span_name, events, is_cancelled):
             if "HTTP" in events:
                 self.assertTrue(all(entry in events for entry in root_events_http))
                 self.assertFalse(all(entry in events for entry in root_events_grpc))
-                self.assertEquals(len(events), len(root_events_http))
+                self.assertEqual(len(events), len(root_events_http))
 
             elif "GRPC" in events:
                 self.assertTrue(all(entry in events for entry in root_events_grpc))
                 self.assertFalse(all(entry in events for entry in root_events_http))
-                self.assertEquals(len(events), len(root_events_grpc))
+                self.assertEqual(len(events), len(root_events_grpc))
 
             if is_cancelled == False:
                 self.assertFalse(all(entry in events for entry in request_events))
@@ -254,7 +254,7 @@ def _check_events(self, span_name, events, is_cancelled):
                 all(entry in events for entry in root_events_http + root_events_grpc)
             )
             self.assertFalse(all(entry in events for entry in compute_events))
-            self.assertEquals(len(events), len(request_events))
+            self.assertEqual(len(events), len(request_events))
 
         elif span_name.startswith("CUSTOM_ACTIVITY"):
             custom_activity_events = []
@@ -276,7 +276,7 @@ def _check_events(self, span_name, events, is_cancelled):
                 all(entry in events for entry in custom_activity_events),
                 "Span " + span_name,
             )
-            self.assertEquals(
+            self.assertEqual(
                 len(events), len(custom_activity_events), "Span " + span_name
             )
 
diff --git a/qa/L0_warmup/test.sh b/qa/L0_warmup/test.sh
index a535aed25b..d307b7ce07 100755
--- a/qa/L0_warmup/test.sh
+++ b/qa/L0_warmup/test.sh
@@ -415,7 +415,7 @@ wait $SERVER_PID
 
 # Test the onnx model to verify that the memory type of the output tensor
 # remains unchanged with the warmup setting
 pip3 uninstall -y torch
-pip3 install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html
 rm -fr models && mkdir models
 cp -r /data/inferenceserver/${REPO_VERSION}/qa_model_repository/onnx_nobatch_float32_float32_float32 models/.