test: Fix tests for Ubuntu 24.04 upgrade #7791

Merged: 5 commits, Nov 14, 2024
qa/L0_dlpack_multi_gpu/test.sh (2 changes: 1 addition & 1 deletion)
@@ -41,7 +41,7 @@ source ../common/util.sh

# Uninstall the non CUDA version of PyTorch
pip3 uninstall -y torch
-pip3 install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html
pip3 install tensorflow

# Install CuPy for testing non_blocking compute streams
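For context on the "non_blocking compute streams" comment above, a minimal sketch (not from the PR) of what that test exercises: moving data between CuPy and PyTorch via DLPack using a non-blocking CUDA stream. It assumes cupy and a CUDA-enabled torch wheel (e.g. 2.3.1+cu118) are installed on a GPU machine.

# Minimal sketch (not part of this PR): CuPy -> PyTorch handoff via DLPack
# on a non-blocking CUDA stream.
import cupy as cp
import torch

stream = cp.cuda.Stream(non_blocking=True)  # does not synchronize with the null stream
with stream:
    x = cp.arange(4, dtype=cp.float32)      # allocated and filled on that stream

stream.synchronize()                        # make the data ready before the handoff
t = torch.from_dlpack(x)                    # zero-copy view of the CuPy buffer
print(t)                                    # tensor([0., 1., 2., 3.], device='cuda:0')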
qa/L0_infer/test.sh (8 changes: 4 additions & 4 deletions)
@@ -144,9 +144,9 @@ export BATCH

if [[ $BACKENDS == *"python_dlpack"* ]]; then
if [[ "aarch64" != $(uname -m) ]] ; then
-pip3 install torch==2.2.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
else
-pip3 install torch==2.2.0 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1 -f https://download.pytorch.org/whl/torch_stable.html
fi
fi

@@ -352,9 +352,9 @@ if [ "$TEST_VALGRIND" -eq 1 ]; then
TESTING_BACKENDS="python python_dlpack onnx"
EXPECTED_NUM_TESTS=42
if [[ "aarch64" != $(uname -m) ]] ; then
-pip3 install torch==1.13.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
else
-pip3 install torch==1.13.0 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1 -f https://download.pytorch.org/whl/torch_stable.html
fi

for BACKENDS in $TESTING_BACKENDS; do
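The aarch64 branches above install the plain torch==2.3.1 wheel rather than torch==2.3.1+cpu, presumably because the "+cpu" local-version wheels on the torch_stable index are only published for x86_64. A hedged Python sketch of the same selection logic; torch_cpu_spec is a hypothetical helper name, not something in the repo.

# Hypothetical helper (not in the PR) mirroring the shell conditional above:
# choose the torch requirement string based on the CPU architecture.
import platform

def torch_cpu_spec(version: str = "2.3.1") -> str:
    if platform.machine() == "aarch64":
        return f"torch=={version}"        # aarch64: plain wheel
    return f"torch=={version}+cpu"        # x86_64: CPU-only local-version wheel

print(torch_cpu_spec())  # e.g. "torch==2.3.1+cpu" on an x86_64 host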
qa/L0_io/test.sh (4 changes: 2 additions & 2 deletions)
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright 2019-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -54,7 +54,7 @@ LD_LIBRARY_PATH=/opt/tritonserver/lib:$LD_LIBRARY_PATH
rm -f $CLIENT_LOG*

# PyTorch is required for the Python backend dlpack add sub models
-pip3 install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html
RET=0

# Prepare float32 models with basic config
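Several of these scripts pin the same CUDA build, torch==2.3.1+cu118. A small, hypothetical post-install sanity check (not part of the PR) that the pinned wheel actually landed and can see a GPU on the test machine:

# Hypothetical check (not in the PR): confirm the installed torch is the
# expected CUDA build before running the GPU tests.
import torch

print(torch.__version__)          # expected to start with "2.3.1+cu118"
print(torch.version.cuda)         # "11.8" for a cu118 wheel
print(torch.cuda.is_available())  # True when a GPU and matching driver are present

assert torch.__version__.startswith("2.3.1"), "unexpected torch version"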
qa/L0_libtorch_instance_group_kind_model/test.sh (4 changes: 2 additions & 2 deletions)
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -39,7 +39,7 @@ if [ ! -z "$TEST_REPO_ARCH" ]; then
fi

pip3 uninstall -y torch
-pip3 install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html

DATADIR=/data/inferenceserver/${REPO_VERSION}/qa_model_repository
SERVER=/opt/tritonserver/bin/tritonserver
qa/L0_trace/opentelemetry_unittest.py (10 changes: 5 additions & 5 deletions)
@@ -221,7 +221,7 @@ def _check_events(self, span_name, events, is_cancelled):
self.assertFalse(
all(entry in events for entry in root_events_http + root_events_grpc)
)
-self.assertEquals(len(events), len(compute_events))
+self.assertEqual(len(events), len(compute_events))

elif span_name == self.root_span:
# Check that root span has INFER_RESPONSE_COMPLETE, _RECV/_WAITREAD
@@ -233,12 +233,12 @@ def _check_events(self, span_name, events, is_cancelled):
if "HTTP" in events:
self.assertTrue(all(entry in events for entry in root_events_http))
self.assertFalse(all(entry in events for entry in root_events_grpc))
-self.assertEquals(len(events), len(root_events_http))
+self.assertEqual(len(events), len(root_events_http))

elif "GRPC" in events:
self.assertTrue(all(entry in events for entry in root_events_grpc))
self.assertFalse(all(entry in events for entry in root_events_http))
-self.assertEquals(len(events), len(root_events_grpc))
+self.assertEqual(len(events), len(root_events_grpc))

if is_cancelled == False:
self.assertFalse(all(entry in events for entry in request_events))
@@ -254,7 +254,7 @@ def _check_events(self, span_name, events, is_cancelled):
all(entry in events for entry in root_events_http + root_events_grpc)
)
self.assertFalse(all(entry in events for entry in compute_events))
-self.assertEquals(len(events), len(request_events))
+self.assertEqual(len(events), len(request_events))

elif span_name.startswith("CUSTOM_ACTIVITY"):
custom_activity_events = []
@@ -276,7 +276,7 @@ def _check_events(self, span_name, events, is_cancelled):
all(entry in events for entry in custom_activity_events),
"Span " + span_name,
)
-self.assertEquals(
+self.assertEqual(
len(events), len(custom_activity_events), "Span " + span_name
)

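The assertEquals to assertEqual renames above matter for the Ubuntu 24.04 move: assertEquals is a long-deprecated alias that was removed in Python 3.12, the default interpreter on 24.04. A minimal illustration (hypothetical test, not taken from the PR):

# On Python 3.12+ only the assertEqual spelling exists; the old
# assertEquals alias raises AttributeError at call time.
import unittest

class AliasExample(unittest.TestCase):
    def test_equal(self):
        self.assertEqual(1 + 1, 2)       # supported spelling
        # self.assertEquals(1 + 1, 2)    # AttributeError on Python >= 3.12

if __name__ == "__main__":
    unittest.main()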
qa/L0_warmup/test.sh (2 changes: 1 addition & 1 deletion)
@@ -415,7 +415,7 @@ wait $SERVER_PID
# Test the onnx model to verify that the memory type of the output tensor
# remains unchanged with the warmup setting
pip3 uninstall -y torch
-pip3 install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html

rm -fr models && mkdir models
cp -r /data/inferenceserver/${REPO_VERSION}/qa_model_repository/onnx_nobatch_float32_float32_float32 models/.