Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add functionality to continue benchmarking in Triton C API mode if server logging support is disabled #34

Merged
Merged 1 commit into the base branch on Aug 9, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 7 additions & 4 deletions src/client_backend/triton_c_api/triton_loader.cc
Original file line number | Diff line number | Diff line change
Expand Up @@ -242,7 +242,8 @@ TritonLoader::StartTriton()
// Check API version.
uint32_t api_version_major, api_version_minor;
REPORT_TRITONSERVER_ERROR(
api_version_fn_(&api_version_major, &api_version_minor));
api_version_fn_(&api_version_major, &api_version_minor),
"unable to get api version");
if ((TRITONSERVER_API_VERSION_MAJOR != api_version_major) ||
(TRITONSERVER_API_VERSION_MINOR > api_version_minor)) {
std::stringstream sstream;
Expand All @@ -264,10 +265,10 @@ TritonLoader::StartTriton()
RETURN_IF_TRITONSERVER_ERROR(
set_cuda_memory_pool_byte_size_(server_options, 0, 1073741824),
"setting cuda memory pool byte size failed.");
RETURN_IF_TRITONSERVER_ERROR(
REPORT_TRITONSERVER_ERROR(
set_log_verbose_fn_(server_options, verbose_level_),
"setting verbose logging level");
RETURN_IF_TRITONSERVER_ERROR(
REPORT_TRITONSERVER_ERROR(
set_log_info_fn_(server_options, verbose_),
"setting if log verbose level is true");
RETURN_IF_TRITONSERVER_ERROR(
Expand Down Expand Up @@ -1050,7 +1051,9 @@ TritonLoader::InferResponseCompleteAsync(
TRITONSERVER_InferenceResponse* response, const uint32_t flags,
AsyncRequestInfo* async_request_info)
{
REPORT_TRITONSERVER_ERROR(inference_response_error_fn_(response));
REPORT_TRITONSERVER_ERROR(
inference_response_error_fn_(response),
"unable to get inference response error");

if (async_request_info->enable_stats) {
tc::RequestTimers timer{*async_request_info->timer};
Expand Down
5 changes: 3 additions & 2 deletions src/client_backend/triton_c_api/triton_loader.h
Original file line number Diff line number Diff line change
Expand Up @@ -68,11 +68,12 @@
} \
} while (false)

#define REPORT_TRITONSERVER_ERROR(E) \
#define REPORT_TRITONSERVER_ERROR(E, MSG) \
do { \
TRITONSERVER_Error* err__ = (E); \
if (err__ != nullptr) { \
std::cout << GetSingleton()->error_message_fn_(err__) << std::endl; \
std::cerr << "error: " << (MSG) << ": " \
<< GetSingleton()->error_message_fn_(err__) << std::endl; \
GetSingleton()->error_delete_fn_(err__); \
} \
} while (false)
Expand Down
Loading