Skip to content

Commit

Permalink
Update improper tests for added checking in response sender
Browse files Browse the repository at this point in the history
  • Loading branch information
kthui committed May 31, 2024
1 parent 62662af commit 29882ce
Showing 1 changed file with 71 additions and 8 deletions.
79 changes: 71 additions & 8 deletions qa/L0_backend_python/response_sender/response_sender_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -538,10 +538,62 @@ def test_decoupled_one_response_on_and_post_return(self):
# TODO: Test for async decoupled after fixing 'AsyncEventFutureDoneCallback'
# using `py_future.result()` with error hangs on exit.

# Non-decoupled model sends the response final flag before the request returns.
def test_non_decoupled_zero_response_pre_return(self):
    """A non-decoupled model that sends only the complete final flag
    (zero responses) before returning must produce an error response."""
    # The final flag raises an exception that stops the model. Because the
    # exception happens before the model returns, the stub process catches
    # it, passes it to the backend, and an error response carrying the
    # final flag is sent back.
    expected_message = (
        "Non-decoupled model cannot send complete final before sending a response"
    )
    # Shared positional arguments, in order:
    #   number_of_response_before_return,
    #   send_complete_final_flag_before_return,
    #   return_a_response,
    #   number_of_response_after_return,
    #   send_complete_final_flag_after_return
    shared_args = (0, True, False, 0, False)

    # Do NOT group into a for-loop as it hides which model failed.
    responses = self._infer("response_sender", *shared_args)
    self._assert_responses_exception(responses, expected_message)

    responses = self._infer("response_sender_async", *shared_args)
    self._assert_responses_exception(responses, expected_message)

# Non-decoupled model sends the response final flag after the request returns.
@unittest.skip("Model unload will hang, see the TODO comment.")
def test_non_decoupled_zero_response_post_return(self):
# Note: The final flag will raise an exception which stops the model. Since the
# exception happens after the model returns, it cannot be caught by the
# stub (i.e. in a daemon thread), so nothing will happen.
# TODO: Since the stub does not know if the model failed after returning, the
# complete final flag is not sent and will hang when unloading the model.
# How to detect such event and close the response factory?
raise NotImplementedError("No testing is performed")

# Non-decoupled model sends 2 responses before returning.
def test_non_decoupled_two_response_pre_return(self):
# Note: The 2 responses sent will make their way to the frontend, but only the
# response at index 0 will be sent back to the client.
# Note: The 1st response will make its way to the client, but sending the 2nd
#       response will raise an exception which stops the model. Since the
#       exception happens before the model returns, it will be caught by the
#       stub process, which passes it to the backend and sends an error
#       response with the final flag. Since this is a non-decoupled model
#       using a gRPC stream, any response after the 1st will be discarded by
#       the frontend.
self._assert_non_decoupled_infer_with_expected_response_success(
number_of_response_before_return=2,
send_complete_final_flag_before_return=True,
Expand All @@ -554,9 +606,15 @@ def test_non_decoupled_two_response_pre_return(self):
)

# Non-decoupled model sends 2 responses after returning.
@unittest.skip("Model unload will hang, see the TODO comment.")
def test_non_decoupled_two_response_post_return(self):
# Note: The 2 responses sent will make their way to the frontend, but only the
# response at index 0 will be sent back to the client.
# Note: The 1st response will make its way to the client, but sending the 2nd
# response will raise an exception which stops the model. Since the
# exception happens after the model returns, it cannot be caught by the
# stub (i.e. in a daemon thread), so nothing will happen.
# TODO: Since the stub does not know if the model failed after returning, the
# complete final flag is not sent and will hang when unloading the model.
# How to detect such event and close the response factory?
self._assert_non_decoupled_infer_with_expected_response_success(
number_of_response_before_return=0,
send_complete_final_flag_before_return=False,
Expand All @@ -570,8 +628,10 @@ def test_non_decoupled_two_response_post_return(self):

# Non-decoupled model send 1 response and return 1 response.
def test_non_decoupled_one_response_pre_and_on_return(self):
# Note: The 2 responses sent will make their way to the frontend, but only the
# response at index 0 will be sent back to the client.
# Note: The sent response will make its way to the client and complete final.
# The returned response will see the response sender is closed and raise
# an exception. The backend should see the request is closed and do
# nothing upon receiving the error from stub.
self._assert_non_decoupled_infer_with_expected_response_success(
number_of_response_before_return=1,
send_complete_final_flag_before_return=True,
Expand All @@ -585,8 +645,11 @@ def test_non_decoupled_one_response_pre_and_on_return(self):

# Non-decoupled model return 1 response and send 1 response.
def test_non_decoupled_one_response_on_and_pre_return(self):
# Note: The 2 responses sent will make their way to the frontend, but only the
# response at index 0 will be sent back to the client.
# Note: The returned response will send the response to the client and complete
# final. The sent response will see the response sender is closed and
# raise an exception. Since the exception happens after the model returns,
# it cannot be caught by the stub (i.e. in a daemon thread), so nothing
# will happen.
self._assert_non_decoupled_infer_with_expected_response_success(
number_of_response_before_return=0,
send_complete_final_flag_before_return=False,
Expand Down

0 comments on commit 29882ce

Please sign in to comment.