diff --git a/integration/README.md b/integration/README.md
index 4434a9e..c961ec2 100644
--- a/integration/README.md
+++ b/integration/README.md
@@ -64,49 +64,50 @@ Launch an automated test sequence on Silkworm RpcDaemon (aka Silkrpc) or Erigon
 -e,--verify-external-provider: send any request also to external API endpoint as reference
 -i,--without-compare-results: send request without compare results
 -w,--waiting_time: waiting after test execution (millisec)
+-S,--serial: run tests serially (default is parallel)
 ```
 # Invoke examples
 ```
-./run_tests.py -b mainnet -d -c -v 1
+./run_tests.py -d -c -v 1
 ```
 Runs all tests on main net chain comparing Silkrpc response with rpcdaemon response, printing each test result
 ```
-./run_tests.py -b mainnet -c -v 1
+./run_tests.py -c -v 1
 ```
 Runs all tests on main net chain comparing silkrpc response to response saved on json file, printing each test result
 ```
-./run_tests.py -b mainnet -c -a eth_call
+./run_tests.py -c -A eth_call
 ```
 Runs all tests of eth_call on main net chain comparing silkrpc response with saved json file, printing only failed tests
 ```
-./run_tests.py -b mainnet -r -c -a eth_call -t 1
+./run_tests.py -r -c -A eth_call -t 1
 ```
 Runs test 1 of eth_call on main net chain comparing rpcdaemon response to saved json file, printing only failed tests
 ```
-./run_tests.py -b mainnet -r -c -t 20 -v 1
+./run_tests.py -r -c -t 20 -v 1
 ```
 Runs test number 20 in main net chain using rpcdaemon and compare result with json file, printing each test result
 ```
-./run_tests.py -b mainnet -d -c -X 20 -v 1
+./run_tests.py -d -c -X 20 -v 1
 ```
 Runs all tests (excluding test number 20) on main net chain comparing silkrpc response with rpcdaemon response, printing each test result
 ```
-./run_tests.py -b mainnet -d -c -x eth_call -v 1
+./run_tests.py -d -c -x eth_call -v 1
 ```
diff --git a/integration/run_tests.py b/integration/run_tests.py
index f0246bb..7d9f7ab 100755
--- a/integration/run_tests.py
+++ b/integration/run_tests.py
@@ -10,6 +10,7 @@
 import sys
 import tarfile
 import time
+from concurrent.futures import ProcessPoolExecutor
 import pytz
 import jwt
 import requests
@@ -20,7 +21,7 @@
 RPCDAEMON = "rpcdaemon"
 EXTERNAL_PROVIDER = "external-provider"
 TIME=0.1
-MAX_TIME = 100 # times of TIME secs
+MAX_TIME = 200 # timeout budget, in multiples of TIME seconds
 
 api_not_compared = [
     "mainnet/engine_getClientVersionV1", # not supported by erigon
@@ -120,6 +121,7 @@ def usage(argv):
     print("-e,--verify-external-provider: send any request also to external API endpoint as reference")
     print("-i,--without-compare-results: send request without compare results")
     print("-w,--waiting_time: waiting after test execution (millisec)")
+    print("-S,--serial: run tests serially (default is parallel)")
 
 
 def get_target_name(target_type: str):
@@ -315,6 +317,7 @@ def __init__(self):
         self.jwt_secret = ""
         self.display_only_fail = 0
         self.transport_type = "http"
+        self.parallel = True
         self.use_jsondiff = True
         self.without_compare_results = False
         self.waiting_time = 0
@@ -322,17 +325,22 @@ def select_user_options(self, argv):
         """ process user command """
         try:
-            opts, _ = getopt.getopt(argv[1:], "iw:hfrcv:t:l:a:de:b:ox:X:H:k:s:p:T:A:j",
+            opts, _ = getopt.getopt(argv[1:], "iw:hfrcv:t:l:a:de:b:ox:X:H:k:s:p:T:A:jS",
                                     ['help', 'continue', 'erigon-rpcdaemon', 'verify-external-provider',
                                      'host=', 'port=', 'display-only-fail', 'verbose=', 'run-single-test=',
                                      'start-from-test=', 'api-list-with=', 'api-list=','loops=',
                                      'compare-erigon-rpcdaemon', 'jwt=', 'blockchain=', 'transport_type=', 'exclude-api-list=',
                                      'exclude-test-list=', 'json-diff', 'waiting_time=',
-                                     'dump-response', 'without-compare-results'])
+                                     'dump-response', 'without-compare-results', 'serial'])
             for option, optarg in opts:
                 if option in ("-h", "--help"):
                     usage(argv)
                     sys.exit(1)
                 elif option in ("-w", "--waiting_time"):
+                    if self.parallel:
+                        print("Error on options: -w/--waiting_time is incompatible with the parallel configuration (the default); pass -S/--serial before -w")
+                        usage(argv)
+                        sys.exit(1)
                     self.waiting_time = int(optarg)
                 elif option in ("-c", "--continue"):
                     self.exit_on_fail = 0
@@ -346,6 +354,8 @@ def select_user_options(self, argv):
                 elif option in ("-e", "--verify-external-provider"):
                     self.daemon_as_reference = EXTERNAL_PROVIDER
                     self.external_provider_url = optarg
+                elif option in ("-S", "--serial"):
+                    self.parallel = False
                 elif option in ("-H", "--host"):
                     self.daemon_on_host = optarg
                 elif option in ("-p", "--port"):
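A note on the `-w`/`-S` interaction added above: `self.parallel` is tested at the moment `-w` is parsed, so the check is order-sensitive and `-S` must precede `-w` on the command line. A minimal sketch of an order-independent alternative, validating once after the getopt loop (the `Config`/`parse_args` names are illustrative, not the script's actual structure):

```
# Sketch: defer cross-option validation until after parsing so that
# "-w 100 -S" and "-S -w 100" behave identically. Hypothetical names.
import getopt
import sys

class Config:
    def __init__(self):
        self.parallel = True        # parallel by default, as in the patch
        self.waiting_time = 0       # millisec to sleep after each test

def parse_args(argv):
    cfg = Config()
    opts, _ = getopt.getopt(argv[1:], "Sw:", ["serial", "waiting_time="])
    for option, optarg in opts:
        if option in ("-S", "--serial"):
            cfg.parallel = False
        elif option in ("-w", "--waiting_time"):
            cfg.waiting_time = int(optarg)
    # validated after the loop: no dependence on option order
    if cfg.waiting_time and cfg.parallel:
        sys.exit("-w/--waiting_time requires -S/--serial")
    return cfg

if __name__ == "__main__":
    cfg = parse_args(sys.argv)
    print("parallel:", cfg.parallel, "waiting_time:", cfg.waiting_time)
```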
@@ -446,41 +456,34 @@
         shutil.rmtree(self.output_dir)
 
 
-def get_json_from_response(msg, verbose_level: int, json_file, result: str, test_number, exit_on_fail: int):
+def get_json_from_response(msg, verbose_level: int, json_file, result: str, test_number):
     """ Retrieve JSON from response """
     if verbose_level > 2:
         print(msg + " :[" + result + "]")
 
     if len(result) == 0:
-        file = json_file.ljust(60)
-        if verbose_level == 0:
-            print(f"{test_number:04d}. {file} Failed [" + msg + "] (json response is zero length, maybe server is down)")
-        else:
-            print("Failed [" + msg + "] (response zero length, maybe server is down)")
-        if exit_on_fail:
-            print("TEST ABORTED!")
-            sys.exit(1)
-        return None
+        error_msg = "Failed (json response is zero length, maybe server is down)"
+        return None, error_msg
 
     try:
-        return result
+        return result, ""
     except json.decoder.JSONDecodeError:
-        file = json_file.ljust(60)
-        print(f"{test_number:04d}. {file} Failed [" + msg + "] (bad json format)")
+        error_msg = "Failed (bad json format)"
         if verbose_level:
            print(msg)
            print("Failed (bad json format)")
            print(result)
-        if exit_on_fail:
-            print("TEST ABORTED!")
-            sys.exit(1)
-        return None
+        return None, error_msg
 
 
 def dump_jsons(dump_json, silk_file, exp_rsp_file, output_dir, response, expected_response: str):
     """ dump jsons on result dir """
     if dump_json:
         if silk_file != "" and os.path.exists(output_dir) == 0:
-            os.mkdir(output_dir)
+            try:
+                os.mkdir(output_dir)
+            except FileExistsError:
+                # a parallel worker may have created it in the meantime
+                pass
         if silk_file != "":
             with open(silk_file, 'w', encoding='utf8') as json_file_ptr:
                 json_file_ptr.write(json.dumps(response, indent=2, sort_keys=True))
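The rewrite of `get_json_from_response` above is the template for all the worker-side changes in this patch: helpers now return a `(value, error_msg)` pair instead of printing and calling `sys.exit()`, because they run in pool worker processes where exiting would only kill the worker and interleaved printing would garble the progress output. A minimal sketch of the convention; `parse_response` is a hypothetical stand-in for the real helpers:

```
# Sketch of the (value, error_msg) convention: workers report, the parent
# (which collects the futures) prints and decides whether to abort.
import json

def parse_response(raw: str):
    """Return (parsed, "") on success or (None, error_msg) on failure."""
    if not raw:
        return None, "Failed (json response is zero length, maybe server is down)"
    try:
        return json.loads(raw), ""
    except json.JSONDecodeError:
        return None, "Failed (bad json format)"

# parent side: unpack the pair and own all console output
parsed, error_msg = parse_response('{"jsonrpc": "2.0", "id": 1}')
print(parsed if parsed is not None else error_msg)
```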
""" if use_jsondiff: + cmd = "json-diff -s " + temp_file2 + " " + temp_file1 + " > " + diff_file + " 2> /dev/null &" already_failed = False - cmd = "json-diff -s " + temp_file2 + " " + temp_file1 + " > " + diff_file + " &" else: - cmd = "diff " + temp_file2 + " " + temp_file1 + " > " + diff_file + " &" + cmd = "diff " + temp_file2 + " " + temp_file1 + " > " + diff_file + " 2> /dev/null &" already_failed = True os.system(cmd) idx = 0 @@ -560,15 +565,18 @@ def run_compare(use_jsondiff, temp_file1, temp_file2, diff_file): idx += 1 time.sleep(TIME) # verify if json-diff or diff in progress - cmd = "ps aux | grep -v run_tests | grep 'diff' | grep -v 'grep' | awk '{print $2}'" + cmd = "ps aux | grep -v run_tests | grep 'diff' | grep -v 'grep' | grep test_" + str(test_number) + " | awk '{print $2}'" pid = os.popen(cmd).read() if pid == "": # json-diff or diff terminated return 1 if idx >= MAX_TIME: + killing_pid = pid.strip() # reach timeout. kill it - cmd = "kill -9 " + pid + cmd = "kill -9 " + killing_pid + " >& /dev/null" + print ("kill: ", str(test_number), cmd) os.system(cmd) + print ("killed: ", str(test_number), cmd) if already_failed: # timeout with json-diff and diff so return timeout->0 return 0 @@ -581,8 +589,11 @@ def run_compare(use_jsondiff, temp_file1, temp_file2, diff_file): def compare_json(config, response, json_file, silk_file, exp_rsp_file, diff_file: str, test_number): """ Compare JSON response. """ - temp_file1 = "/tmp/silk_lower_case" - temp_file2 = "/tmp/rpc_lower_case" + base_name = "/tmp/test_" + str(test_number) + "/" + if os.path.exists(base_name) == 0: + os.mkdir(base_name) + temp_file1 = base_name + "silk_lower_case.txt" + temp_file2 = base_name + "rpc_lower_case.txt" if "error" in response: to_lower_case(silk_file, temp_file1) @@ -602,101 +613,81 @@ def compare_json(config, response, json_file, silk_file, exp_rsp_file, diff_file replace_message(exp_rsp_file, temp_file1, removed_line_string) replace_message(silk_file, temp_file2, removed_line_string) - diff_result = run_compare(config.use_jsondiff, temp_file1, temp_file2, diff_file) + diff_result = run_compare(config.use_jsondiff, temp_file1, temp_file2, diff_file, test_number) diff_file_size = 0 return_code = 1 # ok + error_msg = "" if diff_result == 1: diff_file_size = os.stat(diff_file).st_size if diff_file_size != 0 or diff_result == 0: - file = json_file.ljust(60) if diff_result == 0: - print(f"{test_number:04d}. {file} Failed Timeout") + error_msg = "Failed Timeout" else: - print(f"{test_number:04d}. {file} Failed") - if config.verbose_level: - print("Failed") - if config.exit_on_fail: - print("TEST ABORTED!") - sys.exit(1) + error_msg = "Failed" return_code = 0 # failed - elif config.verbose_level: - print("OK") if os.path.exists(temp_file1): os.remove(temp_file1) if os.path.exists(temp_file2): os.remove(temp_file2) - return return_code + return return_code, error_msg def process_response(result, result1, response_in_file: str, config, output_dir: str, silk_file: str, exp_rsp_file: str, diff_file: str, json_file: str, test_number: int): """ Process the response If exact result or error don't care, they are null but present in expected_response. 
""" - response = get_json_from_response(config.daemon_under_test, config.verbose_level, json_file, result, test_number, config.exit_on_fail) + response, error_msg = get_json_from_response(config.daemon_under_test, config.verbose_level, json_file, result, test_number) if response is None: - return 0 + return 0, error_msg if result1 != "": - expected_response = get_json_from_response(config.daemon_as_reference, config.verbose_level, json_file, result1, test_number, - config.exit_on_fail) + expected_response, error_msg = get_json_from_response(config.daemon_as_reference, config.verbose_level, json_file, result1, test_number) if expected_response is None: - return 0 + return 0, error_msg else: expected_response = response_in_file if config.without_compare_results is True: - if config.verbose_level: - print("OK") dump_jsons(config.force_dump_jsons, silk_file, exp_rsp_file, output_dir, response, expected_response) - return 1 + return 1, "" if response is None: - if config.verbose_level: - print("Failed [" + config.daemon_under_test + "] (server doesn't response)") - return 0 + return 0, "Failed [" + config.daemon_under_test + "] (server doesn't response)" if expected_response is None: - if config.verbose_level: - print("Failed [" + config.daemon_as_reference + "] (server doesn't response)") - return 0 + return 0, "Failed [" + config.daemon_as_reference + "] (server doesn't response)" if response != expected_response: if "result" in response and "result" in expected_response and expected_response["result"] is None and result1 == "": # response and expected_response are different but don't care - if config.verbose_level: - print("OK") dump_jsons(config.force_dump_jsons, silk_file, exp_rsp_file, output_dir, response, expected_response) - return 1 + return 1, "" if "error" in response and "error" in expected_response and expected_response["error"] is None: # response and expected_response are different but don't care - if config.verbose_level: - print("OK") dump_jsons(config.force_dump_jsons, silk_file, exp_rsp_file, output_dir, response, expected_response) - return 1 + return 1, "" if "error" not in expected_response and "result" not in expected_response: # response and expected_response are different but don't care - if config.verbose_level: - print("OK") dump_jsons(config.force_dump_jsons, silk_file, exp_rsp_file, output_dir, response, expected_response) - return 1 + return 1, "" dump_jsons(True, silk_file, exp_rsp_file, output_dir, response, expected_response) - same = compare_json(config, response, json_file, silk_file, exp_rsp_file, diff_file, test_number) + same, error_msg = compare_json(config, response, json_file, silk_file, exp_rsp_file, diff_file, test_number) # cleanup if same: os.remove(silk_file) os.remove(exp_rsp_file) os.remove(diff_file) if not os.listdir(output_dir): - os.rmdir(output_dir) + try: + os.rmdir(output_dir) + except: + pass - return same - - if config.verbose_level: - print("OK") + return same, error_msg dump_jsons(config.force_dump_jsons, silk_file, exp_rsp_file, output_dir, response, expected_response) - return 1 + return 1, "" def run_test(json_file: str, test_number, transport_type, config): @@ -708,8 +699,7 @@ def run_test(json_file: str, test_number, transport_type, config): with tarfile.open(json_filename, encoding='utf-8') as tar: files = tar.getmembers() if len(files) != 1: - print("bad archive file " + json_filename) - sys.exit(1) + return 0, "bad archive file " + json_filename file = tar.extractfile(files[0]) buff = file.read() tar.close() @@ -788,18 +778,28 
@@ -788,18 +778,28 @@ def main(argv) -> int:
     start_time = datetime.now()
     os.mkdir(config.output_dir)
 
-    match = 0
     executed_tests = 0
     failed_tests = 0
     success_tests = 0
     tests_not_executed = 0
     global_test_number = 1
+
+    if config.parallel:
+        print("Running tests in parallel")
+        exe = ProcessPoolExecutor()
+    else:
+        print("Running tests serially")
+        exe = ProcessPoolExecutor(max_workers=1)
+
     for test_rep in range(0, config.loop_number): # makes tests more times
-        test_number_in_any_loop = 1
-        if config.verbose_level:
-            print("Test iteration: ", test_rep + 1)
+        if config.loop_number != 1:
+            print("\r" + " " * 100, end='', flush=True)  # clear the progress line
+            print("\nTest iteration:", test_rep + 1)
         tokenize_transport_type = config.transport_type.split(",")
         for transport_type in tokenize_transport_type:
+            test_number_in_any_loop = 1
+            tests_descr_list = []
             dirs = sorted(os.listdir(config.json_dir))
             for curr_api in dirs: # scans all api present in dir
                 # jump results folder or any hidden OS-specific folder
@@ -832,31 +832,54 @@ def main(argv) -> int:
                 if (config.start_test == "" or # start from specific test
                         (config.start_test != "" and test_number_in_any_loop >= int(
                             config.start_test))):
-                    file = json_test_full_name.ljust(60)
-                    curr_tt = transport_type.ljust(15)
-                    if config.verbose_level:
-                        print(f"{test_number_in_any_loop:04d}. {curr_tt}::{file} ", end='', flush=True)
-                    else:
-                        print(f"{test_number_in_any_loop:04d}. {curr_tt}::{file}\r", end='', flush=True)
-                    ret = run_test(json_test_full_name, test_number_in_any_loop, transport_type, config)
+                    # submit the test to the process pool
+                    future = exe.submit(run_test, json_test_full_name, test_number_in_any_loop, transport_type, config)
+                    tests_descr_list.append({'name': json_test_full_name, 'number': test_number_in_any_loop, 'transport-type': transport_type, 'future': future})
                     if config.waiting_time:
-                        time.sleep(config.waiting_time/1000)
-                    if ret == 1:
-                        success_tests = success_tests + 1
-                    else:
-                        failed_tests = failed_tests + 1
+                        time.sleep(config.waiting_time/1000)
                     executed_tests = executed_tests + 1
                     global_test_number = global_test_number + 1
                     test_number_in_any_loop = test_number_in_any_loop + 1
                 test_number = test_number + 1
 
-            if executed_tests == 0:
-                print("ERROR: api-name or testNumber not found")
-                return 1
+            # all tests for the current transport type have been spawned
+            if executed_tests == 0:
+                print("ERROR: api-name or testNumber not found")
+                return 1
+
+            # wait on each future to collect the test results
+            cancel = 0
+            for test in tests_descr_list:
+                curr_json_test_full_name = test['name']
+                curr_test_number_in_any_loop = test['number']
+                curr_transport_type = test['transport-type']
+                curr_future = test['future']
+                file = curr_json_test_full_name.ljust(60)
+                curr_tt = curr_transport_type.ljust(15)
+                if cancel:
+                    curr_future.cancel()
+                    continue
+                print(f"{curr_test_number_in_any_loop:04d}. {curr_tt}::{file} ", end='', flush=True)
+                result, error_msg = curr_future.result()
+                if result == 1:
+                    success_tests = success_tests + 1
+                    if config.verbose_level:
+                        print("OK", flush=True)
+                    else:
+                        print("OK\r", end='', flush=True)
+                else:
+                    failed_tests = failed_tests + 1
+                    print(error_msg)
+                    if config.exit_on_fail:
+                        cancel = 1
+            if config.exit_on_fail and failed_tests:
+                print("TEST ABORTED!")
+                break
 
+    # print results at the end of all the tests
     elapsed = datetime.now() - start_time
-    print("    \r")
+    print(" " * 120 + "\r")  # clear any leftover progress line
     print(f"Test time-elapsed: {str(elapsed)}")
     print(f"Number of executed tests: {executed_tests}/{global_test_number - 1}")
     print(f"Number of NOT executed tests: {tests_not_executed}")
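For reference, the submit-then-collect flow `main()` now implements, reduced to a skeleton: all tests are submitted to a `ProcessPoolExecutor` first, then results are consumed in submission order, so the printed output stays deterministic even though tests finish out of order. `run_one` and `TESTS` are illustrative stand-ins for `run_test` and the scanned JSON files:

```
from concurrent.futures import ProcessPoolExecutor

def run_one(name: str):
    # worker: report the outcome as (result, error_msg); no printing, no sys.exit
    return (1, "") if name != "bad" else (0, "Failed")

TESTS = ["test_a", "test_b", "bad"]

def main():
    # ProcessPoolExecutor(max_workers=1) would reproduce -S/--serial
    with ProcessPoolExecutor() as exe:
        futures = [(name, exe.submit(run_one, name)) for name in TESTS]
        for name, future in futures:  # .result() blocks until that test is done
            result, error_msg = future.result()
            print(name.ljust(20), "OK" if result == 1 else error_msg)

if __name__ == "__main__":
    main()
```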