diff --git a/source/python/gui/source/tests/test_graphs.py b/source/python/gui/source/tests/test_graphs.py
index 270816361..d9ec19538 100644
--- a/source/python/gui/source/tests/test_graphs.py
+++ b/source/python/gui/source/tests/test_graphs.py
@@ -75,7 +75,7 @@
 ]
 
 
-samples_df_expected_counts = [152, 138, 304, 138, 152, 828, 152, 138, 138]
+samples_df_expected_counts = [138, 152, 276, 138, 152, 828, 138, 152, 138]
 
 input_files = find_causal_files(
     [workload_dir], default_settings["verbose"], default_settings["recursive"]
@@ -214,7 +213,6 @@ def test_parse_files_default():
     ][:2].round(4)
 
     assert sorted(file_names_run) == sorted(file_names)
-    _samples_df_expected_counts = [138, 152, 276, 138, 152, 828, 138, 152, 138]
     samples_df = samples_df.sort_values(by=["location", "count"])
 
     samples_df_locations = pd.concat(
@@ -225,7 +224,7 @@ def test_parse_files_default():
     )["count"].to_numpy()
 
     assert (samples_df_locations == samples_df_expected_locations).all()
-    assert (samples_df_counts == _samples_df_expected_counts).all()
+    assert (samples_df_counts == samples_df_expected_counts).all()
 
     # assert expected speedup err
     assert (top_df["program speedup"].to_numpy() == top_df_expected_program_speedup).all()
@@ -313,7 +312,7 @@ def test_parse_files_valid_directory():
 
     assert sorted(file_names_run) == sorted(file_names)
 
-    samples_df = samples_df.sort_values(by="location")
+    samples_df = samples_df.sort_values(by=["location", "count"])
     samples_df_locations = pd.concat(
         [samples_df[0:3], samples_df[100:103], samples_df[150:153]]
     )["location"].to_numpy()
@@ -413,7 +412,7 @@ def test_parse_files_invalid_experiment():
     samples_df_expected_counts = [4, 2, 6, 3, 4, 4]
     assert sorted(file_names_run) == sorted(file_names)
 
-    samples_df = samples_df.sort_values(by="location")
+    samples_df = samples_df.sort_values(by=["location", "count"])
     samples_df_locations = pd.concat(
         [samples_df[0:3], samples_df[100:103], samples_df[150:153]]
     )["location"].to_numpy()
@@ -470,7 +469,7 @@ def test_parse_files_valid_progress_regex():
 
     assert sorted(file_names_run) == sorted(file_names)
 
-    samples_df = samples_df.sort_values(by="location")
+    samples_df = samples_df.sort_values(by=["location", "count"])
     samples_df_locations = pd.concat(
         [samples_df[0:3], samples_df[100:103], samples_df[150:153]]
     )["location"].to_numpy()
@@ -581,7 +580,7 @@ def test_parse_files_invalid_progress_regex():
     file_names = [os.path.join(workload_dir, "experiments.coz")]
     results_df = results_df.round(4)
 
-    samples_df = samples_df.sort_values(by="location")
+    samples_df = samples_df.sort_values(by=["location", "count"])
     samples_df_locations = pd.concat(
         [samples_df[0:3], samples_df[100:103], samples_df[150:153]]
     )["location"].to_numpy()
@@ -755,7 +754,7 @@ def test_parse_files_invalid_speedup():
 
     assert sorted(file_names_run) == sorted(file_names)
 
-    samples_df = samples_df.sort_values(by="location")
+    samples_df = samples_df.sort_values(by=["location", "count"])
     samples_df_locations = pd.concat(
         [samples_df[0:3], samples_df[100:103], samples_df[150:153]]
     )["location"].to_numpy()
@@ -795,7 +794,7 @@ def test_parse_files_valid_min_points():
 
     assert sorted(file_names_run) == sorted(file_names)
 
-    samples_df = samples_df.sort_values(by="location")
+    samples_df = samples_df.sort_values(by=["location", "count"])
     samples_df_locations = pd.concat(
         [samples_df[0:3], samples_df[100:103], samples_df[150:153]]
     )["location"].to_numpy()
@@ -897,7 +896,7 @@ def test_parse_files_high_min_points():
 
     assert sorted(file_names_run) == sorted(file_names)
 
-    samples_df = samples_df.sort_values(by="location")
+    samples_df = samples_df.sort_values(by=["location", "count"])
     samples_df_locations = pd.concat(
         [samples_df[0:3], samples_df[100:103], samples_df[150:153]]
     )["location"].to_numpy()