diff --git a/epochX/cudacpp/pp_tt012j.mad/CODEGEN_mad_pp_tt012j_log.txt b/epochX/cudacpp/pp_tt012j.mad/CODEGEN_mad_pp_tt012j_log.txt
index 8205523260..4b1e56384e 100644
--- a/epochX/cudacpp/pp_tt012j.mad/CODEGEN_mad_pp_tt012j_log.txt
+++ b/epochX/cudacpp/pp_tt012j.mad/CODEGEN_mad_pp_tt012j_log.txt
@@ -61,7 +61,7 @@ set zerowidth_tchannel F
 define j = p
 INFO: load particles
 INFO: load vertices
-DEBUG: model prefixing takes 0.005696535110473633
+DEBUG: model prefixing takes 0.005710124969482422
 INFO: Restrict model sm with file models/sm/restrict_default.dat .
 DEBUG: Simplifying conditional expressions
 DEBUG: remove interactions: u s w+ at order: QED=1
@@ -172,7 +172,7 @@ INFO: Process u~ u > t t~ added to mirror process u u~ > t t~
 INFO: Process c~ c > t t~ added to mirror process c c~ > t t~
 INFO: Process d~ d > t t~ added to mirror process d d~ > t t~
 INFO: Process s~ s > t t~ added to mirror process s s~ > t t~
-5 processes with 7 diagrams generated in 0.029 s
+5 processes with 7 diagrams generated in 0.030 s
 Total: 5 processes with 7 diagrams
 add process p p > t t~ j @1
 INFO: Checking for minimal orders which gives processes.
@@ -212,7 +212,7 @@ INFO: Process d~ g > t t~ d~ added to mirror process g d~ > t t~ d~
 INFO: Process d~ d > t t~ g added to mirror process d d~ > t t~ g
 INFO: Process s~ g > t t~ s~ added to mirror process g s~ > t t~ s~
 INFO: Process s~ s > t t~ g added to mirror process s s~ > t t~ g
-13 processes with 76 diagrams generated in 0.134 s
+13 processes with 76 diagrams generated in 0.136 s
 Total: 18 processes with 83 diagrams
 add process p p > t t~ j j @2
 INFO: Checking for minimal orders which gives processes.
@@ -378,7 +378,7 @@ INFO: Process s~ u~ > t t~ u~ s~ added to mirror process u~ s~ > t t~ u~ s~
 INFO: Process s~ c~ > t t~ c~ s~ added to mirror process c~ s~ > t t~ c~ s~
 INFO: Process s~ d~ > t t~ d~ s~ added to mirror process d~ s~ > t t~ d~ s~
 INFO: Crossed process found for s~ s~ > t t~ s~ s~, reuse diagrams.
-65 processes with 1119 diagrams generated in 1.827 s
+65 processes with 1119 diagrams generated in 1.815 s
 Total: 83 processes with 1202 diagrams
 output madevent_simd ../TMPOUT/CODEGEN_mad_pp_tt012j --hel_recycling=False --vector_size=32
 Load PLUGIN.CUDACPP_OUTPUT
@@ -500,7 +500,7 @@ INFO: Combined process d d~ > t t~ WEIGHTED<=2 with process u u~ > t t~ WEIGHTED
 INFO: Combined process s s~ > t t~ WEIGHTED<=2 with process u u~ > t t~ WEIGHTED<=2
 INFO: Creating files in directory P2_gg_ttxgg
 DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]
-DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]
+DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]
 INFO: Creating files in directory .
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -521,7 +521,7 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttxgg DEBUG: diag_to_iconfig =  {2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7, 9: 8, 10: 9, 11: 10, 12: 11, 13: 12, 14: 13, 15: 14, 16: 15, 17: 16, 18: 17, 19: 18, 20: 19, 21: 20, 22: 21, 23: 22, 24: 23, 25: 24, 26: 25, 27: 26, 28: 27, 29: 28, 30: 29, 31: 30, 33: 31, 34: 32, 35: 33, 36: 34, 37: 35, 38: 36, 39: 37, 40: 38, 41: 39, 42: 40, 43: 41, 44: 42, 45: 43, 46: 44, 47: 45, 49: 46, 50: 47, 51: 48, 52: 49, 53: 50, 54: 51, 55: 52, 56: 53, 57: 54, 59: 55, 60: 56, 61: 57, 62: 58, 63: 59, 64: 60, 65: 61, 66: 62, 67: 63, 68: 64, 69: 65, 70: 66, 71: 67, 72: 68, 73: 69, 75: 70, 76: 71, 77: 72, 78: 73, 79: 74, 80: 75, 81: 76, 82: 77, 83: 78, 84: 79, 85: 80, 86: 81, 87: 82, 88: 83, 89: 84, 90: 85, 91: 86, 92: 87, 94: 88, 95: 89, 96: 90, 97: 91, 98: 92, 99: 93, 101: 94, 102: 95, 103: 96, 104: 97, 105: 98, 106: 99, 108: 100, 109: 101, 110: 102, 111: 103, 112: 104, 113: 105} [model_handling.py at line 1544]  INFO: Creating files in directory P2_gg_ttxuux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -542,7 +542,7 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttxuux DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28, 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 35: 34, 36: 35} [model_handling.py at line 1544]  INFO: Creating files in directory P2_gu_ttxgu DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -563,7 +563,7 @@ INFO: Finding symmetric diagrams for subprocess group gu_ttxgu DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28, 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 35: 34, 36: 35} [model_handling.py at line 1544]  INFO: Creating files in directory P2_gux_ttxgux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -584,7 +584,7 @@ INFO: Finding symmetric diagrams for subprocess group gux_ttxgux DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28, 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 35: 34, 36: 35} [model_handling.py at line 1544]  INFO: Creating files in directory P2_uux_ttxgg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -605,7 +605,7 @@ INFO: Finding symmetric diagrams for subprocess group uux_ttxgg DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28, 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 35: 34, 36: 35} [model_handling.py at line 1544]  INFO: Creating files in directory P1_gg_ttxg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -626,7 +626,7 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttxg DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15} [model_handling.py at line 1544]  INFO: Creating files in directory P2_uu_ttxuu DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -647,7 +647,7 @@ INFO: Finding symmetric diagrams for subprocess group uu_ttxuu DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14} [model_handling.py at line 1544]  INFO: Creating files in directory P2_uux_ttxuux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -668,7 +668,7 @@ INFO: Finding symmetric diagrams for subprocess group uux_ttxuux DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14} [model_handling.py at line 1544]  INFO: Creating files in directory P2_uxux_ttxuxux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -689,7 +689,7 @@ INFO: Finding symmetric diagrams for subprocess group uxux_ttxuxux DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14} [model_handling.py at line 1544]  INFO: Creating files in directory P2_uc_ttxuc DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -710,7 +710,7 @@ INFO: Finding symmetric diagrams for subprocess group uc_ttxuc DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7} [model_handling.py at line 1544]  INFO: Creating files in directory P2_uux_ttxccx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -731,7 +731,7 @@ INFO: Finding symmetric diagrams for subprocess group uux_ttxccx DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7} [model_handling.py at line 1544]  INFO: Creating files in directory P2_ucx_ttxucx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -752,7 +752,7 @@ INFO: Finding symmetric diagrams for subprocess group ucx_ttxucx DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7} [model_handling.py at line 1544]  INFO: Creating files in directory P2_uxcx_ttxuxcx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -773,7 +773,7 @@ INFO: Finding symmetric diagrams for subprocess group uxcx_ttxuxcx DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7} [model_handling.py at line 1544]  INFO: Creating files in directory P1_gu_ttxu DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -794,7 +794,7 @@ INFO: Finding symmetric diagrams for subprocess group gu_ttxu DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5} [model_handling.py at line 1544]  INFO: Creating files in directory P1_gux_ttxux DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -815,7 +815,7 @@ INFO: Finding symmetric diagrams for subprocess group gux_ttxux DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5} [model_handling.py at line 1544]  INFO: Creating files in directory P1_uux_ttxg DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -836,7 +836,7 @@ INFO: Finding symmetric diagrams for subprocess group uux_ttxg DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3, 4: 4, 5: 5} [model_handling.py at line 1544]  INFO: Creating files in directory P0_gg_ttx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -857,7 +857,7 @@ INFO: Finding symmetric diagrams for subprocess group gg_ttx DEBUG: diag_to_iconfig =  {1: 1, 2: 2, 3: 3} [model_handling.py at line 1544]  INFO: Creating files in directory P0_uux_ttx DEBUG: kwargs[prefix] = 0 [model_handling.py at line 1151]  -DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  +DEBUG: process_exporter_cpp =  [export_v4.py at line 6261]  INFO: Creating files in directory . 
FileWriter for ././CPPProcess.h FileWriter for ././CPPProcess.cc @@ -876,15 +876,15 @@ INFO: Finding symmetric diagrams for subprocess group uux_ttx DEBUG: len(subproc_diagrams_for_config) =  1 [model_handling.py at line 1519]  DEBUG: iconfig_to_diag =  {1: 1} [model_handling.py at line 1543]  DEBUG: diag_to_iconfig =  {1: 1} [model_handling.py at line 1544]  -Generated helas calls for 18 subprocesses (372 diagrams) in 1.282 s -Wrote files for 810 helas calls in 3.517 s +Generated helas calls for 18 subprocesses (372 diagrams) in 1.285 s +Wrote files for 810 helas calls in 3.500 s ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 5 routines in 0.341 s +ALOHA: aloha creates 5 routines in 0.335 s DEBUG: Entering PLUGIN_ProcessExporter.convert_model (create the model) [output.py at line 205]  ALOHA: aloha starts to compute helicity amplitudes ALOHA: aloha creates VVV1 routines @@ -892,7 +892,7 @@ ALOHA: aloha creates FFV1 routines ALOHA: aloha creates VVVV1 routines ALOHA: aloha creates VVVV3 routines ALOHA: aloha creates VVVV4 routines -ALOHA: aloha creates 10 routines in 0.317 s +ALOHA: aloha creates 10 routines in 0.312 s VVV1 VVV1 FFV1 @@ -938,16 +938,16 @@ patching file matrix1.f Hunk #1 succeeded at 75 (offset 3 lines). Hunk #2 succeeded at 146 (offset 3 lines). Hunk #3 succeeded at 224 (offset 3 lines). -Hunk #4 succeeded at 252 (offset 3 lines). -Hunk #5 succeeded at 297 (offset 3 lines). +Hunk #4 succeeded at 253 (offset 3 lines). +Hunk #5 succeeded at 298 (offset 3 lines). DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P1_gg_ttxg; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f patching file driver.f patching file matrix1.f Hunk #2 succeeded at 159 (offset 16 lines). Hunk #3 succeeded at 237 (offset 16 lines). -Hunk #4 succeeded at 265 (offset 16 lines). -Hunk #5 succeeded at 310 (offset 16 lines). +Hunk #4 succeeded at 266 (offset 16 lines). +Hunk #5 succeeded at 311 (offset 16 lines). DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P1_gu_ttxu; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f Hunk #1 succeeded at 528 (offset 44 lines). @@ -956,8 +956,8 @@ patching file matrix1.f Hunk #1 succeeded at 75 (offset 3 lines). Hunk #2 succeeded at 162 (offset 19 lines). Hunk #3 succeeded at 240 (offset 19 lines). -Hunk #4 succeeded at 268 (offset 19 lines). -Hunk #5 succeeded at 313 (offset 19 lines). +Hunk #4 succeeded at 269 (offset 19 lines). +Hunk #5 succeeded at 314 (offset 19 lines). DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P1_gux_ttxux; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f Hunk #1 succeeded at 528 (offset 44 lines). @@ -966,8 +966,8 @@ patching file matrix1.f Hunk #1 succeeded at 75 (offset 3 lines). Hunk #2 succeeded at 162 (offset 19 lines). Hunk #3 succeeded at 240 (offset 19 lines). -Hunk #4 succeeded at 268 (offset 19 lines). -Hunk #5 succeeded at 313 (offset 19 lines). 
+Hunk #4 succeeded at 269 (offset 19 lines). +Hunk #5 succeeded at 314 (offset 19 lines). DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P1_uux_ttxg; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f Hunk #1 succeeded at 539 (offset 55 lines). @@ -976,16 +976,16 @@ patching file matrix1.f Hunk #1 succeeded at 75 (offset 3 lines). Hunk #2 succeeded at 162 (offset 19 lines). Hunk #3 succeeded at 240 (offset 19 lines). -Hunk #4 succeeded at 268 (offset 19 lines). -Hunk #5 succeeded at 313 (offset 19 lines). +Hunk #4 succeeded at 269 (offset 19 lines). +Hunk #5 succeeded at 314 (offset 19 lines). DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P2_gg_ttxgg; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f patching file driver.f patching file matrix1.f Hunk #2 succeeded at 191 (offset 48 lines). Hunk #3 succeeded at 269 (offset 48 lines). -Hunk #4 succeeded at 297 (offset 48 lines). -Hunk #5 succeeded at 342 (offset 48 lines). +Hunk #4 succeeded at 298 (offset 48 lines). +Hunk #5 succeeded at 343 (offset 48 lines). DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P2_gg_ttxuux; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f Hunk #1 succeeded at 517 (offset 33 lines). @@ -994,8 +994,8 @@ patching file matrix1.f Hunk #1 succeeded at 75 (offset 3 lines). Hunk #2 succeeded at 194 (offset 51 lines). Hunk #3 succeeded at 272 (offset 51 lines). -Hunk #4 succeeded at 300 (offset 51 lines). -Hunk #5 succeeded at 345 (offset 51 lines). +Hunk #4 succeeded at 301 (offset 51 lines). +Hunk #5 succeeded at 346 (offset 51 lines). DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P2_gu_ttxgu; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f Hunk #1 succeeded at 528 (offset 44 lines). @@ -1004,8 +1004,8 @@ patching file matrix1.f Hunk #1 succeeded at 75 (offset 3 lines). Hunk #2 succeeded at 194 (offset 51 lines). Hunk #3 succeeded at 272 (offset 51 lines). -Hunk #4 succeeded at 300 (offset 51 lines). -Hunk #5 succeeded at 345 (offset 51 lines). +Hunk #4 succeeded at 301 (offset 51 lines). +Hunk #5 succeeded at 346 (offset 51 lines). DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P2_gux_ttxgux; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f Hunk #1 succeeded at 528 (offset 44 lines). @@ -1014,8 +1014,8 @@ patching file matrix1.f Hunk #1 succeeded at 75 (offset 3 lines). Hunk #2 succeeded at 194 (offset 51 lines). Hunk #3 succeeded at 272 (offset 51 lines). -Hunk #4 succeeded at 300 (offset 51 lines). -Hunk #5 succeeded at 345 (offset 51 lines). +Hunk #4 succeeded at 301 (offset 51 lines). +Hunk #5 succeeded at 346 (offset 51 lines). 
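(The one-line shifts reported above and below for hunks #4 and #5 of matrix1.f - e.g. "Hunk #4 succeeded at 300" becoming "Hunk #4 succeeded at 301" - are consistent with this commit adding one net line to each generated matrix1.f: the extra WRITE (6,*) 'IMIRROR =' printout shown in the matrix1.f diffs further below, which pushes the later patch.P1 hunks down by one line.)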
DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P2_uc_ttxuc; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f Hunk #1 succeeded at 555 (offset 71 lines). @@ -1024,8 +1024,8 @@ patching file matrix1.f Hunk #1 succeeded at 77 (offset 5 lines). Hunk #2 succeeded at 196 (offset 53 lines). Hunk #3 succeeded at 274 (offset 53 lines). -Hunk #4 succeeded at 302 (offset 53 lines). -Hunk #5 succeeded at 347 (offset 53 lines). +Hunk #4 succeeded at 303 (offset 53 lines). +Hunk #5 succeeded at 348 (offset 53 lines). DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P2_ucx_ttxucx; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f Hunk #1 succeeded at 627 (offset 143 lines). @@ -1034,8 +1034,8 @@ patching file matrix1.f Hunk #1 succeeded at 83 (offset 11 lines). Hunk #2 succeeded at 202 (offset 59 lines). Hunk #3 succeeded at 280 (offset 59 lines). -Hunk #4 succeeded at 308 (offset 59 lines). -Hunk #5 succeeded at 353 (offset 59 lines). +Hunk #4 succeeded at 309 (offset 59 lines). +Hunk #5 succeeded at 354 (offset 59 lines). DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P2_uu_ttxuu; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f Hunk #1 succeeded at 539 (offset 55 lines). @@ -1044,8 +1044,8 @@ patching file matrix1.f Hunk #1 succeeded at 75 (offset 3 lines). Hunk #2 succeeded at 194 (offset 51 lines). Hunk #3 succeeded at 272 (offset 51 lines). -Hunk #4 succeeded at 300 (offset 51 lines). -Hunk #5 succeeded at 345 (offset 51 lines). +Hunk #4 succeeded at 301 (offset 51 lines). +Hunk #5 succeeded at 346 (offset 51 lines). DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P2_uux_ttxccx; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f Hunk #1 succeeded at 627 (offset 143 lines). @@ -1054,8 +1054,8 @@ patching file matrix1.f Hunk #1 succeeded at 83 (offset 11 lines). Hunk #2 succeeded at 202 (offset 59 lines). Hunk #3 succeeded at 280 (offset 59 lines). -Hunk #4 succeeded at 308 (offset 59 lines). -Hunk #5 succeeded at 353 (offset 59 lines). +Hunk #4 succeeded at 309 (offset 59 lines). +Hunk #5 succeeded at 354 (offset 59 lines). DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P2_uux_ttxgg; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1 patching file auto_dsig1.f Hunk #1 succeeded at 539 (offset 55 lines). @@ -1064,8 +1064,8 @@ patching file matrix1.f Hunk #1 succeeded at 75 (offset 3 lines). Hunk #2 succeeded at 194 (offset 51 lines). Hunk #3 succeeded at 272 (offset 51 lines). -Hunk #4 succeeded at 300 (offset 51 lines). -Hunk #5 succeeded at 345 (offset 51 lines). +Hunk #4 succeeded at 301 (offset 51 lines). +Hunk #5 succeeded at 346 (offset 51 lines). 
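The auto_dsig1.f diffs below all apply one pattern for the #872 fix: the scalar FIRST latch becomes a FIRST(2) array indexed by IMIRROR (read from the TO_MIRROR common block), so the expensive cudacpp helicity-filtering call runs only once overall - on the first call, whichever mirror that happens to be - while the per-mirror first-call bookkeeping (clearing the flag, resetting the cumulative variables) still runs once per mirror. A minimal standalone sketch of that control flow follows; the program name, the MOD-based mirror alternation and the WRITE stand-ins are illustrative, not taken from the generated code.

      PROGRAM MIRROR_FIRST_DEMO
c     Sketch of the FIRST(2)/IMIRROR latch used in the diffs below.
c     The one-off step runs once in total; the per-mirror step runs
c     once for each of the two mirrors. All names are illustrative.
      INTEGER IMIRROR, ICALL
      LOGICAL FIRST(2)
      SAVE FIRST
      DATA FIRST/.TRUE., .TRUE./
      DO ICALL = 1, 4
        IMIRROR = MOD(ICALL-1, 2) + 1 ! calls alternate mirrors 1,2,1,2
        IF ( FIRST(IMIRROR) ) THEN
c         One-off step (stands in for FBRIDGESEQUENCE_NOMULTICHANNEL):
c         runs only before either mirror has been seen
          IF ( FIRST(1) .AND. FIRST(2) ) THEN
            WRITE(6,*) 'filter helicities once, IMIRROR =', IMIRROR
          ENDIF
          FIRST(IMIRROR) = .FALSE.
c         Per-mirror step (stands in for RESET_CUMULATIVE_VARIABLE)
          WRITE(6,*) 'per-mirror reset, IMIRROR =', IMIRROR
        ENDIF
      END DO
      END

With the calls above, the one-off branch fires only on ICALL=1, while the per-mirror branch fires on both ICALL=1 and ICALL=2. This is also why the new comment warns "NB this may be IMIRROR=2!?": whichever mirror arrives first triggers the single filtering pass.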
 DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P2_uux_ttxuux; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1
 patching file auto_dsig1.f
 Hunk #1 succeeded at 539 (offset 55 lines).
@@ -1074,8 +1074,8 @@ patching file matrix1.f
 Hunk #1 succeeded at 75 (offset 3 lines).
 Hunk #2 succeeded at 194 (offset 51 lines).
 Hunk #3 succeeded at 272 (offset 51 lines).
-Hunk #4 succeeded at 300 (offset 51 lines).
-Hunk #5 succeeded at 345 (offset 51 lines).
+Hunk #4 succeeded at 301 (offset 51 lines).
+Hunk #5 succeeded at 346 (offset 51 lines).
 DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P2_uxcx_ttxuxcx; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1
 patching file auto_dsig1.f
 Hunk #1 succeeded at 555 (offset 71 lines).
@@ -1084,8 +1084,8 @@ patching file matrix1.f
 Hunk #1 succeeded at 77 (offset 5 lines).
 Hunk #2 succeeded at 196 (offset 53 lines).
 Hunk #3 succeeded at 274 (offset 53 lines).
-Hunk #4 succeeded at 302 (offset 53 lines).
-Hunk #5 succeeded at 347 (offset 53 lines).
+Hunk #4 succeeded at 303 (offset 53 lines).
+Hunk #5 succeeded at 348 (offset 53 lines).
 DEBUG: cd /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j/SubProcesses/P2_uxux_ttxuxux; patch -p6 -i /data/avalassi/GPU2023/madgraph4gpuX/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1
 patching file auto_dsig1.f
 Hunk #1 succeeded at 539 (offset 55 lines).
@@ -1094,8 +1094,8 @@ patching file matrix1.f
 Hunk #1 succeeded at 75 (offset 3 lines).
 Hunk #2 succeeded at 194 (offset 51 lines).
 Hunk #3 succeeded at 272 (offset 51 lines).
-Hunk #4 succeeded at 300 (offset 51 lines).
-Hunk #5 succeeded at 345 (offset 51 lines).
+Hunk #4 succeeded at 301 (offset 51 lines).
+Hunk #5 succeeded at 346 (offset 51 lines).
 DEBUG: p.returncode =  0 [output.py at line 241]
 Output to directory /data/avalassi/GPU2023/madgraph4gpuX/MG5aMC/TMPOUT/CODEGEN_mad_pp_tt012j done.
 Type "launch" to generate events from this process, or see
@@ -1103,9 +1103,9 @@ Type "launch" to generate events from this process, or see
 Run "open index.html" to see more information about this process.
 quit
 
-real 0m11.632s
-user 0m10.262s
-sys 0m0.921s
+real 0m11.178s
+user 0m10.214s
+sys 0m0.919s
 Code generation completed in 12 seconds
 ************************************************************
 *                                                          *
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/auto_dsig1.f
index 7bd8ec493e..8e5f10abe2 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/auto_dsig1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/auto_dsig1.f
@@ -509,11 +509,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
       INTEGER*4 NWARNINGS
       SAVE NWARNINGS
       DATA NWARNINGS/0/
-
-      LOGICAL FIRST
+
+      INTEGER IMIRROR, IPROC
+      COMMON/TO_MIRROR/IMIRROR, IPROC
+
+      LOGICAL FIRST(2)
       SAVE FIRST
-      DATA FIRST/.TRUE./
-
+      DATA FIRST/.TRUE., .TRUE./
+
       IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2)
 #endif
         call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1
@@ -541,12 +544,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
         WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0'
         STOP
       ENDIF
-      IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461)
-        CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
-     &    P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
-     &    SELECTED_HEL2, SELECTED_COL2 )
-        FIRST = .FALSE.
-c ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
+      IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461)
+c Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!?
+        IF( FIRST(1) .AND. FIRST(2) ) THEN
+          CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
+     &      P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
+     &      SELECTED_HEL2, SELECTED_COL2 )
+        ENDIF
+        FIRST(IMIRROR) = .FALSE.
+c ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872)
+c This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
         IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all)
           CALL RESET_CUMULATIVE_VARIABLE() ! mimic 'avoid bias of the initialization' within SMATRIX1
         ENDIF
@@ -556,6 +563,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
      &    ' in total number of helicities', NTOTHEL, NCOMB
         STOP
       ENDIF
+        WRITE (6,*) 'IMIRROR =', IMIRROR
         WRITE (6,*) 'NGOODHEL =', NGOODHEL
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/matrix1.f
index b1f45c3af7..5e8bc1f697 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/matrix1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_gg_ttx/matrix1.f
@@ -229,7 +229,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL,
             NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1
           ENDIF
         END DO
-        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror?
+        WRITE (6,*) 'IMIRROR =', IMIRROR
+        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR)
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
       ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/auto_dsig1.f
index c4e476d6c0..d1322b9c35 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/auto_dsig1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/auto_dsig1.f
@@ -564,11 +564,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
       INTEGER*4 NWARNINGS
       SAVE NWARNINGS
       DATA NWARNINGS/0/
-
-      LOGICAL FIRST
+
+      INTEGER IMIRROR, IPROC
+      COMMON/TO_MIRROR/IMIRROR, IPROC
+
+      LOGICAL FIRST(2)
       SAVE FIRST
-      DATA FIRST/.TRUE./
-
+      DATA FIRST/.TRUE., .TRUE./
+
       IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2)
 #endif
         call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1
@@ -596,12 +599,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
         WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0'
         STOP
       ENDIF
-      IF ( FIRST ) THEN ! 
exclude first pass (helicity filtering) from timers (#461) +c Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!? + IF( FIRST(1) .AND. FIRST(2) ) THEN + CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering + & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, + & SELECTED_HEL2, SELECTED_COL2 ) + ENDIF + FIRST(IMIRROR) = .FALSE. +c ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872) +c This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all) CALL RESET_CUMULATIVE_VARIABLE() ! mimic 'avoid bias of the initialization' within SMATRIX1 ENDIF @@ -611,6 +618,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, & ' in total number of helicities', NTOTHEL, NCOMB STOP ENDIF + WRITE (6,*) 'IMIRROR =', IMIRROR WRITE (6,*) 'NGOODHEL =', NGOODHEL WRITE (6,*) 'NCOMB =', NCOMB ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/matrix1.f index 8d74ac5b98..711a227601 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/matrix1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P0_uux_ttx/matrix1.f @@ -232,7 +232,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL, NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1 ENDIF END DO - WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror? + WRITE (6,*) 'IMIRROR =', IMIRROR + WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) WRITE (6,*) 'NCOMB =', NCOMB ENDIF ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/auto_dsig1.f index c9ca1538d3..f41b2ac65f 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/auto_dsig1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/auto_dsig1.f @@ -509,11 +509,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, INTEGER*4 NWARNINGS SAVE NWARNINGS DATA NWARNINGS/0/ - - LOGICAL FIRST + + INTEGER IMIRROR, IPROC + COMMON/TO_MIRROR/IMIRROR, IPROC + + LOGICAL FIRST(2) SAVE FIRST - DATA FIRST/.TRUE./ - + DATA FIRST/.TRUE., .TRUE./ + IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2) #endif call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1 @@ -541,12 +544,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0' STOP ENDIF - IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461) - CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering - & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, - & SELECTED_HEL2, SELECTED_COL2 ) - FIRST = .FALSE. -c ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) + IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461) +c Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!? + IF( FIRST(1) .AND. FIRST(2) ) THEN + CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering + & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, + & SELECTED_HEL2, SELECTED_COL2 ) + ENDIF + FIRST(IMIRROR) = .FALSE. +c ... 
But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872) +c This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all) CALL RESET_CUMULATIVE_VARIABLE() ! mimic 'avoid bias of the initialization' within SMATRIX1 ENDIF @@ -556,6 +563,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, & ' in total number of helicities', NTOTHEL, NCOMB STOP ENDIF + WRITE (6,*) 'IMIRROR =', IMIRROR WRITE (6,*) 'NGOODHEL =', NGOODHEL WRITE (6,*) 'NCOMB =', NCOMB ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/matrix1.f index 3d035277eb..9ad8ba61f2 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/matrix1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gg_ttxg/matrix1.f @@ -245,7 +245,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL, NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1 ENDIF END DO - WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror? + WRITE (6,*) 'IMIRROR =', IMIRROR + WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) WRITE (6,*) 'NCOMB =', NCOMB ENDIF ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/auto_dsig1.f index 2c11f53b89..c293962c6c 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/auto_dsig1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/auto_dsig1.f @@ -553,11 +553,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, INTEGER*4 NWARNINGS SAVE NWARNINGS DATA NWARNINGS/0/ - - LOGICAL FIRST + + INTEGER IMIRROR, IPROC + COMMON/TO_MIRROR/IMIRROR, IPROC + + LOGICAL FIRST(2) SAVE FIRST - DATA FIRST/.TRUE./ - + DATA FIRST/.TRUE., .TRUE./ + IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2) #endif call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1 @@ -585,12 +588,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0' STOP ENDIF - IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461) - CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering - & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, - & SELECTED_HEL2, SELECTED_COL2 ) - FIRST = .FALSE. -c ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) + IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461) +c Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!? + IF( FIRST(1) .AND. FIRST(2) ) THEN + CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering + & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, + & SELECTED_HEL2, SELECTED_COL2 ) + ENDIF + FIRST(IMIRROR) = .FALSE. +c ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872) +c This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all) CALL RESET_CUMULATIVE_VARIABLE() ! 
mimic 'avoid bias of the initialization' within SMATRIX1 ENDIF @@ -600,6 +607,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, & ' in total number of helicities', NTOTHEL, NCOMB STOP ENDIF + WRITE (6,*) 'IMIRROR =', IMIRROR WRITE (6,*) 'NGOODHEL =', NGOODHEL WRITE (6,*) 'NCOMB =', NCOMB ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/matrix1.f index 0a318e1c05..123640c5ae 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/matrix1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gu_ttxu/matrix1.f @@ -248,7 +248,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL, NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1 ENDIF END DO - WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror? + WRITE (6,*) 'IMIRROR =', IMIRROR + WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) WRITE (6,*) 'NCOMB =', NCOMB ENDIF ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/auto_dsig1.f index d829a73049..91d60c990b 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/auto_dsig1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/auto_dsig1.f @@ -553,11 +553,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, INTEGER*4 NWARNINGS SAVE NWARNINGS DATA NWARNINGS/0/ - - LOGICAL FIRST + + INTEGER IMIRROR, IPROC + COMMON/TO_MIRROR/IMIRROR, IPROC + + LOGICAL FIRST(2) SAVE FIRST - DATA FIRST/.TRUE./ - + DATA FIRST/.TRUE., .TRUE./ + IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2) #endif call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1 @@ -585,12 +588,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0' STOP ENDIF - IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461) - CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering - & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, - & SELECTED_HEL2, SELECTED_COL2 ) - FIRST = .FALSE. -c ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) + IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461) +c Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!? + IF( FIRST(1) .AND. FIRST(2) ) THEN + CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering + & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, + & SELECTED_HEL2, SELECTED_COL2 ) + ENDIF + FIRST(IMIRROR) = .FALSE. +c ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872) +c This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all) CALL RESET_CUMULATIVE_VARIABLE() ! 
mimic 'avoid bias of the initialization' within SMATRIX1 ENDIF @@ -600,6 +607,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, & ' in total number of helicities', NTOTHEL, NCOMB STOP ENDIF + WRITE (6,*) 'IMIRROR =', IMIRROR WRITE (6,*) 'NGOODHEL =', NGOODHEL WRITE (6,*) 'NCOMB =', NCOMB ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/matrix1.f index f012b48d83..5f4d010bb2 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/matrix1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_gux_ttxux/matrix1.f @@ -248,7 +248,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL, NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1 ENDIF END DO - WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror? + WRITE (6,*) 'IMIRROR =', IMIRROR + WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) WRITE (6,*) 'NCOMB =', NCOMB ENDIF ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/auto_dsig1.f index 0eb22610bf..dfdb33b21a 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/auto_dsig1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/auto_dsig1.f @@ -564,11 +564,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, INTEGER*4 NWARNINGS SAVE NWARNINGS DATA NWARNINGS/0/ - - LOGICAL FIRST + + INTEGER IMIRROR, IPROC + COMMON/TO_MIRROR/IMIRROR, IPROC + + LOGICAL FIRST(2) SAVE FIRST - DATA FIRST/.TRUE./ - + DATA FIRST/.TRUE., .TRUE./ + IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2) #endif call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1 @@ -596,12 +599,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0' STOP ENDIF - IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461) - CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering - & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, - & SELECTED_HEL2, SELECTED_COL2 ) - FIRST = .FALSE. -c ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) + IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461) +c Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!? + IF( FIRST(1) .AND. FIRST(2) ) THEN + CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering + & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, + & SELECTED_HEL2, SELECTED_COL2 ) + ENDIF + FIRST(IMIRROR) = .FALSE. +c ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872) +c This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all) CALL RESET_CUMULATIVE_VARIABLE() ! 
mimic 'avoid bias of the initialization' within SMATRIX1 ENDIF @@ -611,6 +618,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, & ' in total number of helicities', NTOTHEL, NCOMB STOP ENDIF + WRITE (6,*) 'IMIRROR =', IMIRROR WRITE (6,*) 'NGOODHEL =', NGOODHEL WRITE (6,*) 'NCOMB =', NCOMB ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/matrix1.f index 3d2319b36a..cfe3e37bad 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/matrix1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P1_uux_ttxg/matrix1.f @@ -248,7 +248,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL, NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1 ENDIF END DO - WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror? + WRITE (6,*) 'IMIRROR =', IMIRROR + WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) WRITE (6,*) 'NCOMB =', NCOMB ENDIF ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/auto_dsig1.f index 6a17e242b2..848dc10114 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/auto_dsig1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/auto_dsig1.f @@ -509,11 +509,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, INTEGER*4 NWARNINGS SAVE NWARNINGS DATA NWARNINGS/0/ - - LOGICAL FIRST + + INTEGER IMIRROR, IPROC + COMMON/TO_MIRROR/IMIRROR, IPROC + + LOGICAL FIRST(2) SAVE FIRST - DATA FIRST/.TRUE./ - + DATA FIRST/.TRUE., .TRUE./ + IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2) #endif call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1 @@ -541,12 +544,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0' STOP ENDIF - IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461) - CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering - & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, - & SELECTED_HEL2, SELECTED_COL2 ) - FIRST = .FALSE. -c ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) + IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461) +c Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!? + IF( FIRST(1) .AND. FIRST(2) ) THEN + CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering + & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, + & SELECTED_HEL2, SELECTED_COL2 ) + ENDIF + FIRST(IMIRROR) = .FALSE. +c ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872) +c This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all) CALL RESET_CUMULATIVE_VARIABLE() ! 
mimic 'avoid bias of the initialization' within SMATRIX1 ENDIF @@ -556,6 +563,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, & ' in total number of helicities', NTOTHEL, NCOMB STOP ENDIF + WRITE (6,*) 'IMIRROR =', IMIRROR WRITE (6,*) 'NGOODHEL =', NGOODHEL WRITE (6,*) 'NCOMB =', NCOMB ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/matrix1.f index 926b17aa45..0d4f6b5539 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/matrix1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxgg/matrix1.f @@ -277,7 +277,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL, NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1 ENDIF END DO - WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror? + WRITE (6,*) 'IMIRROR =', IMIRROR + WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) WRITE (6,*) 'NCOMB =', NCOMB ENDIF ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/auto_dsig1.f index a952958df8..c39c3bca0a 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/auto_dsig1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/auto_dsig1.f @@ -542,11 +542,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, INTEGER*4 NWARNINGS SAVE NWARNINGS DATA NWARNINGS/0/ - - LOGICAL FIRST + + INTEGER IMIRROR, IPROC + COMMON/TO_MIRROR/IMIRROR, IPROC + + LOGICAL FIRST(2) SAVE FIRST - DATA FIRST/.TRUE./ - + DATA FIRST/.TRUE., .TRUE./ + IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2) #endif call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1 @@ -574,12 +577,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0' STOP ENDIF - IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461) - CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering - & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, - & SELECTED_HEL2, SELECTED_COL2 ) - FIRST = .FALSE. -c ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) + IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461) +c Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!? + IF( FIRST(1) .AND. FIRST(2) ) THEN + CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering + & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, + & SELECTED_HEL2, SELECTED_COL2 ) + ENDIF + FIRST(IMIRROR) = .FALSE. +c ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872) +c This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all) CALL RESET_CUMULATIVE_VARIABLE() ! 
mimic 'avoid bias of the initialization' within SMATRIX1 ENDIF @@ -589,6 +596,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, & ' in total number of helicities', NTOTHEL, NCOMB STOP ENDIF + WRITE (6,*) 'IMIRROR =', IMIRROR WRITE (6,*) 'NGOODHEL =', NGOODHEL WRITE (6,*) 'NCOMB =', NCOMB ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/matrix1.f index 520aaec0b1..97a3d1c45a 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/matrix1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gg_ttxuux/matrix1.f @@ -280,7 +280,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL, NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1 ENDIF END DO - WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror? + WRITE (6,*) 'IMIRROR =', IMIRROR + WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) WRITE (6,*) 'NCOMB =', NCOMB ENDIF ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/auto_dsig1.f index a41c6f876a..91f560f3b1 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/auto_dsig1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/auto_dsig1.f @@ -553,11 +553,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, INTEGER*4 NWARNINGS SAVE NWARNINGS DATA NWARNINGS/0/ - - LOGICAL FIRST + + INTEGER IMIRROR, IPROC + COMMON/TO_MIRROR/IMIRROR, IPROC + + LOGICAL FIRST(2) SAVE FIRST - DATA FIRST/.TRUE./ - + DATA FIRST/.TRUE., .TRUE./ + IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2) #endif call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1 @@ -585,12 +588,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0' STOP ENDIF - IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461) - CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering - & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, - & SELECTED_HEL2, SELECTED_COL2 ) - FIRST = .FALSE. -c ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) + IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461) +c Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!? + IF( FIRST(1) .AND. FIRST(2) ) THEN + CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering + & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, + & SELECTED_HEL2, SELECTED_COL2 ) + ENDIF + FIRST(IMIRROR) = .FALSE. +c ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872) +c This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all) CALL RESET_CUMULATIVE_VARIABLE() ! 
mimic 'avoid bias of the initialization' within SMATRIX1 ENDIF @@ -600,6 +607,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, & ' in total number of helicities', NTOTHEL, NCOMB STOP ENDIF + WRITE (6,*) 'IMIRROR =', IMIRROR WRITE (6,*) 'NGOODHEL =', NGOODHEL WRITE (6,*) 'NCOMB =', NCOMB ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/matrix1.f index f77bfa066c..88e2776f95 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/matrix1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gu_ttxgu/matrix1.f @@ -280,7 +280,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL, NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1 ENDIF END DO - WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror? + WRITE (6,*) 'IMIRROR =', IMIRROR + WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) WRITE (6,*) 'NCOMB =', NCOMB ENDIF ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/auto_dsig1.f index 700cdbece2..986b220868 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/auto_dsig1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/auto_dsig1.f @@ -553,11 +553,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, INTEGER*4 NWARNINGS SAVE NWARNINGS DATA NWARNINGS/0/ - - LOGICAL FIRST + + INTEGER IMIRROR, IPROC + COMMON/TO_MIRROR/IMIRROR, IPROC + + LOGICAL FIRST(2) SAVE FIRST - DATA FIRST/.TRUE./ - + DATA FIRST/.TRUE., .TRUE./ + IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2) #endif call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1 @@ -585,12 +588,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0' STOP ENDIF - IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461) - CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering - & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, - & SELECTED_HEL2, SELECTED_COL2 ) - FIRST = .FALSE. -c ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) + IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461) +c Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!? + IF( FIRST(1) .AND. FIRST(2) ) THEN + CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering + & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, + & SELECTED_HEL2, SELECTED_COL2 ) + ENDIF + FIRST(IMIRROR) = .FALSE. +c ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872) +c This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all) CALL RESET_CUMULATIVE_VARIABLE() ! 
mimic 'avoid bias of the initialization' within SMATRIX1 ENDIF @@ -600,6 +607,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, & ' in total number of helicities', NTOTHEL, NCOMB STOP ENDIF + WRITE (6,*) 'IMIRROR =', IMIRROR WRITE (6,*) 'NGOODHEL =', NGOODHEL WRITE (6,*) 'NCOMB =', NCOMB ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/matrix1.f index 4c36b4bcce..272783c1ae 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/matrix1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_gux_ttxgux/matrix1.f @@ -280,7 +280,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL, NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1 ENDIF END DO - WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror? + WRITE (6,*) 'IMIRROR =', IMIRROR + WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) WRITE (6,*) 'NCOMB =', NCOMB ENDIF ENDIF diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/auto_dsig1.f index bc898ac10e..ef18a27a07 100644 --- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/auto_dsig1.f +++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/auto_dsig1.f @@ -580,11 +580,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, INTEGER*4 NWARNINGS SAVE NWARNINGS DATA NWARNINGS/0/ - - LOGICAL FIRST + + INTEGER IMIRROR, IPROC + COMMON/TO_MIRROR/IMIRROR, IPROC + + LOGICAL FIRST(2) SAVE FIRST - DATA FIRST/.TRUE./ - + DATA FIRST/.TRUE., .TRUE./ + IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2) #endif call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1 @@ -612,12 +615,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL, WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0' STOP ENDIF - IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461) - CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering - & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, - & SELECTED_HEL2, SELECTED_COL2 ) - FIRST = .FALSE. -c ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) + IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461) +c Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!? + IF( FIRST(1) .AND. FIRST(2) ) THEN + CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering + & P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2, + & SELECTED_HEL2, SELECTED_COL2 ) + ENDIF + FIRST(IMIRROR) = .FALSE. +c ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872) +c This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486) IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all) CALL RESET_CUMULATIVE_VARIABLE() ! 
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/auto_dsig1.f
index bc898ac10e..ef18a27a07 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/auto_dsig1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/auto_dsig1.f
@@ -580,11 +580,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
       INTEGER*4 NWARNINGS
       SAVE NWARNINGS
       DATA NWARNINGS/0/
-
-      LOGICAL FIRST
+
+      INTEGER IMIRROR, IPROC
+      COMMON/TO_MIRROR/IMIRROR, IPROC
+
+      LOGICAL FIRST(2)
       SAVE FIRST
-      DATA FIRST/.TRUE./
-
+      DATA FIRST/.TRUE., .TRUE./
+
       IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2)
 #endif
         call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1
@@ -612,12 +615,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
         WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0'
         STOP
       ENDIF
-      IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461)
-        CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
-     &    P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
-     &    SELECTED_HEL2, SELECTED_COL2 )
-        FIRST = .FALSE.
-c       ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
+      IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461)
+c       Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!?
+        IF( FIRST(1) .AND. FIRST(2) ) THEN
+          CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
+     &      P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
+     &      SELECTED_HEL2, SELECTED_COL2 )
+        ENDIF
+        FIRST(IMIRROR) = .FALSE.
+c       ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872)
+c       This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
         IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all)
           CALL RESET_CUMULATIVE_VARIABLE() ! mimic 'avoid bias of the initialization' within SMATRIX1
         ENDIF
@@ -627,6 +634,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
      &       ' in total number of helicities', NTOTHEL, NCOMB
           STOP
         ENDIF
+        WRITE (6,*) 'IMIRROR =', IMIRROR
         WRITE (6,*) 'NGOODHEL =', NGOODHEL
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/matrix1.f
index eec298dc6c..69a42e5831 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/matrix1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uc_ttxuc/matrix1.f
@@ -282,7 +282,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL,
             NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1
           ENDIF
         END DO
-        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror?
+        WRITE (6,*) 'IMIRROR =', IMIRROR
+        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR)
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
     ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/auto_dsig1.f
index 3db88ba2c3..2f42342e5e 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/auto_dsig1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/auto_dsig1.f
@@ -652,11 +652,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
       INTEGER*4 NWARNINGS
       SAVE NWARNINGS
       DATA NWARNINGS/0/
-
-      LOGICAL FIRST
+
+      INTEGER IMIRROR, IPROC
+      COMMON/TO_MIRROR/IMIRROR, IPROC
+
+      LOGICAL FIRST(2)
       SAVE FIRST
-      DATA FIRST/.TRUE./
-
+      DATA FIRST/.TRUE., .TRUE./
+
       IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2)
 #endif
         call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1
@@ -684,12 +687,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
         WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0'
         STOP
       ENDIF
-      IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461)
-        CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
-     &    P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
-     &    SELECTED_HEL2, SELECTED_COL2 )
-        FIRST = .FALSE.
-c       ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
+      IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461)
+c       Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!?
+        IF( FIRST(1) .AND. FIRST(2) ) THEN
+          CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
+     &      P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
+     &      SELECTED_HEL2, SELECTED_COL2 )
+        ENDIF
+        FIRST(IMIRROR) = .FALSE.
+c       ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872)
+c       This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
         IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all)
           CALL RESET_CUMULATIVE_VARIABLE() ! mimic 'avoid bias of the initialization' within SMATRIX1
         ENDIF
@@ -699,6 +706,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
      &       ' in total number of helicities', NTOTHEL, NCOMB
           STOP
         ENDIF
+        WRITE (6,*) 'IMIRROR =', IMIRROR
         WRITE (6,*) 'NGOODHEL =', NGOODHEL
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/matrix1.f
index a530c382f1..e94d889499 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/matrix1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_ucx_ttxucx/matrix1.f
@@ -288,7 +288,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL,
             NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1
           ENDIF
         END DO
-        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror?
+        WRITE (6,*) 'IMIRROR =', IMIRROR
+        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR)
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
     ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/auto_dsig1.f
index 8988ba6c1d..a8734a65ea 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/auto_dsig1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/auto_dsig1.f
@@ -564,11 +564,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
       INTEGER*4 NWARNINGS
       SAVE NWARNINGS
       DATA NWARNINGS/0/
-
-      LOGICAL FIRST
+
+      INTEGER IMIRROR, IPROC
+      COMMON/TO_MIRROR/IMIRROR, IPROC
+
+      LOGICAL FIRST(2)
       SAVE FIRST
-      DATA FIRST/.TRUE./
-
+      DATA FIRST/.TRUE., .TRUE./
+
       IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2)
 #endif
         call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1
@@ -596,12 +599,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
         WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0'
         STOP
       ENDIF
-      IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461)
-        CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
-     &    P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
-     &    SELECTED_HEL2, SELECTED_COL2 )
-        FIRST = .FALSE.
-c       ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
+      IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461)
+c       Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!?
+        IF( FIRST(1) .AND. FIRST(2) ) THEN
+          CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
+     &      P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
+     &      SELECTED_HEL2, SELECTED_COL2 )
+        ENDIF
+        FIRST(IMIRROR) = .FALSE.
+c       ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872)
+c       This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
         IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all)
           CALL RESET_CUMULATIVE_VARIABLE() ! mimic 'avoid bias of the initialization' within SMATRIX1
         ENDIF
@@ -611,6 +618,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
      &       ' in total number of helicities', NTOTHEL, NCOMB
           STOP
         ENDIF
+        WRITE (6,*) 'IMIRROR =', IMIRROR
         WRITE (6,*) 'NGOODHEL =', NGOODHEL
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/matrix1.f
index f6d8294bd3..6bcc6deddf 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/matrix1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uu_ttxuu/matrix1.f
@@ -280,7 +280,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL,
             NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1
           ENDIF
         END DO
-        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror?
+        WRITE (6,*) 'IMIRROR =', IMIRROR
+        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR)
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
     ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/auto_dsig1.f
index 37b6741d5b..29650513c2 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/auto_dsig1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/auto_dsig1.f
@@ -652,11 +652,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
       INTEGER*4 NWARNINGS
       SAVE NWARNINGS
       DATA NWARNINGS/0/
-
-      LOGICAL FIRST
+
+      INTEGER IMIRROR, IPROC
+      COMMON/TO_MIRROR/IMIRROR, IPROC
+
+      LOGICAL FIRST(2)
       SAVE FIRST
-      DATA FIRST/.TRUE./
-
+      DATA FIRST/.TRUE., .TRUE./
+
       IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2)
 #endif
         call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1
@@ -684,12 +687,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
         WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0'
         STOP
       ENDIF
-      IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461)
-        CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
-     &    P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
-     &    SELECTED_HEL2, SELECTED_COL2 )
-        FIRST = .FALSE.
-c       ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
+      IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461)
+c       Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!?
+        IF( FIRST(1) .AND. FIRST(2) ) THEN
+          CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
+     &      P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
+     &      SELECTED_HEL2, SELECTED_COL2 )
+        ENDIF
+        FIRST(IMIRROR) = .FALSE.
+c       ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872)
+c       This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
         IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all)
           CALL RESET_CUMULATIVE_VARIABLE() ! mimic 'avoid bias of the initialization' within SMATRIX1
         ENDIF
@@ -699,6 +706,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
      &       ' in total number of helicities', NTOTHEL, NCOMB
           STOP
         ENDIF
+        WRITE (6,*) 'IMIRROR =', IMIRROR
         WRITE (6,*) 'NGOODHEL =', NGOODHEL
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/matrix1.f
index 4b974a1e79..1d6e64769a 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/matrix1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxccx/matrix1.f
@@ -288,7 +288,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL,
             NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1
           ENDIF
         END DO
-        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror?
+        WRITE (6,*) 'IMIRROR =', IMIRROR
+        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR)
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
     ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/auto_dsig1.f
index 4f5f2bb65a..64145e43d4 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/auto_dsig1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/auto_dsig1.f
@@ -564,11 +564,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
       INTEGER*4 NWARNINGS
       SAVE NWARNINGS
       DATA NWARNINGS/0/
-
-      LOGICAL FIRST
+
+      INTEGER IMIRROR, IPROC
+      COMMON/TO_MIRROR/IMIRROR, IPROC
+
+      LOGICAL FIRST(2)
       SAVE FIRST
-      DATA FIRST/.TRUE./
-
+      DATA FIRST/.TRUE., .TRUE./
+
       IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2)
 #endif
         call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1
@@ -596,12 +599,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
         WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0'
         STOP
       ENDIF
-      IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461)
-        CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
-     &    P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
-     &    SELECTED_HEL2, SELECTED_COL2 )
-        FIRST = .FALSE.
-c       ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
+      IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461)
+c       Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!?
+        IF( FIRST(1) .AND. FIRST(2) ) THEN
+          CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
+     &      P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
+     &      SELECTED_HEL2, SELECTED_COL2 )
+        ENDIF
+        FIRST(IMIRROR) = .FALSE.
+c       ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872)
+c       This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
         IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all)
           CALL RESET_CUMULATIVE_VARIABLE() ! mimic 'avoid bias of the initialization' within SMATRIX1
         ENDIF
@@ -611,6 +618,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
      &       ' in total number of helicities', NTOTHEL, NCOMB
           STOP
         ENDIF
+        WRITE (6,*) 'IMIRROR =', IMIRROR
         WRITE (6,*) 'NGOODHEL =', NGOODHEL
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/matrix1.f
index 3c33819612..6a1c5ad014 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/matrix1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxgg/matrix1.f
@@ -280,7 +280,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL,
             NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1
           ENDIF
         END DO
-        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror?
+        WRITE (6,*) 'IMIRROR =', IMIRROR
+        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR)
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
     ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/auto_dsig1.f
index 598e4f55b8..d106b30342 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/auto_dsig1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/auto_dsig1.f
@@ -564,11 +564,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
       INTEGER*4 NWARNINGS
       SAVE NWARNINGS
       DATA NWARNINGS/0/
-
-      LOGICAL FIRST
+
+      INTEGER IMIRROR, IPROC
+      COMMON/TO_MIRROR/IMIRROR, IPROC
+
+      LOGICAL FIRST(2)
       SAVE FIRST
-      DATA FIRST/.TRUE./
-
+      DATA FIRST/.TRUE., .TRUE./
+
       IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2)
 #endif
         call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1
@@ -596,12 +599,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
         WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0'
         STOP
       ENDIF
-      IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461)
-        CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
-     &    P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
-     &    SELECTED_HEL2, SELECTED_COL2 )
-        FIRST = .FALSE.
-c       ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
+      IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461)
+c       Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!?
+        IF( FIRST(1) .AND. FIRST(2) ) THEN
+          CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
+     &      P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
+     &      SELECTED_HEL2, SELECTED_COL2 )
+        ENDIF
+        FIRST(IMIRROR) = .FALSE.
+c       ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872)
+c       This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
         IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all)
           CALL RESET_CUMULATIVE_VARIABLE() ! mimic 'avoid bias of the initialization' within SMATRIX1
         ENDIF
@@ -611,6 +618,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
      &       ' in total number of helicities', NTOTHEL, NCOMB
           STOP
         ENDIF
+        WRITE (6,*) 'IMIRROR =', IMIRROR
         WRITE (6,*) 'NGOODHEL =', NGOODHEL
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/matrix1.f
index 485ad633d3..f424d75e92 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/matrix1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uux_ttxuux/matrix1.f
@@ -280,7 +280,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL,
             NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1
           ENDIF
         END DO
-        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror?
+        WRITE (6,*) 'IMIRROR =', IMIRROR
+        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR)
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
     ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/auto_dsig1.f
index dd3cd5c8a4..2787aac1e7 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/auto_dsig1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/auto_dsig1.f
@@ -580,11 +580,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
       INTEGER*4 NWARNINGS
       SAVE NWARNINGS
       DATA NWARNINGS/0/
-
-      LOGICAL FIRST
+
+      INTEGER IMIRROR, IPROC
+      COMMON/TO_MIRROR/IMIRROR, IPROC
+
+      LOGICAL FIRST(2)
       SAVE FIRST
-      DATA FIRST/.TRUE./
-
+      DATA FIRST/.TRUE., .TRUE./
+
       IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2)
 #endif
         call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1
@@ -612,12 +615,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
         WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0'
         STOP
       ENDIF
-      IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461)
-        CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
-     &    P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
-     &    SELECTED_HEL2, SELECTED_COL2 )
-        FIRST = .FALSE.
-c       ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
+      IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461)
+c       Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!?
+        IF( FIRST(1) .AND. FIRST(2) ) THEN
+          CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
+     &      P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
+     &      SELECTED_HEL2, SELECTED_COL2 )
+        ENDIF
+        FIRST(IMIRROR) = .FALSE.
+c       ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872)
+c       This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
         IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all)
           CALL RESET_CUMULATIVE_VARIABLE() ! mimic 'avoid bias of the initialization' within SMATRIX1
         ENDIF
@@ -627,6 +634,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
      &       ' in total number of helicities', NTOTHEL, NCOMB
           STOP
         ENDIF
+        WRITE (6,*) 'IMIRROR =', IMIRROR
         WRITE (6,*) 'NGOODHEL =', NGOODHEL
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/matrix1.f
index 16d80c44b6..65687b2dc8 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/matrix1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxcx_ttxuxcx/matrix1.f
@@ -282,7 +282,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL,
             NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1
           ENDIF
         END DO
-        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror?
+        WRITE (6,*) 'IMIRROR =', IMIRROR
+        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR)
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
     ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/auto_dsig1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/auto_dsig1.f
index ef5dde5b56..60e4aca98e 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/auto_dsig1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/auto_dsig1.f
@@ -564,11 +564,14 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
       INTEGER*4 NWARNINGS
       SAVE NWARNINGS
       DATA NWARNINGS/0/
-
-      LOGICAL FIRST
+
+      INTEGER IMIRROR, IPROC
+      COMMON/TO_MIRROR/IMIRROR, IPROC
+
+      LOGICAL FIRST(2)
       SAVE FIRST
-      DATA FIRST/.TRUE./
-
+      DATA FIRST/.TRUE., .TRUE./
+
       IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2)
 #endif
         call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortran=-1
@@ -596,12 +599,16 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
         WRITE(6,*) 'ERROR! The cudacpp bridge only supports LIMHEL=0'
         STOP
       ENDIF
-      IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461)
-        CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
-     &    P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
-     &    SELECTED_HEL2, SELECTED_COL2 )
-        FIRST = .FALSE.
-c       ! This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
+      IF ( FIRST(IMIRROR) ) THEN ! exclude first pass (helicity filtering) from timers (#461)
+c       Compute helicities only for the first IMIRROR in cudacpp (see #872) - NB this may be IMIRROR=2!?
+        IF( FIRST(1) .AND. FIRST(2) ) THEN
+          CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
+     &      P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
+     &      SELECTED_HEL2, SELECTED_COL2 )
+        ENDIF
+        FIRST(IMIRROR) = .FALSE.
+c       ... But do call reset_cumulative_variable also for the second IMIRROR in cudacpp (FIX #872)
+c       This is a workaround for https://github.com/oliviermattelaer/mg5amc_test/issues/22 (see PR #486)
         IF( FBRIDGE_MODE .EQ. 1 ) THEN ! (CppOnly=1 : SMATRIX1 is not called at all)
           CALL RESET_CUMULATIVE_VARIABLE() ! mimic 'avoid bias of the initialization' within SMATRIX1
         ENDIF
@@ -611,6 +618,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNEL,
      &       ' in total number of helicities', NTOTHEL, NCOMB
           STOP
         ENDIF
+        WRITE (6,*) 'IMIRROR =', IMIRROR
         WRITE (6,*) 'NGOODHEL =', NGOODHEL
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/matrix1.f b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/matrix1.f
index 5510afb41e..b1e7a4ecd9 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/matrix1.f
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/P2_uxux_ttxuxux/matrix1.f
@@ -280,7 +280,8 @@ SUBROUTINE SMATRIX1(P, RHEL, RCOL, CHANNEL, IVEC, ANS, IHEL,
             NGOODHEL(IMIRROR)=NGOODHEL(IMIRROR)+1
           ENDIF
         END DO
-        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR) ! no need to print imirror?
+        WRITE (6,*) 'IMIRROR =', IMIRROR
+        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR)
         WRITE (6,*) 'NCOMB =', NCOMB
       ENDIF
     ENDIF
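The matrix1.f hunks have a simpler motivation: NGOODHEL is indexed by mirror, so a bare 'NGOODHEL =' line in the log cannot be attributed to a pass, and the patch prints the index alongside it. A small hypothetical illustration of the resulting two-line printout per mirror (PRNDEMO and the counter values 16 and 12 are arbitrary, not taken from any run):

      PROGRAM PRNDEMO
      IMPLICIT NONE
      INTEGER NGOODHEL(2), IMIRROR
c     Arbitrary demo values for the per-mirror good-helicity counters
      DATA NGOODHEL/16, 12/
      DO IMIRROR = 1, 2
c       Same two WRITE statements as installed by the hunks above
        WRITE (6,*) 'IMIRROR =', IMIRROR
        WRITE (6,*) 'NGOODHEL =', NGOODHEL(IMIRROR)
      END DO
      END

With list-directed output the exact spacing is compiler-dependent, but each NGOODHEL line is now preceded by the IMIRROR it refers to.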