<?xml version="1.0"?>
<file id="env_mach_pes.xml" version="2.0">
<header>
These variables CANNOT be modified once case.setup has been
invoked without first invoking case.setup --reset.
NTASKS: the total number of MPI tasks; a negative value indicates nodes rather than tasks.
NTHRDS: the number of OpenMP threads per MPI task.
ROOTPE: the global MPI task of the component root task; a negative value indicates nodes rather than tasks.
PSTRID: the stride of MPI tasks across the global set of PEs (for now set to 1).
NINST : the number of component instances, which are spread evenly across NTASKS.
For example, with NTASKS = 8, NTHRDS = 2, ROOTPE = 32, and NINST = 2,
the MPI tasks would be placed starting on global PE 32, each task would be
threaded 2 ways, and the 8 tasks would be divided evenly between the two
instances (4 tasks each).
Note: PEs that support threading never have an MPI task associated
with them, for performance reasons. As a result, NTASKS and ROOTPE are
largely independent of NTHRDS: NTASKS and ROOTPE determine the layout of
MPI tasks across components, while NTHRDS determines how those tasks are
placed across the machine.
The following values should not be set by the user, since they will be
overwritten by scripts: TOTALPES, NTASKS_PER_INST.
</header>
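<!-- Illustrative sketch only (not read as configuration): the header's example
of NTASKS=8, NTHRDS=2, ROOTPE=32, NINST=2, written as the ATM rows of the
entries below would look; other components' values are omitted here:
  NTASKS : <value compclass="ATM">8</value>
  NTHRDS : <value compclass="ATM">2</value>
  ROOTPE : <value compclass="ATM">32</value>
with NINST=2 set in the NINST entry, giving 4 tasks per instance. -->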
<comment>none</comment>
<group id="mach_pes">
<entry id="ESMF_AWARE_THREADING" value="FALSE">
<type>logical</type>
<valid_values>TRUE,FALSE</valid_values>
<desc>TRUE indicates that ESMF-aware threading is used</desc>
</entry>
<entry id="ALLOCATE_SPARE_NODES" value="FALSE">
<type>logical</type>
<valid_values>TRUE,FALSE</valid_values>
<desc>Allocate spare nodes to handle node failures; the system will pick a reasonable number.</desc>
</entry>
<entry id="FORCE_SPARE_NODES" value="-999">
<type>integer</type>
<desc>Force this exact number of spare nodes to be allocated</desc>
</entry>
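<!-- Sketch (assuming the standard CIME xmlchange utility is available in the
case directory): either let the system choose a spare-node count
  ./xmlchange ALLOCATE_SPARE_NODES=TRUE
or force an exact count, e.g. two spare nodes:
  ./xmlchange FORCE_SPARE_NODES=2 -->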
<entry id="NTASKS">
<type>integer</type>
<values>
<value compclass="ATM">-1</value>
<value compclass="CPL">-1</value>
<value compclass="OCN">-1</value>
<value compclass="WAV">-1</value>
<value compclass="GLC">-1</value>
<value compclass="ICE">-1</value>
<value compclass="ROF">-1</value>
<value compclass="LND">-1</value>
<value compclass="ESP">-1</value>
</values>
<desc>number of tasks for each component</desc>
</entry>
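<!-- Sketch (assuming CIME's xmlchange): the -1 values above request one full
node per component. To give the atmosphere 128 explicit MPI tasks instead:
  ./xmlchange NTASKS_ATM=128
or to set every component at once:
  ./xmlchange NTASKS=128
After changing these, re-run case.setup with its reset flag, per the header. -->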
<entry id="NTASKS_PER_INST">
<type>integer</type>
<values>
<value compclass="ATM">128</value>
<value compclass="OCN">128</value>
<value compclass="WAV">128</value>
<value compclass="GLC">128</value>
<value compclass="ICE">128</value>
<value compclass="ROF">128</value>
<value compclass="LND">128</value>
<value compclass="ESP">128</value>
</values>
<desc>Number of tasks per instance for each component. DO NOT EDIT: Set automatically by case.setup based on NTASKS, NINST and MULTI_DRIVER</desc>
</entry>
<entry id="NTHRDS">
<type>integer</type>
<values>
<value compclass="ATM">1</value>
<value compclass="CPL">1</value>
<value compclass="OCN">1</value>
<value compclass="WAV">1</value>
<value compclass="GLC">1</value>
<value compclass="ICE">1</value>
<value compclass="ROF">1</value>
<value compclass="LND">1</value>
<value compclass="ESP">1</value>
</values>
<desc>number of threads for each task in each component</desc>
</entry>
<entry id="ROOTPE">
<type>integer</type>
<values>
<value compclass="ATM">0</value>
<value compclass="CPL">0</value>
<value compclass="OCN">0</value>
<value compclass="WAV">0</value>
<value compclass="GLC">0</value>
<value compclass="ICE">0</value>
<value compclass="ROF">0</value>
<value compclass="LND">0</value>
<value compclass="ESP">0</value>
</values>
<desc>ROOTPE (MPI task rank in MPI_COMM_WORLD) for each component</desc>
</entry>
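<!-- Sketch of a concurrent layout (illustrative values, not from this case):
run the ocean on its own PEs by offsetting its root task, e.g.
  ./xmlchange NTASKS_OCN=64,ROOTPE_OCN=128
which places the 64 ocean tasks on global PEs 128..191 while components
rooted at PE 0 share PEs 0..127. -->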
<entry id="MULTI_DRIVER" value="TRUE">
<type>logical</type>
<valid_values>TRUE</valid_values>
<desc>MULTI_DRIVER mode provides a separate driver/coupler component for each
ensemble member; all components must have an equal number of members.
MULTI_DRIVER is always TRUE for the NUOPC driver; the variable is retained
for compatibility with the MCT driver.</desc>
</entry>
<entry id="NINST" value="1">
<type>integer</type>
<desc>Number of instances of the model.
</desc>
</entry>
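<!-- Sketch (assuming CIME's xmlchange): a two-member ensemble
  ./xmlchange NINST=2
With MULTI_DRIVER=TRUE each member gets its own driver/coupler, and
NTASKS_PER_INST is then recomputed by case.setup; do not edit it by hand. -->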
<entry id="PSTRID">
<type>integer</type>
<values>
<value compclass="ATM">1</value>
<value compclass="CPL">1</value>
<value compclass="OCN">1</value>
<value compclass="WAV">1</value>
<value compclass="GLC">1</value>
<value compclass="ICE">1</value>
<value compclass="ROF">1</value>
<value compclass="LND">1</value>
<value compclass="ESP">1</value>
</values>
<desc>The stride across the global set of MPI processors associated with the MPI tasks for a component</desc>
</entry>
<entry id="NGPUS_PER_NODE" value="0">
<type>integer</type>
<desc> Number of GPUs per node used for simulation </desc>
</entry>
</group>
<group id="mach_pes_last">
<entry id="COST_PES" value="128">
<type>integer</type>
<desc>pes or cores used relative to MAX_MPITASKS_PER_NODE for accounting (0 means TOTALPES is valid)</desc>
</entry>
<entry id="TOTALPES" value="128">
<type>integer</type>
<desc>total number of MPI tasks (setup automatically - DO NOT EDIT)</desc>
</entry>
<entry id="MAX_TASKS_PER_NODE" value="128">
<type>integer</type>
<desc>maximum number of tasks/threads allowed per node</desc>
</entry>
<entry id="MAX_MPITASKS_PER_NODE" value="128">
<type>integer</type>
<desc>pes or cores per node for accounting purposes </desc>
</entry>
<entry id="MAX_CPUTASKS_PER_GPU_NODE" value="64">
<type>integer</type>
<desc> Number of CPU cores per GPU node used for simulation </desc>
</entry>
<entry id="MAX_GPUS_PER_NODE" value="4">
<type>integer</type>
<desc>maximum number of GPUs allowed per node </desc>
</entry>
<entry id="COSTPES_PER_NODE" value="$MAX_MPITASKS_PER_NODE">
<type>integer</type>
<desc>pes or cores per node for accounting purposes </desc>
</entry>
</group>
<group id="run_pio">
<entry id="PIO_ASYNCIO_NTASKS" value="0">
<type>integer</type>
<desc>Task count for asynchronous IO; only valid if PIO_ASYNC_INTERFACE is TRUE</desc>
</entry>
<entry id="PIO_ASYNCIO_STRIDE" value="0">
<type>integer</type>
<desc>Stride of tasks for asynchronous IO; only valid if PIO_ASYNC_INTERFACE is TRUE</desc>
</entry>
<entry id="PIO_ASYNCIO_ROOTPE" value="1">
<type>integer</type>
<desc>RootPE of tasks for asynchronous IO; only valid if PIO_ASYNC_INTERFACE is TRUE</desc>
</entry>
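<!-- Sketch (PIO_ASYNC_INTERFACE itself is assumed to be defined elsewhere,
e.g. in env_run.xml): dedicate 4 tasks to asynchronous IO, one every 32
tasks, starting at global task 1:
  ./xmlchange PIO_ASYNC_INTERFACE=TRUE
  ./xmlchange PIO_ASYNCIO_NTASKS=4,PIO_ASYNCIO_STRIDE=32,PIO_ASYNCIO_ROOTPE=1
These three settings are ignored unless PIO_ASYNC_INTERFACE is TRUE. -->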
</group>
</file>