From 10798fc425c56754d66999dea3a707acffd4ccf4 Mon Sep 17 00:00:00 2001
From: Andy Turner
Date: Tue, 24 Oct 2023 09:27:39 +0100
Subject: [PATCH] Updates to mkdocs version

---
 .github/workflows/ci.yml                      |   18 +
 LICENSE.txt                                   |   34 +
 README.md                                     |   64 +
 STYLE_GUIDE.md                                |   16 +
 docs/CNAME                                    |    1 +
 docs/favicon.ico                              |  Bin 0 -> 4286 bytes
 ...rrus_logo_white-Transparent-Background.png |  Bin 0 -> 35431 bytes
 docs/images/epcc_logo.png                     |  Bin 0 -> 33207 bytes
 docs/images/epcc_uoe_epsrc.png                |  Bin 0 -> 40986 bytes
 docs/images/epccuoe_logo.png                  |  Bin 0 -> 22810 bytes
 docs/images/whiteEPCCtransp.png               |  Bin 0 -> 11332 bytes
 docs/index.md                                 |   39 +
 docs/software-libraries/hdf5.md               |   16 +
 docs/software-libraries/intel_mkl.md          |  112 ++
 docs/software-packages/Ansys.md               |  107 ++
 docs/software-packages/MATLAB.md              |  602 ++++++++++
 docs/software-packages/altair_hw.md           |  173 +++
 docs/software-packages/castep.md              |   67 ++
 docs/software-packages/cp2k.md                |   92 ++
 docs/software-packages/elements.md            |  105 ++
 docs/software-packages/flacs.md               |  297 +++++
 docs/software-packages/gaussian.md            |  116 ++
 docs/software-packages/gromacs.md             |  136 +++
 docs/software-packages/helyx.md               |  105 ++
 .../images/MATLAB_image1.png                  |  Bin 0 -> 36773 bytes
 docs/software-packages/lammps.md              |   92 ++
 docs/software-packages/molpro.md              |   67 ++
 docs/software-packages/namd.md                |   83 ++
 docs/software-packages/openfoam.md            |   75 ++
 docs/software-packages/orca.md                |   75 ++
 docs/software-packages/qe.md                  |   47 +
 docs/software-packages/specfem3d.md           |   55 +
 docs/software-packages/starccm+.md            |  134 +++
 docs/software-packages/vasp.md                |  125 ++
 docs/software-tools/ddt.md                    |  137 +++
 docs/software-tools/intel-vtune.md            |  144 +++
 docs/software-tools/scalasca.md               |   56 +
 docs/stylesheets/cirrus - Copy.css            |   53 +
 docs/stylesheets/cirrus.css                   |   22 +
 docs/user-guide/batch.md                      | 1026 +++++++++++++++++
 docs/user-guide/connecting-totp.md            |  480 ++++++++
 docs/user-guide/connecting.md                 |  501 ++++++++
 docs/user-guide/data.md                       |  363 ++++++
 docs/user-guide/development.md                |  441 +++++++
 docs/user-guide/example_hybrid_hpempt.bash    |   33 +
 docs/user-guide/example_hybrid_impi.bash      |   32 +
 docs/user-guide/example_mpi_hpempt.bash       |   32 +
 docs/user-guide/example_mpi_impi.bash         |   28 +
 docs/user-guide/gpu.md                        |  579 ++++++++++
 docs/user-guide/introduction.md               |   40 +
 docs/user-guide/network-upgrade-2023.md       |   51 +
 docs/user-guide/python.md                     |  556 +++++++++
 docs/user-guide/reading.md                    |   60 +
 docs/user-guide/resource_management.md        |  343 ++++++
 docs/user-guide/singularity.md                |  397 +++++++
 docs/user-guide/solidstate.md                 |  110 ++
 mkdocs.yml                                    |   80 ++
 57 files changed, 8417 insertions(+)
 create mode 100644 .github/workflows/ci.yml
 create mode 100644 LICENSE.txt
 create mode 100644 README.md
 create mode 100644 STYLE_GUIDE.md
 create mode 100644 docs/CNAME
 create mode 100644 docs/favicon.ico
 create mode 100644 docs/images/cirrus_logo_white-Transparent-Background.png
 create mode 100644 docs/images/epcc_logo.png
 create mode 100644 docs/images/epcc_uoe_epsrc.png
 create mode 100644 docs/images/epccuoe_logo.png
 create mode 100644 docs/images/whiteEPCCtransp.png
 create mode 100644 docs/index.md
 create mode 100644 docs/software-libraries/hdf5.md
 create mode 100644 docs/software-libraries/intel_mkl.md
 create mode 100644 docs/software-packages/Ansys.md
 create mode 100644 docs/software-packages/MATLAB.md
 create mode 100644 docs/software-packages/altair_hw.md
 create mode 100644 docs/software-packages/castep.md
 create mode 100644 docs/software-packages/cp2k.md
 create mode 100644 docs/software-packages/elements.md
 create mode 100644 docs/software-packages/flacs.md
 create mode 100644 docs/software-packages/gaussian.md
 create mode 100644 docs/software-packages/gromacs.md
 create mode 100644 docs/software-packages/helyx.md
 create mode 100644 docs/software-packages/images/MATLAB_image1.png
 create mode 100644 docs/software-packages/lammps.md
 create mode 100644 docs/software-packages/molpro.md
 create mode 100644 docs/software-packages/namd.md
 create mode 100644 docs/software-packages/openfoam.md
 create mode 100644 docs/software-packages/orca.md
 create mode 100644 docs/software-packages/qe.md
 create mode 100644 docs/software-packages/specfem3d.md
 create mode 100644 docs/software-packages/starccm+.md
 create mode 100644 docs/software-packages/vasp.md
 create mode 100644 docs/software-tools/ddt.md
 create mode 100644 docs/software-tools/intel-vtune.md
 create mode 100644 docs/software-tools/scalasca.md
 create mode 100644 docs/stylesheets/cirrus - Copy.css
 create mode 100644 docs/stylesheets/cirrus.css
 create mode 100644 docs/user-guide/batch.md
 create mode 100644 docs/user-guide/connecting-totp.md
 create mode 100644 docs/user-guide/connecting.md
 create mode 100644 docs/user-guide/data.md
 create mode 100644 docs/user-guide/development.md
 create mode 100644 docs/user-guide/example_hybrid_hpempt.bash
 create mode 100644 docs/user-guide/example_hybrid_impi.bash
 create mode 100644 docs/user-guide/example_mpi_hpempt.bash
 create mode 100644 docs/user-guide/example_mpi_impi.bash
 create mode 100644 docs/user-guide/gpu.md
 create mode 100644 docs/user-guide/introduction.md
 create mode 100644 docs/user-guide/network-upgrade-2023.md
 create mode 100644 docs/user-guide/python.md
 create mode 100644 docs/user-guide/reading.md
 create mode 100644 docs/user-guide/resource_management.md
 create mode 100644 docs/user-guide/singularity.md
 create mode 100644 docs/user-guide/solidstate.md
 create mode 100644 mkdocs.yml

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000..00d2429b
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,18 @@
+name: ci
+on:
+  push:
+    branches:
+      - main
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+        with:
+          python-version: 3.x
+      - run: pip install \
+          mkdocs-material \
+          pymdown-extensions
+      - run: git fetch -u origin gh-pages:gh-pages
+      - run: mkdocs gh-deploy --force
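The workflow above is the whole deployment pipeline: it installs the theme and extensions, fetches the published `gh-pages` branch, and redeploys the site on every push to `main`. If a deployment ever needs to be reproduced or debugged by hand, the same steps map onto a short shell session. The sketch below is illustrative only; it assumes a local clone of the repository, Python 3 with `pip` on the PATH, and push access to the `gh-pages` branch.

```bash
# Manual equivalent of the ci.yml deploy job, run from the root of a local clone.
# Assumes Python 3/pip are available and you can push to gh-pages (assumption).
pip install mkdocs-material pymdown-extensions

# Bring the local gh-pages branch in line with the published one.
git fetch -u origin gh-pages:gh-pages

# Build the site and push it to gh-pages, which GitHub Pages serves.
mkdocs gh-deploy --force
```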
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 00000000..5fe2c874
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,34 @@
+CC BY 4.0
+
+Attribution 4.0 International (CC BY 4.0)
+
+This is a human-readable summary of (and not a substitute for) the license
+(https://creativecommons.org/licenses/by/4.0/legalcode).
+
+You are free to:
+
+ - Share — copy and redistribute the material in any medium or format
+ - Adapt — remix, transform, and build upon the material
+
+for any purpose, even commercially.
+
+This license is acceptable for Free Cultural Works.
+
+The licensor cannot revoke these freedoms as long as you follow the license terms.
+
+Under the following terms:
+
+ - Attribution — You must give appropriate credit, provide a link to the license, and
+   indicate if changes were made. You may do so in any reasonable manner, but not in any
+   way that suggests the licensor endorses you or your use.
+
+ - No additional restrictions — You may not apply legal terms or technological measures
+   that legally restrict others from doing anything the license permits.
+
+Notices:
+
+You do not have to comply with the license for elements of the material in the public domain
+or where your use is permitted by an applicable exception or limitation. No warranties are
+given. The license may not give you all of the permissions necessary for your intended use.
+For example, other rights such as publicity, privacy, or moral rights may limit how you use
+the material.

diff --git a/README.md b/README.md
new file mode 100644
index 00000000..0e867cc2
--- /dev/null
+++ b/README.md
@@ -0,0 +1,64 @@
+# Cirrus Documentation
+
+Cirrus is EPCC's Tier-2 High Performance Computing (HPC) cluster.
+
+This repository contains the documentation for the service and is linked
+to a rendered version on Github pages.
+
+This documentation is drawn from the [Sheffield Iceberg
+documentation](https://github.com/rcgsheffield/sheffield_hpc) and the
+[ARCHER](http://www.archer.ac.uk) documentation.
+
+## Rendered Documentation
+
+ - [Cirrus Documentation (HTML)](https://docs.cirrus.ac.uk)
+
+## How to Contribute
+
+
+We welcome contributions from the Cirrus community and beyond. Contributions can take many
+different forms; some examples are:
+
+- Raising Issues if you spot a mistake or something that could be improved
+- Adding/updating material via a Pull Request
+- Adding your thoughts and ideas to any open issues
+
+All people who contribute and interact via this Github repository undertake to abide by the
+[ARCHER2 Code of Conduct](https://www.archer2.ac.uk/about/policies/code-of-conduct.html) so that
+we, as a community, provide a welcoming and supportive environment for
+all people, regardless of background or identity.
+
+To contribute content to this documentation, first you have to fork it
+on GitHub and clone it to your machine; see [Fork a
+Repo](https://help.github.com/articles/fork-a-repo/) for the GitHub
+documentation on this process.
+
+Once you have the git repository locally on your computer, you will need
+to [install Material for mkdocs](https://squidfunk.github.io/mkdocs-material/getting-started/)
+to be able to build the documentation. This can be done using a local installation
+or using a Docker container.
+
+Once you have made your changes and updated your Fork on GitHub you will
+need to [Open a Pull
+Request](https://help.github.com/articles/using-pull-requests/).
+
+### Building the documentation on a local machine
+
+Once Material for mkdocs is installed, you can preview the site locally using the
+[instructions in the Material for mkdocs documentation](https://squidfunk.github.io/mkdocs-material/creating-your-site/#previewing-as-you-write).
+
+
+## Making changes and style guide
+
+The documentation consists of a series of Markdown files which have the `.md`
+extension. These files are automatically converted to HTML and combined
+into the web version of the documentation by mkdocs. It is important
+that the Markdown syntax is followed when editing the files. If there
+are any errors in your changes, the build will fail and the documentation
+will not update; you can test your build locally by running `mkdocs serve`.
+The easiest way to learn what files should look like is to read the
+Markdown files already in the repository.
+
+A short list of style guidance:
+
+ - Headings should be in sentence case
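The README's contribution steps (fork, clone, install Material for mkdocs, preview, open a Pull Request) can be summarised as a short terminal session. The sketch below is not part of the patch: the clone URL and directory name are placeholders for your own fork, and `mkdocs build --strict`, which turns build warnings into errors, is a standard mkdocs option that the README does not mention but that helps catch broken Markdown before the CI build does.

```bash
# Clone your fork (placeholder URL and directory -- substitute your own fork).
git clone https://github.com/<your-username>/<your-fork>.git
cd <your-fork>

# Install the theme and extensions used by this repository.
pip install mkdocs-material pymdown-extensions

# Preview at http://127.0.0.1:8000; the site rebuilds on every save.
mkdocs serve

# Optional: treat build warnings as errors to catch broken Markdown and links early.
mkdocs build --strict
```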
diff --git a/STYLE_GUIDE.md b/STYLE_GUIDE.md
new file mode 100644
index 00000000..186d6280
--- /dev/null
+++ b/STYLE_GUIDE.md
@@ -0,0 +1,16 @@
+Web site style guide
+=================
+
+All images must have an Alt Text included. If the image is purely decorative then use a blank tag, i.e. `alt=""`.
+
+All hyperlinks should be descriptive text, or an image with a descriptive Alt Text, indicating where the link will take the reader.
+Hyperlinks should not be "Click here", nor should they be the raw HTML link.
+
+All pages should have an H1 heading - this is generally part of each page's template.
+Subsequent headings should be incremental without gaps, i.e. the next level heading should be H2, not a jump to H3.
+
+Headings should generally be written in 'Sentence case'.
+
+[Princeton's guide to accessible web content](https://accessibility.princeton.edu/guidelines/web#accessible-content) is a very good, brief guide to authoring accessible content.
+
+The [WAVE web accessibility evaluation tool](https://wave.webaim.org/) is very useful - browser extensions are available for Chrome, Firefox and Edge to check pages instantly for issues.

diff --git a/docs/CNAME b/docs/CNAME
new file mode 100644
index 00000000..cf1949e8
--- /dev/null
+++ b/docs/CNAME
@@ -0,0 +1 @@
+docs.cirrus.ac.uk
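The `docs/CNAME` file added above is what configures the custom domain for the GitHub Pages site. A quick post-deployment sanity check is sketched below; both commands are standard `dig`/`curl` invocations, and the expected responses are not shown because they depend on the live DNS and hosting configuration.

```bash
# Where does the documentation domain currently resolve?
dig +short docs.cirrus.ac.uk CNAME

# Does the published site respond over HTTPS? (response headers only)
curl -sI https://docs.cirrus.ac.uk | head -n 5
```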
diff --git a/docs/favicon.ico b/docs/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..32e96f1a66ac9309f1fb14e449e4db1c271b57f8
GIT binary patch
literal 4286
[base85-encoded binary data for docs/favicon.ico omitted]

diff --git a/docs/images/cirrus_logo_white-Transparent-Background.png b/docs/images/cirrus_logo_white-Transparent-Background.png
new file mode 100644
index 0000000000000000000000000000000000000000..ac7a1a72018474e090e9ec355145ec650140623b
GIT binary patch
literal 35431
[base85-encoded binary data for docs/images/cirrus_logo_white-Transparent-Background.png omitted; the patch is truncated here]
z5mvro4Z{UNyB88I^br)2dTlkk4<+Ku`Mh&Y_S{bP3pselY^T~rxcpYt@dOd`WBG~e zH3%AB%GcPz`fR<3hVqQ?VvbH>oU4Q%JQsFdPvIY8o?%k>R>j z3RMmA!wtWM-;ac5VbYtIaYhyck@ z>K)NBxF=;Xaq$wlG9IE7Ebd{YYVS|~JoZe- zuiI6^IB*EwHg#AfJRU8SCGa%ZqcL;JtE2=I?YfTM5F@& z@6skgL`p|NRt7dl$7Jh+dRjF?uOzDmv&Ja_L>ioQPO#nlSUWcMLeEoR13Z5Ng|67M7oF z_d@!4D_WkUsHVruHYdnrcj${|0k@sbbz0WlV$?s}U_=l$Y?JD}2HQWg;U#S*^Q*w0 z#yERr^K|*`J<@ylU2V#f;)V(xHkKLsdc{!en?!P+<%UnMam2hU=%{2Spvjwn$J-q`g}Fj&+}INzAJtMcHp=yoA54E+#SslpifrltO2qE;s6xD>?!T3Lx=v9`M{izjWiJJ+ip! znp%aSYjAY>(dh6t-g4f7Q{Wryoc?;7Q2TE7-ma#L^X1$}_*^!1%XZ!w+;VUwBM5D>Uo0jydfYh*@R5nnSDvRwCDY`_#NI&uk`cO~%^=RYBmf>I$#6lgH2o z$6RoN*~%;Wo25%!YkixL9eCcUcIfz*9q-LWBuqMs4XE>hl9@gM_H8X?A?xJ4>)0Fl zB%ptzgMFmS{>j*_#XR;ltp#4sz&5-2zP3PhCIjbap|re}eXJ!{*|!jyqA_M-@3gJn zyU(3Lr{hc6f*o=O!3idI^>6+f{Yy%s~=Se23l)L&FHwlt}H4yW&Ia-7~aDNTY)PoQ8ZLok(BU8;n@&2=b*h zl~|$CZVu#T-bMBWvO?TlfSVn*{cbOGgmFOr`4jJNtLdITX&y%@xUt){wmWd{3aXfF zAogI`et>&-M32Wt&dOtm|CXBq%XA0+aQ7JHyngJ9OScNB#cMkAUUn6fDYZH;!r4J8 zJVr1-8GKVpQ-l}AG#imQZ?fL22Eq?{FknIjimMj!9=yiCV|$tgQAoy}&7m42dGGKr z8^7mF&?i}Sjb!0SC(~DM^&`@a&(dQt@5A{dFpcv`NSbM6@i7nY;yu33mKZ>>|8bA` z0vBVM%}w=*h_CdWx!?I~(4+r8G12E3#>ku$_ezB~X>wN5Mae4ud`*)CtmL@u!lO<} z$-!j1e1=Bz7DZj7B<2MUCvn8|WAO7{D`i)BTAZNC)s8wxLhe@>6BX0rP!ffM{FV#f zt(~Fn2D!_dvDbXjJDpoK+phLhncA2B(#lykv^+plS9{cAybJy?n-`;l}CF4(@ zs6J~{W)xyS`l>NEOdyyWW-rf;z**JpC%f`5TZwVlu9c`AH(AfM+DWs;9CO&Z zG){cYu68sEg#H$%1lv4(4DKu)A!y=4m&>2 zpe3+)ah)y~Z~YxzM8;#teJuO*=UwCRC_&%(UH{G4238CbNaw%-?ky9s>PLifs z`}gB_0T}!E1EYLl}fIP_>`WH47lu-?qf+=;vzm4fbW2ZN}Tmh1`N zNCeP}^{Uw?;82LB&h(yK4<>(ySz2F9nk!csx!{f@$qATo*%|%BArBC zSMr$P_J+a}_W~{Ao60YVd24@f`aB-qcIwqhC#&q#$$%wFO$&LOYd88HZ&!$D^tZp5 z!Y9(<{LBR*N?Tf8~4!CwRVd(2{!{mT!VZud0|Ay2jvR!a>L6-WpUj z7Ij!=?=N!Ir5m3_L&kW1y3EGxQomN>TeB~&yG+jAvqa8)p~Z08*fn~P`!*l;hLUJ! z{Bx^PXgq`aW|IG9jL_~5qu$Js+0A-R$c0j%>i&Kjuie5uWeKWr?>fD#&HC0y6k(YK zZRM6OP2g24={r*r$j3}0jY4r?wONzPO)MF8Io|Iy_-&T+FQrnKDAj<$i#Ns|aq-m_ z6{TGm!@w&rhvB`r*d{jY^)+$r#BN1{l%Ze9wzR5D5Q=CF?S&%rC^*DmA4DxzCdg5nXU_gPIMKXJgT| zUNdfM?0`AcMzFPp4=Ua3vtIYN`svTN0cz!*Q-TJDV>F=ng7D*QCs8wnaQpOzp}h?Wa7?MszKi?*-21X(lW?a8HrtY` zWfsI;a?D0qbhKX=d*f&E*-H;bXqZYp7SxTn{nfdPGg<9Us^2VkSUpBiOvbmOg-WI= z*bYx~@x2W;=sDqsmJ&tkVCNcUS5waxJ0F&OBk#;>1Ch(C4wm(01c+#K068-ZQb(k0 zzt$Mrw~zdeGN>-X*YQVl6=`QLH9;;o)gJJIo+e>g1gnDdqTSg(!{l!XoYx0AUL?pV z6^EuyGTrl^-ydwKTnT!sV{IR{6PHfcKweJcRh7DxA&Iz2MJI#=67h6CVG%WWE=Pc@ zM5a9n!2D3|o0$mY=QF7|#ENlbc&vz37I)=Y1wy~cg0Fu?pr*gtdYGmi(vHVd=Q{vl z8;!>v;V1_|U4w%tNvB#(P1i(BGPk60RSAr(T~Vf@&s@2ZmNE6}?1RUym)lH_-*2jk zsk11qp>8+2cbc+M zuH`0ZujwKsxo^2BuZ32ueQOBpIF-aX|5-2bA zn@jHcwb?x)y>AX=;Wp4PH^gz~XTB;J%qMsll&vyH;^RSG&JdGHy4DkS)HRj(5#(CW z(PiFtw5NJO-J1WZ;Jlr!J!C4i3bIb~AiedKfLi0_oI`Z^Z8m{4HFeFvM_%vRNuwcK z*)2qHyCHNXaV@TZT2$dz$Ied@LsUwuMwf4);d@IDO{-CoLvt=rp%L=l5F)FHiNpAQg5<$P+bacuR9*8&MN)&TZm%AWGKb}S5W)9LBU)Qz@uFyk zhLgG}5QyWRGuA11^|Du6`*{cnlq?Q|xHHjAR9A!v)o6887qd}Gp`s7O*Q=f~pzT2n zC{9U4vb+SPCU37XJVDj-_Q&GVh2o5H&!_=%~h=GE1e~! 
zs^kg$iV-F|Dj(tg`2S2_@f$MY^WJPRX0{ic@x&o|wOn{n$3yC`82`~(eO1|o&U;5` zPzbV(&Gr~6gMfFH;6-616E zyI|(+xgwZY$7{~1b)l*;Ef1`iYIhMLsmNm0*3g@#H}M!>XOY|7N=AnAm^C1|$gxeJ zP6k$3!>GIG_0qWjg->51#TCp+y91C>WzPFv6h49>#$k!*_bCm73~ZXs*9q`h{@^dg zS!u0e58i^cL^6MN?6w`x&YyoJ=CHVWq-8k|&Z(+let}9bXPB&z3h z^C$T|wr0I?UKTs#YUBhL6Lek!Cq3ioth9%2Kw4>OjvdQ|BERZ~uDi}V&Cu_A zZ|mw;HGbscMJMrn^v6cXgUq~jWJ}9kNl7rT^Sx!VQgz<3+tK9g``JmQhO(p)mWNv% zi2y!-k!^wWvZnQHt8}QUF&54PTbK3ahO)ASq|@0t7nhLhi5w;lpcWwom`eR|{d$C{ z%jywF=H@KFC7sow2IZl6%=6ZENsTVIwDPl{X$PZS!H-}(^?TbcegF5X@yN~HYp$X^BW@Bw0mXi zmD=3OlSSDve02*`{_LdciZA{S!@hFh4yg$2s#!pN$uCKiH`Ki2k$tY^H2BEfNhOJ1 zeNf}Un*m-LmvaeH7MtFnN89O|>r&w8El@yLXORqG+kjM@_y-Mjf`7LO(*;u9rHOx}j z;WIm1f5I^jo9?7{s-qm!Ee4WQ>$%b!9&8Z%TuqIaT9G_EyG=$;u4d>+1zuUdB4De+1 z=gNgfKNo8mR7|GG%v%m|+BCSoQ0_0s3}7+*NS~uC-3@k|tugbH|AqJbGDHgkr`WCS zowL6tOt9O{f9(#W^glqgkd(7?7AA7D+Yn2sw*#P>4xl=Jb*M%T`<`0l^qXC02CW+T zze#YGo=-+ATB8ehGOeD-Txmv?H$u{Er4>JWu^LFbp^nz-t#c__mb>pd;>mc5wnD#6 zoPg7ACPwi2KsDqPxp6pYq)cM&YHk+V<`g`aH>VgT69(J`F5vOO4CQ?BJxWC9^e;0j zYqD%9iPEu{Z?av>&42mA<3{UZvu7K+!crhpYQ0-qs#ej?ji8~Nx15BR#h1{5=JtDf4v3KD&qu~92X@8_@aJgD`o-O5TjWgGWew4YEs#dw5O|%h zQ?aX-i_OY7Y=XF{Y@h5?IyYK0+T?^%(#Y{vu0)(zKl=I}8q?|&expsID}ZgFD-j-( z=3(-XCmDgD1 zR82pA)RqMvx~T9~#ttV&BC)y9lNjil$zZAdh8pF+gyW+7h*6#3eb>_NwmCt3{j~N4 zqwRr?oM0{ifCzDUx=DW5uYnBTNscg7MUbQUU_ers)gZMoTC;ZnajE{(giAoraeDsw#E{U0nA8?fG*39T z+}5y8)$GW;cZBeK%i1A*h3Cb3(XXC-;uzb6+G_AfeHOy0d{sU+FcRgCU@z5f_!ZjJI9jtn-QF$c9Az^A8Y^a4@yJsg&{G zAfVt$1ONMH^8=~lm0@-Ge=q#~s+n47bI-UWmJ(?B|M~UbKFEQqJ%(6+`}^-zN2S2- zv3})`a{p{JFc8Rx#)5ii|J~IO2oN};KqUPiKL;9$0j|OYsr|E2p>A;?GQ*JxX8ZVm zEd@Ri16NUD%l_R}1Tb>OWJIg~cbxyz5Fc=r_`Ujv|7$7m2?qv#RhJ%X`rmWl2@Su> zhg(keudWyo(VAN{Y4Z{OHQGP$Xw6=qHO2os{J+iX0=a@qjj9yJzeoGO6Z^jt`@c`@ z|2J14RBw^_Z%+ULQibAP;Z(J4v@L0TePHuBOBKvEoz`yL7>)n(WZnc%x+x0klZ-yx zS1Dqlx0sqzqK#!z2sw=*`mgKo2iE76Nl84_KV8h9BnZD%U$qQV*nckY=f93`K$$|} zpDt#T4sbgLSX61k|6KNhfP;9b0ocd?T=n8r)_iEwvD%K+qUhU?|1Ls-Tec$P9dMjry?>U zPSvT(lM${cFM$Ay3kv`M5TqnUl>q>de@#FuG~~ZCd=|^tzYDmTkemmJMn*R`HwHIW20KSHMrLkqZbl{+Miv(Oe++sj4_jvgcY0eVvj3&z|JEaF;$-A# zVef2VXG{E_UIRlr7iWG_(*F$opY4B*)7irG|FLB2^uOKu=OE*MJ&eo@OpO0W_rF{D z{*&@3I9izev;3d_pUiy!!~Flq{FLU1yX4N z0D=H1Q6W`#kP96s{iMOWZ=F#c2Dejw62X%$DMn>gRn-d&xTlCsGq0yF)f^15Z%k`y z4pA{eX$f$eIWL65=NuWw)Ni||4n}TLinu@aaBVd<>Dk%;_Uuh|RpwqSZ|oa`o!|lg z+u+Kknz^6wFHE^iJ7|EP!vDnJL;ebYtlsGBigqNhpCCI0AViSz`tUE{y)YXu-R1B9 zL&W|6iP-Bv0WdakdelSXD7WKxK_l(JiLiWnvKhd5|KoS&3wYyzgbd1)C*P$+r7+dm zU#6Ldj8c7mQ|*(B;QM(7N>K|DJuKX@(1ieQcxrvH7FH_*vh|?zu0`9e+k-#~DF2~j ze~i_q&)kn~UV|kVDnrNhZ2Ec^0h^S3U(*Qx8bVP@OeZc5U8{w10`0$O7|^o_T2SYZ zM36cja(f2=(zvzj%G-x$oGasH+qXyELj(bZDU;EoX{GXUh6P=j&bG z=T9p9bE&h;N(uo_A+3Z1O+#J1-ezofZ&9pJ^={gl1!&At=BfGlvEb7$?1d!q6iR%h#=@}482A^l;<)m`f$1g84lwB4P9GC8Y8>6{72erYGh)N;Fxy~f=?ZK%Z=>k=i}?zeWmrB64>Y4Bl6OT*G*iz4MKpMa$2+05b`M@z<#>*d z7BzLQJ|%0iM*AqAI7<&ie7sOtER2M=H=pODZRirc5ef#5I{n6jH~;Tw&=!0W=Rd=``gPt$70CEqhD03VK8ox$dB ztbT&|Ks#t{`tUQgEtJhOQR0C^V}*LpO;hjhe#42sE%;Wh2YC}ZARQqH{W1TKhfrW#Xl>ji5+-y- z1B=FrA|7}CM)H>@iU6L3!~4`5?a)G*ji7cLcYUP7Y*dlxOeDb;uA=f$iBt22?oE?t%h=B781axExTkIcQrZL|mO{90B|9(Cou4>J)8Id25 z6%ym?*T3tfFH)H|@uHn9j*7J|Xx{xY->$=8;Bupu$C<^dTJ4jZ8COD*xF_RdfXxla z7Q|=*DAD4Pu!RAT!;P7gK$V3#RA*4A&sNG8GjE2VwvCoz$Ky0iMHQ1Eo}m3!K0y_w z^2=34#k5f*ge~umU9puc*zU7IK)eko&61Q87}uJi?yuG(nlHQiqW7nP6j)i}7KAPj z`VDy#zKEJZ<%+%afz!Z`Av$XHRk#$q3t+6(D8zgD`D?g>B8cOAC%Hz>&L7sW~nyiz$VTMPCT z6t-tH407>VzG|GldOK814G-%t?bkH1LWzV+J=!KA#d?4b3~E?%Su{_nMBOd7KDnxO zz-WS3iWjRP!;hPOZ+QGQ2u@vo$dVY`V1QqhnGN)dOBS?Tl~xbLwKp4>z?5d-1t<{3 zBxKZ9oNK*L&aQ?lV`YV#V^`i>8SeLXs6cI=DM>>u`tB_Ww5fuoXkffPCE1J8 
zb_*_>YXwWanSl(iQd`^^m6U;Vd-ZHHqUtep)f zar-EAW+*^9sHL5t&vMJa>xBkT|F zV?RLgb|3fuvU$LiO{+~2jmB($RN?cgj2nC-{en`QsE;yk4MXIx6jpP;&;HBwx<2TB z_lMPfQq2ejWvElj)yM+WuWB>+`1_v5^urG0d{0o*FwHVmxQ{ijQ5pM92|L4>i`o?d zNL?E589^Yp5>X{apBwSGEA!MWq~u%60=%GDXL9?J!S;KV-0-~*IhiY~f=ZY0Q!*#F z4JrQl>I|v-y@O?ZltRq*6FbiUz4Q=wV@~NMQJzlqRSX(#Lf96x1s)9iFRF_9*ywZe z0&*ofehd?DJuL0*&b|uM<$6f#`O>|^R~uQ`td=t>m-J3i;}G0%Apn{DyUh}e(EEU% zPOB?(1VKz+o9S+}QJ7YpBRGg&S+w}ah6I?l2Msg1x|Sa-fH^@;383i0!t=TnC5>us zjO~Rg{b!DaQgV1%JgIkb;GGEjt`oDU7Qto-PT&NN$ElZZvZ zotQL`o?N(m8y$5{1fnTQ5F?Zzw)26#JNt`D*JD>4o^Yk2?<^lKaW4VY`{q=^-DNLz zgu)EGDMFRs+qDd;1o&{l-f%Z&FhWCaeDS@B**bBdRThyRL2FQ+@KkR6`>mhAM51vM zNuEU)CPaccH1WxtSC)bI=bJ5x>31_Il7Kr!b-Q&G?%An5QxFe?0Z~HK+wUQgWw2OT z;9V2%E)hxuu`6}R0MRTo-s$LHV0kc`dR&sijp#6EePTp$$66#pKW;nr{E3U-jUpo1WNjIrw?}%w24BfLa=d&re(L z9e(Uhd2q_$ei{x)jwC5hFH|X4U6hR?hJpL_8Cr#*UfXcC3DH@_quha}gp*BbTH1o! zG63&W;m&YTW?Z8otLq_`L2>&fbfZv)67ITa^7PQ{1K#!LpI56V!?E#}hG8i_%OR9a zQ578a(d?A?XsW+C!Oz9?LXvb|CNcCfqow(6VaT*2GyxT*pfE6ftv6%4*mE8Hzh+vo z2vK4LfixbY6XNpnAm5I)7UzT$TJ?l@8n$9T(rL(7h}1yTlfsHJfIv)wsK?PVY|by3 zkN4SgX5WofV@ZR60wkV!40QCpr_ilF4Lv$R%?d$m0(7Zhxs~xFGhwxOG@K9<$%C}7 zM&C;&LqRiV>vhz=1=j&h3&YLqDcGcx6jCxWxJ<6#^e$ttr^@vZI!n-Uh6T$kLWF5l zoXX(sDl|$XGi6M|w)ZAS^LtX`5pl)s3286W69)DD=%dqA)cUV zU0&mc91s$8RR+@-BiGD&qG&j?k|BC)3bssZgU4!1WNevU}Jj$_TLWAU5q;=A(_bY zl%TI{Q2_&(y}EPhD&j-cy{A6ByAcirOtpHPjn*h$H|_3?JFE&&w%y&}>uwi=W~`Wd zsW?4G{oVLet}9CNi@)fWd;>tJ3;J$ypteg6yHGjpCH0FVd>S1E8`A;w*$h}`T z4TGJbRrpI527k;as@~4eN6Z$OkY9Cv(P=wBAvanq2Flh`m(zgfVT zetkf>mS7UoQ7XciW1TBY2AL~%Fm>LXwNMv)9*hp#1^4T+-aC|bx@2TLOdq z`p>)jT02D-<71*l)WcbXGViz7f!XhRkj<2MNr;Mts1y?LXbW^_H9%UVJ-=IezYf-< zDTpH(N{}|=TLgh}GzAI!9LJz*|Gc8`zF+J=St{C8nq9~vDcfFtdj*M`2`T^Kb}8}| zQeZ&n!5paPtK7v)2idi=gm>ok^pU#7gtCO`%D#j;-tvGet}BxW$8`1K+xm9q#N6tK zrTJlbv_kQj7l>R-IAG6iHO12Tz2!`&n;je%OkCtSra3JhNIgX>K769-{d`Jho{H_a z0x2Uk-m7)I5yFP5#xZh$2?j#?_`L zvEgw4iY+zn;9vDe2~h*9EY!{C$^Pu@9q0b`j#iH>=qW}YRlg3o6+UqV+QaMq_Csag z{&+IAbTx#`0*k=x)p)Ms+6#|pS~Iv*J&$^4XIYYcTl{F6)}HiFtJh6Hq{G(=ia82e z5tT}7g)dic^b#GrO@ogc%kZ(lZZ?vi&`%#uP%gI8n-z3?4ptU7lid^<`94}k1+Gd5 z#gZ}{0mj}U&Tde=P~2pETQw;WG*0~RJaave>dsmL+tyR>hvSK$bf7p=s1?*T$4Ei( zs{o=JrNS3vB*%qFE;JL6e#$H#Cs~nJx)yvllaWw-`sxx%Mg2++=PViK_pDs8qx?Ji&_MMK5&vEoxc`=-jC$qpp<8D$U0qh_ zZW0##mS|2q_|fq03N52{Wau?f?9)a1V7#aTRjrH>{+U9}-yM1U;C45QNLFzqL0yuP zJ>TsJL0pFo{BJeo$%fUcl@8X(ZU`@C&m^Xjo}3T=WnjLcH?s@oC5jp~nVVEW@_T08 z&Ge*ZGdD&vaz};_a(}93A}njf?TgOwZd-|+SPdaTa&fPE@~_elBsHYZOvJDqV)7B8 z%RE|0q^l^kb5~*)D;`i`>x(Q1!Zpa!(}~O`)VeyvhpSZ!fzH!c{EHv2<2@z1>48#2 z-w|QzB-F*?l=dssX^|Fw7Ta|cKmR%-G2rvU3L(DxzMkI@UX(9ujm5v;%n)bsRU*CR zEFi*tuSD%7@jaC`65Tj)L0e#b2%MIK^tbCGM&9Hbi;~+Q(?$_iyqzy?D)-^Fgci) zc71teHz7%URj@6QDJ2{9x4pE+3^X0)co|JpreqV!7mESP1*-S$Jm*Dw}W1H3m_PFEss}P zRSlZf#cl{iV^^j!!zEPrj ztf|N5?l6MJ77UoZ`2FM=yKnYNG+TVFvTrG0rN7`XYX(9a(IJRQR~IvDG@F{#K_ZOFK!4@4J3~e?}O5uII8;#T}vq(D|DZi4H*Q9j5jS ze2CGOpV#=`y`f;&J2~E7z(s17|0;rjNn&O3MQXuUL=bSFFl-<$!+Tg#x{Mq5kcV6(FMuqBSQ*_iUf<@G!Ch(&$aD_ z$2npFresaM`Zd)94HCJ_#(~lo*K>rFN4=63}Mtjy1 zQ9WLP&&-1kMhY;uE#7o>dpK$H7N+{2AjZXW2g2H(UxJ-qb6w_Qa4iReC$I`oV>Q*+ zg|b~AuHi5*_ICwb-xuALGC%pD%W_n(Q&D1IGf|NExL~|a_{&Oe8AO7gWNN%)^f0Z1 z>4#!qV@9J3X?o^h2Oyw!f}V+hE3ER>Lwx~vH&fU4&k4F;VUX-u9p5qIw?_k+yHhkF z(Uzf^+rh7$kUx%MarnwW65-9OE))Ksn);l| ze1xD#ckFFRzhHF(#E;`TyjF$)U#QCc-8*hooY1%8Ww9}jh{F&_#}}#w>R^$ycClf7 z+1H-!TI*GDQ!>=Ylw$?8vu%Z&g&#PejYP^Xxa4z7u?16@9Phqg zAEUMWnl9b%dp|S0{+Vfv!KRk$X894}m6@3tu9QjD@wRX*D-?<8xjv8k^eJ zSsrSqS{ZxZO{S_I4r72}8{$GavlISNV!g>GidCnRHCA^r6WCg}GkM{LKEs2z`~#L#*G)H(5e-8)($tRC066%tQBg=stD|b+Tt5qEJ=ktM 
zk#k=HOlRnH1}8j`&Iw5FN{AjB45ifm!^8tzpx6w!MqkHZmF=w+QIh{~EU3|4H7 zI5rjx6X+}Dk%GMm&15*%_tMPgajn`-86^YhAb9V1SbXWX|-XS;#x z%DSKChaeMw?yGjOI3Xm831{32Hfhk^^$ZAD1WNv)*YoQ^9yDn@v`^6#%?d{p&_)kj zx9!$^MLKb@)@S-oAsE$yky zQ{{+RKUW~Y;Ya{Vb${*-_CAo~NwhmlNrrR96NsTM%Tf+S_u- z_^E)7IH%eicAb7P5BdJM(-fh2U}19BNZI?}KNA@*O@GgGQr**a#6X=6G0Ec2j3{=Y z3o9=5w70uiX~deAcX6lW7W>3{ZpImYsv*#8wxfE=t*!`r2cGKk^X$&uL+0x1F@#b) zRP1=cY5ZVJ>q$^C(w3o_OB31~ubZXK%~l0RjP=)426;SEb{V4dzuFl zsbMwu3trXSTzd`C94)W^idbGXpRH$K8K+TnIEi2M20`EAsLpK-o)THh$LBXi15A=S z#}i>=hYeqMqxM5SsAfi-Jj^e+^C?zkY6B#TWU}QaGC#3CPUuLS#yO+|I)$m^uhuba zPKAF61^W+Ft^XZ(j`{t*`o5lbU38(;FE1?P33fCNiqxDRKxiTL@T%M79qC=Hubc|@ zE%}ka@^GkFtiU*_b=ZrNxkC?<{P<$@#|bxCmN79ld7vde$8y_AoS1%-6+RBu<3|%C zH`shiZH{w$S)tC*NHY!X4MCZ<9E!pU=g_u!J<|S0}y7CECS|CC;P%QOO zyLe7BreH@sBP$;q;oIu5HVX(Qk*w%JmOcO$$ZTZ*#d4-Y7u3(?mXc)P7)3ivm#WYy zwjeO;@mT%iV?3jMSKR$u5)5Yl=2DOvXEavDUmZ^;0VeZ;L!af{h&>c!@M6=XMhNAk z=^j-){|JfI%#zIbx0$`Epl_1x5))H9bXRi`?6}g5Kjxu)A%R zw3j$G1Va}iExfw*c6K>?IF$J9*hVKULNev5Mu1BCK(cjf#s~ti(bz1==y+KGCSO{)#mBC$IzE{RTo<`zf@x24~@+Zp@n zo|3n+kq6=_^^3{ra}8P6*RUK0W-QMc`^Xc-_bKo!>>a)enB~CMaM+^1DEm(+i-^)}zaq|EP)^4&i8IUXDf_VyBLYm30sm$cGtygc^M@`nLbR7Y z$HjAot#g0Z@^U72nU4wyp$qNiNB13F*EB;2m=Bn~AY)GWz_wXAxa1b`HI%Bt0Akb{ zmxH@lT2v9njW2`f^inn*@eMXUf`mmIS{9&(&N^cPGC#5mJ%5b;LY(?+;H1gp*dNYL z9DqK9gBljR*Oz$kQHaag9<8ctZ-gFzC{O;HFzUGQ}*Fh3mX9nmbU}fK_uk$Z@Q8$s(IxQ7%p|ICLj(8%?R}cX9HJwDo8P z`{@m;aQ3-RNT%?%kl{}zBD%KLRJXwlx?Kk=eh&?}TKYXmC^odhGOaxZK}k0JG(3QH zWueP=3hzfuZRx|?${clnZTur$m^VdO3#SFu#7c(U7@h*&s3^9ROMel(TJLyFVDpBC zJr2X4;zCCZ+WeQG+=bC81999t$4G7T{WmG7wJLZF_)7i2P;H|~>y=RVI7jreHL;NzPslQ*^X-QTm zFi6ZQ1956>0^0HFSIqQJp&X|>Qvz05axPC}qf zh*yB~>W9jEDu&yW)6i6n!GGhCROGjv$M|kZ5LGEVAi>A+noBNcJ_Sz1#yP z_a=(6T$=5=w|N<|eY6&(Q$w<#=^r|;$)`8%0&|VHMa6=Nmttul`FSNG^=S+GwWOur z>fl`)`j`Ga)@FmjnDJpuZ?mM65S-*JTE4$E<0c_~;7rEEy@3+- z`V%@l=&TIODP3w~Qg;rm9w<8LqyM%YWj(-xtNxE~nT??#`Ov zlCk^Q@R?V+gdc2y+|F*$CE?jaD@C$u7$jrJXxOFVE=62=Eiljg8;D14tuc2vRXm;_ z?kOCxjNB%EOA(gZ-m}R$hV~XD0l|zPPz^k*(uyeo@msU=k1N2TXm(I{epzsMKc{P> zD+SI!s$PcicJg~{utl!PNddw7a;-T(7+P^ih#48@BsaO6k$|?%tN=FQ1sk@Vo-()r z9|mUDa@-{yf*M1>ADZ@P@)HgsB#Y!G1k5n^-&+JKn924lO;UKAp8EJ!cSAd&y_jMq zcvXP$kO#!}pcm5mirBcCbtb--blmLnf?cC(JxDbNhIrTfjyr(`3CnHp?|GdaZzzC6 zpov@3)d1p|J8X9~j9yjQE+-{9EeNV9fHgIGl2sW31UhwlNyjP&Y7a z+7b4_KX21L_s4bFZkQ>~2%0yit@Kbay%*o(gR~P@A^Y6(QE|xHqNO7UG4u`V`z}61 zGW(p_PyhW{>0fDJtrFjSgvfQyZ5KDfa`MinrqGAl2&vhLC2uw)t^v=IxN;nOY%>Yn z$vGBIM+{bfaLvVFCqVsdeL*OlqC4ZOC+qdaI~jGfncZG#Hj&4pl_(_n)HL?^(%^iD z;I9Y!`DKz9^<2ea4-+o+OlyW&2yG~LC2#Uj7w#n(8M8szM;nrtd~NnmndA>+fb`}c zDkU)WUnW@wKk`dfA5QO|e!pre*vD{r;3!~kb(4knquM+cElAYNqnIuA!enRzPHvDc z))8hKH_C(PMr<*hdC@c$?0PaqL`SU!=Pf}zgRF}gKMpncBNTpq#}7LnFEByjRaFP{ zTQi8)8J=<8V}P9v{91lMz_FW6=&9X{rH4jvE9HOkKn}j^#@Dv8DF30NC~u0(SBN$7 zwec&*g)hvqBVv=ehw~8w#|B>DAs?}{6 z6tR|MLw09MuYi>C;n!m*kpaTF@q|0oeqqo#iSXNvsz|M$EHgQ(L!Vq*D?}TS4pElP z)e&29LQd%CmpdemLZ?C|L)c0}V*1Z7Ys#f-hC#f<TU%3L_aqVG@?S9tKgx}L_mAWljhui2bvw3JP z-;rhHod~?Gal_IZmQ&1Pd_=bqwDL-4em3slP^2fIBa#~SD(G(Byo zipoQ0O_?5H$HUM&B)E>sHXz4|RbRff_r`ll{ckh@MQFx4eK-5=9Z5zvSh`ulGR{hg z$bS*{I|nOncKo^_OF?-pjMX~cl%X4=VSpzpP-SnDT~<(^iz_Nn`OCTTl2r5I%!)A< za636~ktw!A2hVM0U=UGD9HditR3|rbpLc#Q21|Mu_?lQL6>EQz)~n2DvwWo|8N`&` zxa|+y9ZCv_Q#(i+Sn8>I@RKpok7HB6?zyK?B`us=T{6~MJ$Q3GU12ZydeuG{VND_( z^R?xhI`V)QHf@s~gQK&Llp0Z-n^u4)! 
zV75MXyo#D{A5qG7LTj2aMeL|ovG?D8>kiA??rDh+W6ctnxRNHWoAWd6!ow3d+q=B# zo4^=`+> zJ>nlZFpaPy(5DS23iuosK^L$_vy?l;|1n1~fFW_;-z4HvebY6W!4~e#r0G6f^=;w% zfcce%KLT0$j z-^IGtUq2>U>HzZWrU;+b?U?U%l2g0TbDYcuEUcw69+b9=Q|vs`$?DhEI4|9 zt$g4-LZ;i!TBfeBGS4&Mz+SrY#j!jlQ=5lTLMv2Fymta$D8gAZuU!YW zmQ=`?mW(MEZ2Dh4XY4DlJ0tIhQD^psnYf9$e6Gss{H)oiWAg@AsJj8MYC&5irf z3Z}P`6`b*jnKfr59)uQK2}8UK3&Z|x8x!{#>w0~~o2RA9kGr6RYrkYS=6J($Qey>J zww#!X+z_EUjty~(yAI##u(vn~54j;MQ+4uwtvXEu5wx%UE<|L#_f^nuusO3|W14>k z8ury6kcsrTZGYps{k%hIF*XuGL2TknpZ8RIEjn5M3G!{K1G;d#?`K95V4(PY#n*Mh zf=*%9M^YUdMR|!V8f|^_H`9KcfbnjjV6(jQZ9WOI<$z43Xy4AAFp?$=Or))fhZjJj z&C{Yxhgpm}C_u=b$a#X}bTr$=qPrEds~Xv_k+)KCFm zQdqa$kanr&S8Q#m;=r(&y=*zXA$op%jrQN|yJItVi#L|X45K<76o$7OhtzqqW0OJbgS{ukxs9lH zJ$DDYm+P($p^EJsk6U~NS` z#XzS{Xxc6@8R_zy-9oR~j*igq#$fhzk1OLt!)M#eQ^TB4ib#$dKJZCQ(q=fQSgG|U z&P2R?%q0nu{-e&epcg@gb-Y#%YPe5N0a@D-nu51~8EdKvg>!G&2hz#Cyl`R_SvC%Y`{Uqm`^7`E3djU+M2t=dz`GE* zia*x~bt2^!ENjc&XXJ43cw6W%ol+p=YHd)*mG{dgsPUzgFr67bX@8R#x2G16Z|$#m z+*^Y>RaG~r3PY%zz;``168{_F&lgLE3kU7ZVH&kdQNtYA31+jYDdEmXpi66dGbRal zSCq)2)HkI=k+s9-)qDvU=0`KUyd@eWY=AT@cAW)`QPp-W7OSsjZ&|gmsPbX5jys*U zR%zew4js#uROnj6O5=|1k0TBDomGX~+K+P{MOJc5S%6 zr@No^ou5#Q9VREFzVUA;?85=7X!-H+qd#=qWufV#g70A!TAH+|41UDIJBfxC1Z$$x zRug$;+P6S#GyIBO#UFODSqCkNJ&pY8;&t3^F%J%fpmkGZMPWFH8%Qs0Y)z8zG%>>K zsy+JZ#vI42?k}z+y*eE(BPVpDf2~LGaHK0@JGK&v{G5-~iL780kSXgCHkQC0SYvSN z$uku7+T%F5OS}1dR81+=z_1CPbH_Zuh??MGC7tgDbn!z%j z$2=pT#5oo@A*UtneQOj1yF%S=fJ)^^e>H<33zrp5gM^Jo+Q(cwc`Km-`_(JrcDtlH zpI{$wxZO#iTKD&!&WWoNuC?XzDP zo^XSpBzp}nd%Z;*KO{6evou05j;F0vbNIAUkT8`hlCs(1T>j%>-my}z1Hmg@&ls9h z`>>c+EQuj$R6Ba)a6?SKT9(b=jJIV!zd>F-`6l-u7h|qxJf@fN7M&2z;qGu5ZV{c) zB)lG`)nm*ayX5F-#>VR^yQY8&OE{Xlgb4ESG7yvAP*R12Pr?6cUYmkud?~xIjD_$t zIVUHY;6fJjRvQ@7Qw8M7`83I@`;S@J$V_>F5VH!$z$+SSqm< zrCTf8CB~^`y&rAii=Ox5AqlPm^9^me1uztPI0(Krp!$Gi+<7#ZV)#~UYC-_m#E{~w z(nct!DpBfFU|)~{UGRLgY>syF0HSDG230cf&`g9nmMGOz8V`D-KE51Vht{IvW}oBv zdD8{Ow&xXfI$RppYM}^mK}8dix97V%WUhsZ)cwp^bAd($cY2W`wtaw`k_|whxVU~J zM1I(F)q_dkQ9HzI=NJqdb0(+&lU+AbZvXBId%{R9! zRuCC}8-ympMk2Pzs8<@TVUT3cVyJ70;}{9+Fc~7^K!gN&DXgKK-=ZsoH@j?1!vA)! zupJO{iFP|-HBW7Qhdk!;LXkD^IL9h?0ZGDF+HVXm%!2bAiQ`w_Qg$FM95|-Km)vf- z$E>$8sd51)ZmVEhqBDSsQ{=hr4NItwEHn@r|E#s%U_XV!?;-lq#0TfUZ;V78xc&yN zgLblBLi_XmgOMiMrVG~+aqCNEMeKUv+&FEQFJi>I)0vz8SMvKfz) zf*cxMJS(9QGMyTk48uEL>cI1iYt^GJg$)0Xb%?iuUr_$gw$d;KT z0we)@0v$mOl9KPFagq@^Z>cg}R^HsNe0g)=a=Sd}+)@I9Oj0Ix(x?r_0&lvWcJ z><*Y1bs_5-MV3xDh(gZO^n;Zq#bzOrzxRnOOt~7oeD^2rJQR;=^Xq}{o*V(jvpc(K zr~qZ&Y}umSDP9{U$ft{czZw}KE!+EbOY5+uK!{#JvRs)$1~2{`I{5+RyD>zjjbS}( z#XwmtDWv=b@2v=#iiaP*)+hCQj>eQ2yM;fi?2qM^_bZvH?ghh#xbNjXiMdnGhR&xY zt`m9m_pG+{hZtoH^3}&-)wa+NQUgDYQKsMykT(F+viTiQ-B3>k+-$!;dn(-3Z z(dV4FORLkUB^_Nch=0ySn0@=uR1^6Tl9t-ICrFn=!n7lYML-W4#p*_r1o6&k7(pIQ zYfTce0{Z&Yc{*<%bWh%Rl@jP3`9$$BX7o4mfD}0>tzs{LI4>NfN7AD5A`3fssP(S20g-it6mkx3u2hg!myrk}G@hFfW=7YTJJ?o8Gj4_XG z3(qY|L;i&Z@cn^nI~K`C3aZ%m4o1aigK^dL&b~-VFD?TEr5?Xc)Vit}8A*LroX>s6or})aN)rqrj(&I?n&0 zBmW&n_iK=MPULEhSA5>ygObPXV5mF+j1~2d%}H)BfE6PXXi=glcn`iO#&`6&Ps3Vg zc&L6cmKwsP8pQ4DI$vv*;sCNcBNQ*94*r7-uedV{7!PdC-+-YM!EezOFcS3Ac9^A( zB4ief?t}RHQyQt;eXuC)tw7j`f|bRJulLPF^u2GGr{>ibgy!W8l}^a=kgp>)n*udqXT%Aa-HIEer3$hrK<=eZ?l0?brA21HRtlooV?kv)tsAl3 zrt8<2;X^%n2*jLPo}Z9sPp`daLS5_D*SE7A*;iKP-a*3uduL*QPOZ(|N%V%q4MWye zTOwECu9mP6TDe^+S`vvtBg0n0T!JuIn)p7ymJ?_~%VC7UiL~13LZp5*C43}LxOB?! z+!*}_o>PND&9NoNp9|`(Ia26(ihj1vOalIL8{!h}cR3D8_$szdfz`j|Vx9?v2X;U? 
ztlOJq8_havuxG@TaQ5Hx?$ELfmvlwwp!LBT1!X_>bpDuJIGOHvdno8-d1Dh z^CcZVb{dG$GH%Jk+YK)`=>=zHCa(f@ZBbOI0=?f;T2#%_g1?_wY))i|U)@8CD`VZX zzSQA(8f~h+7`Xk=5!wbNrN+h8e=%@MPVRsB(6Hlb2?$Ei!uHk9C3ig(3zl z-rp#v4(We@?Ml8mTEw4>*k>`b;1`5GAH$9*8yg?p>@46Pm>xSBmd3F{UR>gQa3!J$ zBXuhx*6f)N$Gbqe@{p-bueh^33`I)2!+(E}feYGvSWH-{cC|hnT&@-@vo9cP=%}!H z^JNQyW0K@Zq9%MGTrBHgP`;BU*`vXj;h1q@OW;ck7>F&LSsLE^Q+PKJq!}wrUCC z(fOl&`87L-e6MxB;(Iq&?Acwvz9m6+qS3Ga-1~kjbO>njJF~0RO!w`bZpRj#3&5z` zb$+VsK2?k8T(jSs>KMK65imotL41ZKs!?$zK`If9y6yM(DjD`t0b)=cT;z_7cG?Zc zuxhyfD`uPwaM4Tu36D>PeFNifR7m2j|H3qb{~r(^^7OF*M8oGWzN6v)tA(5y0pDo= zVPgsOSzU(vhZPwgUQyqfZl?b$nPhW(fBgf9j?&kAdIbK%3vfOzk{5{p7r_KZX(xk! zrg2g4ja{2ew|N!*qV(e7b-$Ah`PY{I+VCBqIZw#l1^2(ma14BMWw^2=`7Jc6ZsO=N z&Vbdu(=RBZ?=Mpp0Q$v_=)|4|UR zYduy#GeRh3i3xl9h5bd>x1bSQ4&(GbHdv0ymKuf0hTj5=NNzTPbw6AHBnn{If%T>Q zsK3`CIsg(66n|OsPHieQU?2ToNbIvXDrm7T=IBJ8uUbvXMz7f zDO`%TgEuypJoCSt4O{fTl^^yC>im~Lm%aqX7$mS^8c1)@9WEP9y(Vw$Qn-I0mqGJx z@YZ05Yw%w_@JkTRD^ifaT>Ab?4uniM*T0;X{FmnaMgyNkOHX+BryKLPz;mVifBj${ z-Gx8l-Hm#8n&46{U>_Brj|!lae^C1+kvQI#f6N}06;y)M^ZY{odOgK0rT=;jE7d~? z4nWw#zf8{dt|%>4-JUeH8QvO`wvCVhvS-SkefG_HnmEaBEUAFq)RxB@#fA^53c5E9 z3KU^k+8%^YRBjra!(X^|#K)&cJAaLKftSpOQ;e${xXpE2nof_54NW&-5p&i6D2j?! z+Ko72BO}#5jM@ldegU9?mSj~=Q-hwd)wMgXN+VCcyDp54=VCp&dfqKAgZ|p~py{Fp!`H~&@aP&Tb-c=a z%b1OXYes2Pt8J6Pw}q5}QWz#6LYi}}Syu6S1cjQOwttNMyw1lA)%9vCa%020c(Jv= zW8+A0R|(Gl)BjN!!aKN`$TZ`)ufyrK+m6sYW>q;vJp`o1dOe)qE3tyN5y0Zrljy0x z((AiY>w%ZT@GVpJ$iUKGi1PY_zjdA_)K#)%7|h^f0vZ8ks6G0(0TV7V`oYb@x<5XP zx0Do3yW6S1y=~_8CiySlcBUJ03!n^tls|5gG7c4U!mw3cRD5Im&8P~Ex`yAOj)N5a zb(0&r+txKwiu@kF9G9wZQ_tzNvb;@eY@zjNiR;^+iQhqD<4PK|2y`?5KIH;mX8-za z9a`KfT<95iGVe(-B|0QN73HQ``bD;4KYeAOOvf$KP@l4qb^p=sA3d*!eNKPza3uS_ZTClgS;?LaOCZ%h5HBvPm( zNi=8D$1HF zPSFY3o8WaK*6S+y39T!EAnD}Z>sfu%UhgMJB%XJ^o?z{J)8 z)$!y;l;X&Ua3w6vekOzj2YP6Ye3O>0jDw>_4C0JJGKUIz=-Rn`b$V|esD9bNH}yUl zNMvFrYq>l%1sN4i5r;yZ8WRg~Q}xn15zi2H2fV>i%$FK+M6BCa} zzH7X*Wd+!ORuKFs#^UwY`M-1C-iYBt1}H}V?#Ex12GIYc?kbKL{lp;XWx3bC%e@5elJtA+OU-ZXG&NteJMVI9f0JiDU+;Y6-uu0N z@12+aPEV&l*DgcPOOIFk`+=Tom!qfket)2JdRaOw!t^raYE0re5FE$ay94#2`nzn7 z$!E`7jP`9>Q6pf_OlEG?A{G`Na_g+Dz{Z_>v1a8utY5PM|C_fKj_rHUv$BxGt15)L zO%$v~h$X>ANnFK2Sh2AZ=>`k-#m2+ZwKcl-?0{~)x}ih2j%e2+2L7G{KHU`1zb{*< zdi@bY`(xpfcCiz_q8!_dxQeu;2=mZB*a%AbyLu2VArdNU8j7n zBGr4(^WX2aHzF|w5>1ETo$4m}q~7b#5`_im-}qR&I6Z&S6eJi_BlCvO9{ogKs&7Z~ zqqH1JAmvLC7`*T0o#$CE(*veNmai(*-05!=Xd3R+E6G(X;`Qz^h+^`8(Qkr&;4n|0%6*j8zW|L3=|9McW)0|wzH=wwr z0$E8V*iu@EIR-mY`gFu;Bl~02*fTMpM_c%tEl8s#A%ei=u4M1O38RJ$=n%1Hh*no}JAOb+C!7|q7tVm@Jy`S7V(RmSfZJ?zx3Uh8$S&cA< z*kF)uOOi5uRhbK}Jf64ht-wlr`BNMrqDH$n7|k$+SZVUs%jGBes`8h2jHiNjU(FO0 z-{t#?P?VC5!qgniy^}+j;HWS-Vng7c5CQ~fd8!;#emWZ(P?1}PitJ+cv`PWjijX*J z7$lDxyBn=$7_ICvg8gCeC)~_Z%SwmKla%jO)zqU#V2V&Bfq+4HD^g2Purm|6saYs2 zC{QIhsA@*Eh(=(m2>8X?YEV{1vFtgfTr1(k`4zlXQuVH&6OPQ$SGV~gP_TOUK791? 
zG%TDxA1!xm!wtb9=-k#prYAiTm5O1RujH-T&%61giAd8jBoG^ekbpFq)9FG|No*~>bxY=HL~9Ny#KpKV zLW2kZ0jw;w2tT~{1$M06>d^v-G=(KW0|V~`LCV@L>H>qo;ASJa5Zo#Xw?2O#%z+l| z1Alie+k}r_cn5Z)O%b`)3yPv8SeHNMt1eYUjvoReLlNJm1^S&j6fs>|ablLAC6Ve) zwX*eOCyfcN8MX>Da}@YEze`TD#{{AMNnO#tS7-D*xj(`ZqfupH zek@kih;>;!?KgZi@k7`xc4buHD^`;bRto@mu7V>t09JbdLL$PD(7rYLjv9=p&K&E> zY0oM%UKvz%v|H_2a3&EdfTUHNkN#yoHY{0#lr2f{cT=d4I1Fwhs>~EdelEm!OhA`G zy>aq+ry{&l9N}Pk5zHr05PS=rxq011h8Amni@4~&VL+ozqMvbdX?&ab={NY{-On&O zw-Dz=`lC&RgL$~v3rjDpwX0;JVUxcHLMzLZHdKN6N)#~;`mudv24G)$0lrChVPlvB zLFb%>|J-*a;$xzfkAQ`6iebt{`Vm{hdd?1v8Zi_(d3nm7Aa(}5Zk`ev(ZTaxBVUcF6uu-b(W35&T)7ts9oRk^4}S7IwE$~% zCAZIAi~rv86e7*RWJ)?~zm$#^E)$=_Z0J3DFwUQFCH&(8 zH6xucfu{nl+;V*R;(J&%a|zr<6=Y6_5<;Z~r5?E5@bkAQ!l^8+pfbXqB-o73qx<5f zr|zN_?Vu`N_4By4;K1`3ydyO zm5ejze)cnpvI}wb^LM})NL7HDC~+Z0NLIXNtxLw+_dk!E9cc)*1;HNbKw()iia71s zqHjC&>C%ae??4p=OzO71SifimO#UX66F>2^EGwijs}m^*`bx<}8#isI5cgy6E4U@C z@SiN|2@4J86rV=+OINPKgc~Q~^o$+2v{MYCI8-Ym<4aST=pZDqGzF5mO2(>TJTIlu zRXg#6T=}CeypyP&SJmV!F{H~AKIqpX3>{;DX`46Wo*N&p@w zMHtYLFr0FHet*!F2cz3&cI3M zk5FojN-3Fm8iN-iS{jB^Ep{0rn-!y~$_eA}MjAqPhE>mI!qq45#w8*zzT zj+cJ@aynAhl37ju@S`CiE<75$R&K_^&u8MaTQAV?#XQ%{T#7~C%|=2r&P z{nXv?j|x;mKHa=$7>_iTx<_L2Hq#Gn;g_GyRLZg>LDJ-P5B;wvaJ0?qzdGv`kX zzw}I4M9nU#!o06%!C~S(TeqA&{pfQ}N9+K4en~0(uga4vv0@1GL(5_9F~0Mi=yrO4 z#CK|?(rMa%4WOE+ss>RpWnAw>SQ2U)FBXYN;JY{Pcn)QIb73{pC`AGa;C0o4KQZv! zQ$S+}GtsmFF)n`m>{}?^my7Vg5cV8Z)NIPo>ZFdi@tJ#IZxg1Wgv6!6OrU81jqDkZ zTVHwr20z*bm>u8NMyUqqar~>}LwxhkIAs{3=;ZS&r~0~f?tu1fTPvUag@3NV{SQ2j z4$GA3{8zD0l9XqzI)?W1Q^}W_v0I&`R4Z;-iCl!|wsZxDeYZoh5O@%^@ z@*vny_k8573i>l2bjhajqsTN8Q${`*NYV1m#rfIlAnEwd-Xa*o#+-^DUjGy!{@S=d z$ls1V>$V|}DpgcO3pjI1kiRdBjU<|e27@%71U06Y8)+34Jb+P9SU4^meBnq1vj_?y zFdcqRHd+#ul-dey8x<6O%ACpkB#!(9w~NAnb4K8Y=RQWus5q2Ym2*sLL)Pw8lw{<= zo=B$l6#vKcxky{T8rQS=PZua(<-gT|U{Dh0PKZQQQiR!NSEamtGaW+J51y*&Z z{i@sJ_(w4;ed?_o_lxt5^3G#hoz$y4ifMb1#W@ZdV%KqK@cO;~hJEKo-1y!{Fi`W{ zot%z9I~kPmWq2kIKn}YDZRitVBE2}voyt&`N#`HQJBay6TZw_Ou&7w!xtL{(dSA)m zm#i<4bp=8zG6PY{Z@anJNLCGcbX(0fMRi{M4o0+G0>djG;G@STDGlK4VFT2RLvm^w zR+0H7{m9{iRGxYhDmv63>T#{J060_`LSmX-K&~V_B1alT{z)LYD!$i9Rz=p{3-L0s zF101PUNOl>nBT)eDi`V1C{nbbveweeiJ_S;lqc?0+goAU%ZVEsd(qt7E;N(lrMr_|U zT0-WhDUy;u1M92;A9S3{vyMx-m-g^}mUqWP(Ca1R<0W+M&8N5B(1CrF(NF5f=Afrv zKULMu+jilmtDnHFJGUYzmezM-YvxzKV9qb!(10pKCMW+)(y^1d^YQpo_u}%4&t@KE zZ05gi!)7Hw5@}yCvF)VS&*4>U3bFx6IeYiiv`n0S%3$=R1CWuzLWIZbZ%x5_lfEER z2U37&?Lr+CMoSSMc<_GOfE>8lkB&%XIk^9h$65R7C; zA}0+`KKUQy{_J^VJb5>W6*+#6~-YmRpvUe;Kx}AqDn2jz8Kj` zxGo{4Z3_g2%A!~fk;4PfvP)}hTeuc>Xz|1{?^kNT?03J%g71Grzte|c2*=|w9b3_1 zhlwb>M99e0jjrLS%>MS{0gmqlCbQI=pn0cprj%gW%!LRHqH)k&!Fm``>?}oed`lV* zIrSuIIl5Us=MRKhgW#sNVq{&5%gfNFM?1tO(yFf_sf?>3sKJ-|=!L)PD97!Mq74eR z3Tl2ORP$xLZ=nzf=WI(BJA~Qu|G?R&59f5C1z-L=4JQLLv0EMMhcf$GjZlsqhX^%kBUpFKJt>Yuwl+B&|MpJ zg(|`=F(sJyjnrbtO zKLqn+z4O@E0~6b57XR7>t5n*LU|qr?qG%D1qhDPBWYD_5cdnO`Src2en0KAu8RmT1@)hwZqQ;ER=VPs4b z9((k8P9Fy0>@!YLy|Ik~B{wghPCSvguX{%%Q|({2VV5e?!l`6g`1AI=pW?-r-$g)+ z&NNDM`kML4kWbMBg^z_sU^9md%5jY4NvRd==;CO?85d*3Ctv-DvrZj=o}Jn%81v}$ zxpUWEwQ-3Ev{(*d(rMp{=B$B^2Z8%YL)`^{P`LyRKZ+Uld**{gaj|kzsStZ0mi{mo zbE!g!casFEzf5OI7DC*zO3J@&C?0zKDFn5QP;sjMqh#7qMw0PknUHE0o^y zUIWD}`4l_ZSb@rKKp1srW(qot>WNX~$0~0)#k}GW_ug@s_UcE74G8BXb8oozx6+vQjYQ@>4PB{87qcFI6ngFM^YfJQQO}kO__|_%ASu zF3D^uy(|qo`#*Uw=7G-Zo80dI(G%yNf052Xf*ANZwrh`l+zi-f_Q*kA&W7b0pu z5=={_0hThlZHhgFQAPWyk~IpP<|`^IL5WN`5#BSdyAWsGaS^=&>8PZdp3_IHm&i&j zr3gw9bi8tGDiou|tNFe^FkWz}TPgj-2!UKg)2l~UuIcZhcJ~p>c?q4<2KDca4P0lR z@$o0PB0P+3OL7-^&mfwji}a^Dd>Zql_w{aDMRELCz@bH8U^or2>6kF#4${#Zh~eIU z5%kt8<#eF95wF>@8=35cLPJ9^`MnSE)C-g7h1!musU{SXCTTHLEv%KyV%Rh??zfUw 
z#l=^35^Q*(Q|XXQyuZJ_9gn>CGscV=iQb*ts!d6vsPb!}O064^Sq%yG83xo{0C>}r zV8{E}1B8r~MY72lO8ls85rLG#T;Zg~_d>(+RY+R?k-RA_%`|)%=)EREi4Hwqb>$E9 zqKB^vm1Z}>I>gfPLJ%r>kwL_$M}~5+m+aWDJ30^PiP-in`A+19C!q)dxfS?0M?P%) zndmgI2j+bGBi8)33~AibIV8ZY1cTc3g%V%t7Zw_Z0`}E>sPN=-cgUEdr`Ngwxaq={iWxZ4L+1QPez*k<3dDmsFm10 zoJK!34u@odUBQ;DL)np%r!-LqX{e}(0zqg?YXZiQ=mrYza&F}nOTPdkHGwDa+zU_O zf-_IX_dm@;0Uh4jC&uHNle%Ha?sHZ_c3qp5tneHbp+I8|mm|_t2qIv9H zFS_zP47vGirWCI5%0TW_9+Y@ys$~AGj7q2@f!`TNq_kXRepcLF%L0tJ{iXX5-j0hq zL}E)ca-TumJ!RruY9sl|C(l)vj;hHOQ}P=_WS4kc_Vg_nea(4THfteP%vy}(ZAl2D zMqs1CMg){h8wS!EpOcx7DbG*F10OyEx*KZwGY`pAdU&za%Zk7X_9%`JZURAXvK$H( z8Rr`br>J=)g-#?Sw8qG5&(anu2`y4E{(pPd0oYY_^-n_f-h*tiSz#kvHj1k%?pCW- z>}Nl1{cN?{e%-C@r)?dz*4?^A1qa{&QT7Og9RnF8Aqm-gk?;3A@7;f1B3g*pTAB;_ zzxVFG=iYnnx#ym9F43ZCwLh%IctiZNyS^19p;eA2_!)JT^;j}c9@y3}WZA2=@GEl{maSCi9=`ld)K~vDcf2k**>8lxh<-W@Nt8FNqY}OvcZAA>AEFy z-+fEPy>7P(30QKclOrpSsnR>{|I zSqf#ZL(qzCkanoCdtnjK1&o>4j*CHrua{IGJtmovo$~M_cS%NSG8Rjyy$5r$^g3!G zrbYWnDvpKs9w^WNA|fs(ocxprSo^^y*|25jiPwYPtUmP(tvwx1M>EH;Dn4BRHqz=JDpFNcCNX0Z`=lqHAeqhyOu>()&Q?jA zHCAREStzR?TPD$g(2i6%bmO!kNM5+-aY@e3l!%;IO~<-0t+o*(Z5&P@HodhLBxfK5 zgu28R;=)Dy^JUMbT**0qGGf9JDE%B)23E{v8<3qr9|ITtI2lsu7W&&Mu?Z(|KKlW=CnyL3c44(Cm<(mJvb7~ z@Z_n4AGQf|x>{vp-d42Fx>30BDM9@Nsw(5NN1P>TrDOpl3a`Q|u?dZKsoq?+n)*n5RXwFgNvPK6W`S zAb=d#Fe@24v zt2Jcuce;1V!W%D^J$bvOVOud|yupkFLiFajenYc7^^5!EmOp$Sdqv12^y|3HX``X= zlOnB!_2P{qC9p|Bf%wVFM_!asGscTs2v!!W`mSHH3U%TG2WXo#LjPE<=>PUF24>`h z56?^mvh>Hf_%Y>-mq4sH83sGM-8!W5@Bx|VU5!(g48X-k0Bk*At|J6n08$Q~ph$1J z>1y5r`NlSta@??a2W$#@LUzGZMnD^scMg!!48}>wK%6z3 z29mN*8oTm%SSuoND~2UkJ~;joAz0df46&Uxf2?nNygK2av8=@GGadLwxW+ZSnFm%s zT}6oq7+)Wz6g=_26O5deV~tigqA^Pd}4Ev#+J&*cg zqN8vMFkI%rB=?1j=V4V3P~)9P_8gJnZZYUn>XVxB!=3_D7OSQnNpq|KM*g@A6Ptb3yBmd~= z*NuS|72zpEw&Y7Gqzx(fGk?xBId{Qq)pkru02PV29G@ngCO(Cq(}DnW5VIyqhT4Ee zjcEIfHDr+f0i&_e*1P3t!dxw&#|fMvZ50#r3+KH5WQU3%ycPmP_kaJlvi-I7($i=u zWe~$!l|gDoLpsIDf-{)sqIFM$r+CbgTfU0LBW70 zg=E-=HDCbNlvYb<4A1eoJ#siiW*2cUZStZ>Fb#Ix_Kl_Ti=TW;hEzkm7h5n7acPM;*i2GEH0C#mKLE!Us?x_17mOEaGV4*bxQz{NE47kB3Az2 z-+Q-AnLI%kEK{2yB>nN9Ct}vjutP8oIIRN^KD4b~sv!DH+koLXYV@*g1X3(G0m?+d zU>*|s_MxY%9M6=)#9^aXovUWOnc&KS}TI-SR-ZAP=X5@*5t`kALh%JSKgN~b0$ghxD1#Q z3ldL|ZdB~(fZTWsL^}7rvrXQ5NkzXB)ULCBn6yGVxO_-P1KNU=+hmf8l%dv}|>@yPR1 zR#_@ZNhy+f_GnkWHrURnhxWRY`|Ut>$BxxXO;M%zjs*|K-iK@BIjA`2pci5fEA=Mq zNl8-JX6a4W$PjD~?)=`pkPm=S4k&R^yqpN0rwxIsr{5!caJfha$oRyB7@X(x=13-gxW|KE>f>RA&|7Aru<@~pAMU-BrmLUu6gMk>G?$JT+J z%~Aym#&o2r$qY2(n|bs-9@c0l{qxxnfL&QKzuYlVIJVsYI1FQ8EBTQo|PRXaRv{;!44Jvn#bgoXpi*^z^%AH3y_(l;fXT{^M_2vpV(F>CAg#idp zFk8bxP8V$0DfwITfW5oK4_x~wV9_8P-&H__w+ee>^+pJCF4^a9o{(mB$l<;Oe`U$~`y#NP1duyaPl-p(F0=t1D{c zg?k^DrN8DWwq7;w#VJ|CHR3^Ans(SVv+f0?WU>pIIm{2`p!i3OpJb>cpp~ z0uwP3`uf%u%(RzCSS|Zv#WzWHJYs@V>W`Gb4cA?ce(sXH?)VOlWyk2bdu?5lRP6#! 
z0f|zFB|=KjUk#0PfMIwD6P^3<%=8pF3en^uC=At9HAre0Btswk9kwS^H5~_#H9qjP zCo=CH0NV?%duhcs90rIc35dbGY9qVv9oMTE2AUnIh-T^)`eeqm*<&RH6Q2}Sz z+7L8Hu_`Lcj^ZCG&1!S9u%U|isNYX6sJ+Zvz7T;0Go`7!MJgJgu|+_kW129zsBb+G zHx7n93%4JX;sb|pB-ADD5G(criSN_kCe_$09!0)BFlXl1K}DiAsqyF#pHP2U{N>AJ z{?{%Qw;3ik3$T#)fY3~~3CtL?CY3HhXfmWwp@tjg z)~d&5c5n280+WqnN@X-`Bw_-2%*K4Zah89>wO2`9O^0mRyhqR6NgWIWnO}`-zH2~n z@yWAzFNoywvJYM{#`E|{Y)c}cJCGF?1)-tu!6f)JAejU7z-k zP92;=pgwIpl`!7;rsKmP1Uu-IA199htTGAPk2?T#&J7nxV04HafYsXL&~LXUxz!{y z;hB)xm@N3-)dP8Jn6uWyR)!G~;J-hl2*;i?MSP$!g>_cRp|(I^Lm-<|aopQ6zxYY% z^7Y4lDCK*LW&he-DFqo_hx6DP9Luz~0I6U=w19gbiOHA@E3yeAQzZxGN2kGf8`A4w zv$3x>0+onJjFxfGAoPQ60q)p3&=%k5AaSRuE;t>ONJ}^M$^{TpEry|wV)3P_A==69 zL!wuP6crqjGT8KsgEeEjZ!I_hKt164kGXc1q@6cT_OHnWeDkE@aG4xER3r^hNa29> zKt7@Ik&>8_EtykBOZMFH5*`o9F{|h^^4Cm$Y62eo=&&0942J|D{pkpMjuadG>gW2w zkl_BB+;_r{ar(oG`S@TPJA2M#sfQg#_7~%}fjMXi9{J0zJ+L1DDldz+D1dNEpnl9P zf*&Zgs+tCwJ}*ONOc}4mvRW=amiky*0P+B+5lGWw)*|Sri9PN(B}kRfnReHp?@M4i zkXMO)7L(y8j{y2XlGU^Thc^J!)8A93UOrc*UObnzv_ef=oN28@h=V9f_An~eeaQB3 z^`|2eh#xsjZn@{X{Bq@InYo6SLX!_`#nfc3G8|>Yj{`vw5WGDhft5IRnEt^YgLE}O zwGy)2l)2NzG)59tih8ON?7+T0^K^P#OZcd~k0bwZ~S7#LU@RpDR- zyZa82tM0r>j<-YHl=Pl96%};H2?Hq-h@#T5zE*jC%MAz*lnIy2kO>#ekd9hd1O}6f zgOQHRtN^Sy{*h1`GWZc?D+1COXYLR8<>dN%_7jcONu}I10GeY%;Y~PTlmU@<;)u>2 z=ja;d01shL$H~siLVxu0Kf!2iyZBvCS%D^1GbYKN5D)$fS8nvz5m@f2!X==5C!?^a ztWxTlTV>VDtD%}4A>aPSSHutU2|c>ex5kDRf}*(H(>^yI`s9yQ4_1e#K}0G}M`NeF zN8q#e@yR0qN9nAZnQvC*;lrA8@Zt!~TJaqTPW0-`#Lmsi(?SE>AK4-UuF{^e1ZbabkNq&~I$8dRS60#sXmL!Z`X-5hws+<7TXwuv2{w z2m!bX0pvLEs6)*)l)H0#9J->I7q*Q~fz20qoG7kokcsDnNuT%#FnRLLgqirLk~&UI z<4l<4W%=tbeoT53mw8$-5N2NRb!PcKArT?IRz+*VxBNzfBFSWS9 zgK>E2Cyxl(lS4pFy0vx5?|$_t6pfnY$KUxnYy|o!QlhaJq8=(FQKY%YrW##!#0%R; z1*S$=yAA=v(Kz}O3DEJuejcbB5CU+OhZj!#QZU0-@7*o6z-Hzw+yQT`o2J1JtF7;@ zmp*-?fehD%X-Ydj-l=q>8PFf^u6*dNAC{jhE&QDIU=#(LiF=|tm`HePG{n&~EYeO# z4_)!`ZLJ>&Gm-4Nm~YmL?^ZB=nF$|=M`yzM&37xFBR>t$s8(7{WM$xN8@)A8{(!8E z#GY|p_6Xb(>^NCrq%+w8AcP+$NC38R?TNwC*3c;4knZGt43;yC1*kFZz3rYLEG@FX zNWS~yd*#jL??`k;nv_5yv$3*9sPoU&pE3YVO|5E-V;^MnYv3OVd4xcSB2x>IT6&o& zv9e~(4#aDhHkg4Ai}I8QV6c|Fga5kWa^)>(PqQ1*UrG`5>r8ed+tC6+qRJs5P*k!k zTRA7c)gEoJ-L_MIoHPRH1s@7AAAChkw-*=6J+Cg4MhGPI0E4<32jDuQqD77`CS<7n zz=!6=C%vq5k>2>4(+T1-3&tWKT@HEk+h9pMM^~_-Et8k26VgyxQM1tu8J(HnKjNsr zmIPmJ%ItQMk!R8{nN=4KMCQd>g@|Ti#=NkWqaNMAPi5n8cfc6 zCk?zn3NZl;Vd{6ZA`jjJRECol!N;#r0RH^c;ULc8aHp;HyJ6v{^diU0C2i+ z&83Pc3_O~^7jp5#w1UOMrkyJ#k=Lg=tA;L~0HSgZTD9++PK#iGJ@D<6_5?*5gt12b+ic}! 
zwa%I2$6>_w*|8mP(b{03r1}R+N_4a{i;ukzWKs?!4GT7deW2<4SKj19!Kqz&`(!sX z|F}A5LpOkS^@m19NCNIh+_U3t8Q}&q?39+Gvp{CqlmMc%PKn%P@4*83)?L4qjqA3F z59IwTI?y}Fy9>-)ci@!Y-t)8^$7w)2j){4C;OpHYv*t{MFxCipW7&(ce=qZAZ{q0( zrep#v07HVX6WskCoDLoL@&y@B8-u@tOnET^9{9~0l12g5%pUH^QT&ZISYIrBGRPCt`}dG^nP-wZQP z)`j1k@bq)m&6$UJM-*3D^Uid1WTJK9pz7z$q@*_|6rIyg zJ^KCpqi_@blkz*+^d_HAdYXL5tUG{Xz>zo(3k?Z|mEHz92BDb{FzjN74U-)m-m(No z!HRIvL{+-s(uzM`*y#(=-J&CozumG|G?EP-i>QjN*4^7)9y(|1yap4jXAK&*EN zs6QW^CKVrUmF8}5i3;_VBZbHgw@k%i%Ta@?!M!l)8&MV^s!&CGN(VMVUL+{YO@9BI zKjFC02b91idV0aOm@%R4MPImqN5E~%AsCLG1iOQ@T|ilbq=cbT1rg-^`Gt@y#f7uD z!FbBVF&bu&qULduKPQaQNmauj4g^R)(&g|u<0mo$Lk*Nh1n9pEWP4^+WcU%K4$ z(>o+7E(U@>k#g(pzm~dM4;;KlrC4JQJzRY3r$8ppuA|Dv|8p-&&0^e6i~WdzVW z{>&iL-MKb~z@_UO$Kk0Rhi`f*kb7gAL99N^+7*X6(%B?ZZxUxgy=9s*&}oKj*2!dL zq51ID_Ebu{TsCppu9N?891@6782S*Vwppw6@>lkSu9>+kQ%3#4xV$bP3XOg)a} z_~(X|a@@!aiOGqV9l2X%c4#65FiE3gykde5@rH@*j#jz(=MT#MjRg{tK1#}Yofn;} z6Rc1EQlB6YN4Q51RLIE8F!}4FKb8#p3a@Lgz5vujhurZ$KZIe|WY`1rl_s6geKTUc zsbF***U3ZBRetu9`{if9_?CqF;f%k5#!0R6NO4e9xuBpyd=}1>?2KWsG~6t^VFMsK zGD4*SQ(#i6~*O)PBIgcey-)vWNJmU;_$t%zZH6Nzs$qs^c|>%*}1@h5A)8YfbTr#wc|3a z3FbQ;zn!jsxK>vqeJsS8%+AL~eRkA7a6xPh)#@OEBRh@9g7Dus*JQIj`sZchIiF0^ z2Sy0|sN~AR+wds8Zr_Kc%K@{xCm&UqA88dYp z;vg<3FCyqbQke0Z@Zk?B4l6%CxXzQ?ry;DEUBRmIO8MwOzD%Au1sxB6#WJdp?GwmC_dN;5`!t*_>-sZFT$M8r++ZIFzt3<-_3EVs5)H_4l@?e2m6Xt^dd zCR}Epzd%ZGdrVDJos5}+wt}Rjz>{;bILS}$3=u^K?w;*?lzA{>%m~)rqD|QS6sTOjtgg?yVV*@MDGWAFmdi@xr_nhz!E3elww_(I0-Dw9~dC z&v1~$4UG-5cR%idguZ?jR?F1nL@k#Wf1G>HEcwBG56ezyd5#VD$6Wv@8~@0x))N!= zNUYW@Jru?SO|tIsX0k^Jl|xzoPZEFi#7 zFFU0)Vohx=RFF%h0FsL23D6mM=*KDioH7D%w#5{xmmd}t%FLltqBm9y>I z<629Co;_ea`HlvMMMg?MVDC96y$#UpIcF!Z0IA$>>(%A2gVB*L0Oq$6lac{!uvbi6 z^MW#61CT{A9qVX2m&`Wekvn7BY3jrQq?4Dxu#@HE-nh8fp}hzI7`p%n=%TAHfd-(L zyzw%S0XWEh*vB2IDpdqvCa*OBtilx`=n92zJk6>V6KkmqTNBGJ*HoELO=Eeq!(Lq8 z_Fa;bGgc7*+f!FwBdqwv^T9 zh~p@OY1Cgk2w@IbXdP|DMDLHm4}Oe8MB(fQt-wkCm~=$$sVPZ-J#{u9|Bt=8(@h*cYirPPB`xcP>2@iWxe@@`gK^WalA#Cp?Z|`Ov_|M~cgvWuW2FrunY0A7 zXML`8wzNwF40DWwDW=fqFsUxBlH5((B+!$b*{3Ez zk`(PIkdl%T?7!M2HGP<*j7~?MowE78^~wNkhU%vmmsaDjbjL(SNe1lI<0dj$*l{i_ z0qvGxoXuC4RN@Ar_i@JTFT*E|mV^5aNCFH^1i|u5-o~8}ZH zwK&{LJZ1uP(m^qfWY?jw$3dp%~UV0Sc5&d z2Mm0qN(uVvxmRkW75ntjIEK9RlDRmF94fbc>o?K@8y69QK5`65z=h3O9tt^*MTZ7r zco)EN)dz+pDdf57NTtL~A1fD~KMzUVg#t(9btHh{hz8<6#sNB&7Lh)y>qYaYl9M&> zT`4>vAW>RcDyx>Qkd@d9tXsKOk3)BE+kwHg1=@AVvhuYzq#Wc@9wy^Eue~Ml(ed)$ ztE+I;SGg2|6Px?N7HI{U_WBF2!Iprpv{pCDs#o5G!cM!q_V|ku=8KCMYMN#Bi*HD4 zZHv6|%qx-*8862`>c72gr5wlAUQhn%5!iC`ldzCbse+vLruWwa0d&gW9(-JmH}H-Y z4|(=4Pf1TxkG%Z&^SFiu)?|^_6Zb!YE4|Puz`p#~jO|Dmtno$#Ma%2Yy@GnR$X1Z} zd7F3QI_E@9TU%NqAFg^|3UCE^D3IT-tvmY2Les2wH55ehwrrQ(8*}B|_n`Kh5D97k z{UOZn-f@eebf5z02G8uOQ4lIoWv~Sq&JR z>{P4Gj{IC1pW4g7C;*gUFfEIt{<}Z+DDlJMB>*bHo>bYzz$5uR;q0lBGBr~&%SXUg zevMSu)yTYy7D+1nvUg=cq6$}b1A)*0Cgrx{a0%wfvnR;V%4Avb!Yi=ETZuv5BrRCY z-LaiIbnuYW9I2Li0Hc+_z;VveqEeB`-7*%cmpNxo0v+onMY{|1YHi9|XW+(|oY`lo zrd-h>*aN_2A0)Hpz_26pK2Vac5>^&5B@)WZC2^-=JN8?xunE@!IqsmyP)VJbE$L@v zOW1qidW{$BG-cLQNuN4g#^X-+Zm!Taa%9_^u?;wKs2JC!^~h-47&~d^G}*FxgJi>o zTryUU2}h^OvHEHmkuyqmZOE06Fe#IuZyuFARC;#gN^yRnM8rk{J~(29y5tZTSt$iM zIx1_VjG75GQ}jP4Qd46yC@oMYI1+0E`LC1TISw#8JpH}ons0nniVF(yS1hkS`J!BT z-8DLKr_4KB;<6GoMeX5Zs@~iM>gLe?0*IhSfmDWlgqmup!{m;MA1XO>ra*}*Ob!<2 zgPN(r05~pZpF2-N;=(kKRZlKQ-?T~#;&TNnh3)5ssL|)EFSeT zW$Gk3XWk6G(bwNr(z)hJ7?;KP`_3KrKrSFu8I|=|kZ>h|@`l{7NN@|&OzcHw1D4~b zkJL)zd6&qQ3udaqQA1Osj2@ARapflKK?Q#VHE>cjG(UDa+<*~)bI(Byr(o<&vd52+ zK-_EQa>TAASMZ-g7c{Hnf!4P`5{iCYoi!h{-sP8;jN(V^lO z7$@0NbJSKp_pyPvk1Y$f1kM7(u?^RHc|v89l6|xvK!B-ug#wqp*zZ9s&%Q=@_q$Ia10Qnq;3NRgHO4OQ9;;Xc 
zx@y?~>pyO^Bm`q6yakCj2epC}dOOPj?{IQI|ia`&(O@}<%R7jqs8Uhs&j6awY_YzK$^uxdUGO0Y+m2_#52fjKM<2aeBD;h%z3Te_zf+5Z<~iyR~~as7GmW9 z9J@4umrwyJ;b={ja?M#Uy@m|=w&6UP--)Tovi|)KV6N$yY}vY1F1+eu7g)M+iAmFO zwyFaHQkcB>N1g($29Xq}qZlfwCwKoLYFy;?x8IYm-1QAemX*ks^_wIfWO)Y=n>U0@ zI)Mz@U_EJ6&KP-N^A>sYh2^-&q7|!Cj^0II3&WGeMa7ti9paC@dOIfd)(x9vI_{RE zX*F&~Ud6>EsTmpIU9{m+i(;95;X>R*R3NKge@jyGQ&l5v?8vbSBQIQ+oiZu|MlW_r zI3`h0aIj=y0CuqMWTat$w|7HN9#_A`gA{)2^*2GSL_r2WREAF&C2uW%LrNi~S6*>c z=EcmH4?ox-9iUJGVUjHniv`V_(S3XXn;z8QfB0h%H!b)~2Y-m)l<|*&C@@KU4}WWj zvmBnMKlJ2J<<6VFE=~D)@*Ujcc-{q5>vj~$zPu;p!*lZFdQcmg;1h9S zNjgAIt(Z#pB11!|oq~>thjy+zt0J-;?&>3JDV}`{^=7?--!a*`B9F}ZSMrTNDQk(<>dBkI{ z6$~yX^NCB0mtdUbW{n*Qnd)u;z+W=Kl=BA@Fni<(VBA(6n3*H8&=Gz>1aUwRIJ5!s z@Wv5VEWSgap%n*mDKt797G6Ci27BSa2r&JIXNV{28xk0%LKdT@j#2J7kxvp<N{8{uc1cQ2(RRdvVaGf2_#80~0~vKoP926yxZ^Y?LP_F47$l90kC%3w zAqQeI&s)4eLZU(ek2nbe%(!hx&(1(QAw!RPhXU>?xRkO7q^XC8r(|SjD)NnnI&64k zgtm|H2n3Td75iUrAZHp-O@~Ns47M;aLx(~eu1SVrA`Hb*RWOuUh@6Iia+|qmj)cU9 z>L@c4xOd2%`s&ZL%*W)#C*=c30rj6GL!%=lHZxO}f4Ck@;%2#g$+=jSy>X;gB$Xe1 zDCaC%D1W&BE=>MptrGK6N(x_xftbJdy70~>PleJs+L8z!5>8oV) zgcO;C@fsc$iuzzMF+KjWv$B9NJ7n$Nd|8NFs=s;_uFac1O&)&cZBThHOYm7^< zE95aSA}{~iHIkT-WJNF$`Q<3T&wKwcD`lVfuH#CEIrFsBl2kQyHOVyyWWLtc2@5s` zZO}TfPM-t^XMOE*`j_o&zq8!a@y_D8GWBnlDU;!A*KL%Wmwrvop1V-aUA$N-t19JC zVIdfL6L1ChD6Iw;HgA~V=04pAWc_-a8eR0Io1_^CI>Eb1{`BXEuyyc~pWXL_y#2}= z2;>Zr%fEWD-29bGa5R~y%scWXxhxN{e8((%Zo%(%3hqHVh^;q@n zJ*F!IGiv*mtx%>2mdVqmh$mJ7jl%{YFZ)!cPySjK!nMB6hB~U+zhkT_+9?z^R{ol? zUH2meCCXt>1<7q~RW!g=g2n}6!nRzUDT8})^R5Ae0aJlQM^K+I*1)1d7}EtoP1gGZ zIAKT864tZX|I(-&3__+@qQ!?bpW%D7~_K6SU#f8LMYdb`O77>Boh zw z4hQwL{H-McUbB5a=Ma^RpN%7@i#605qfPFIQrn?7f)W*?7&1c`yM77s#?| z0FUpofX3bsGT;T|5+goIS{&Rn2b~X$jjDfal38^|JbQBKw`1kgVDa}IWAJ2cS#ar!&o8QlNdneIg4|0sU{^qPm2k*lxRHVeQO zoLum9B<^2M|8Gasf9b2K7sYpXLTD=bp7?;my&&J<#LY@S(F|xE!ZwK zV9(!<)wUTtixzB0y`gLrj8)wiTS00Xw&8?_ma2z>;aCnk0!`r0l;e1`rnpS%3oE1+ z$B(=tz`Fxik727Cfbi&wR*9*vlnNLMSrv&b*(DdtpZ*ujP;9BV1$%qVhq}s7nJ`vf zU%5sv{Yb?2DG+;qKWw8v52Sfo5CDZd{_?Z?Bo;?TzM+9?D{b}Xf_c_!-j-)yYyo^y3C1CC}@Q-kD493U- z+ySuu)wjhHD|Z%TGZJ7JBoN3Sx7kB3h0=RC8SwLv()K!8+u<)IVd0W<%XMIw?wVb{PP>X<>1Zy2`ddu%-eW;$UFX&{&H}bqvQ@ zZ8-KIGjIsBt~gs0_0g7Z&-}}DYFvMRbLE|qP_d5>3iAN6v(K;)N<=CEslDBS^ zwEge@LPhwvJo}qp$r1?gEDjG4Ki=h+7=l}_Jh9k87Xafp7bf4ItHiA|W5&zG^B2jj zmoAn}oLn^nSx|ME_5~>f#7j+sFi+lm`+YDmePjXd0pu2fCmeL2$Cwx}0&w;NSIkau ztjo&ECCcl#9N4=Tdn4Fr$Ev*i?=N95)GRG%?1IJTLU%e$o`2?fYm#6BUv%}QnCNBl z-s|szvpht(_KPk%Pd0zJQ3?)zB%KhU96n;WOqoAZ9{BZr66_O*+jel3_k;U%BOa-N%+5EQY|* zYbw3v1%z?&HJ8b{H6KVII%3-F8S(+H^~y-k1adhpFFf;{B;vTSw6s_*z3xhw+Zrk_ zKlPjua>vg?UG-~1fWcimDY zZ01B61Hr9xF1%23G7_-!w8mf=E_&ICtH0+%j)f}CtOr;A#q(#$t8csm8-n2&dnsDC z&ud%^7y&reGhi>CrPs*rwOgdI9xJbq;(dqZMqIeKe)A^Tf~%A#j~}lq z`%RbM0$~Sl2lL2JV~;$vxTPyd$;Y9wyC(~1^fK@ z=bkG`Q_^Jf>mLAn`^)9GTn*9PCe#7kT3Bj1=kg06Yo01kKKiH}E3Jf8m^d9W9(%hB;&`8l?mrelNTTQn;e1+IYqTPAv8nR0bPP_43=(C9vPYGa>4aW zC3kIs#1$7xirZ01$Ex$z!2&r5;=RN@Tpl{SM+))2&pkkDA|V<)e~wIne#spp zM@eS(FiD18Kb|V^NH4FjSS=VO;YfPNu0615bVw%R_?BaU^`XMgd5dPd>>IMjoa2Mv zr^^i(0XQo_b=OFo3D1CtZ&r1Mtb65c2z&(LTBRB&h&0K@t(%}C+#(~+8YKx(qM6;X zKvup9+45>o1?Qa$N+qy8#Idp>YLJOySU#R(nnw176_nwm{WY9VTG;;Ul{0^Fq+5DC^qq&=BYw zVBhWmVIVJ%WORBX3ottcFSyO13&I(B7wp70!Wv67Y_suJBOl7_!|81^#PCQ%EibxL zozS0GXKQ8t1AmBb&w|-3s_`ByI1B+R+^7p?vAJloy`v3s03|{-;!;RRvab_>ERrDD z#J}_B%>>eD&^!NXaFGn(tI_wDVmo*Vn7u97@AGcSEUeyFVkI96Mqw17*Om=UAq77VqOM_|&@;w^DIB&i7rldhmx~j551z$R_ zPg99KF!)+g9;NS$OM{Th6MuX}5`m3*LFaQ%KO@&H{i+HJy}xFy%nA09y?ge_5)71D z=qv1j;!kRFs^p@x(o!Kk2Z_8kk>1Gg zFzqk$xQNQLu{gQ8CF8Bv(Mm57Mt@h2aX_&Ek-jP>EBZL)a5Y*3LlRbprOc`pW^Q4-Lq<#7OkH=!hgG3SL9m=b5Dl}Ba7n31s5 
z>L(jM_z;-24631N%Fx)dX|wF!xm&`cA;*u?f*4Q_2lpI+-!7PH8Y0(R21BGxrQay^7mCiIjP+)S%yxnQkPgYd4u}p9O^AcsxxWfj!~l!?z+O9r zVJIOO5*Q-eAVymXS?XDHW=rOn;SkY`QQpN)s1}czI9_GmBVnJQ5-Pz3*bYQM3^p5L zm?Rg;AdJHCCbtE?Xjd9cPC=@!&ks$vJ+5dTW zX#asY_}vQuO62+DXpc-aB4V!0wcrhHgbMn&F{7~m4gsF-k!`q1It;6~CC%ps0ihDv zwsQ}p@M3g=Qhhfcl6mzw+Yg7NXB)=;;i4lNW*Bx`w(ms$mq9}^Qnu&rR-1(^n-oDf zn0_S?3d#p_oLYXAHDFm0pfG9Wh-1Y3_16CLc^mjKAd%;Z95+-vM#IBM&GAY!v`Gyy z=t_uJDxA2PJvsBiVy#kZ9*k%?G7)vcfKG9lK4!l7r?LWwrg4xDj~BHJtrvdy!MgBU z1Ng)z%H<2Fe5-!=GX8EXn`tzy6<6!XBy2Kn5VIVtzPxxy% zUR~ju2R#PeKMMk!_{MQ;kvX_+#~%68dGl0;%?w1fR28+ZFFc>pHA&`sDZsn2Q*4#WS3hA;3>h6=U$LE>88OL??^_nI) zFi?2acren@nc=4&|Ae2GjuC zdemvk;%>~ya>75i-qxv?`LK?;ot#>t?laC^+PG=z)_&Ccz zJWfuo#u{#iF`ct)d?;`k+y}S>II#ipuwZXZbmnS_u2Rg_TDq-R2!JzrkO;t+sge=} zjIDWTzWlI@aK&O8_133mp>Kmf|0)Pjb=NnBD$!kfu?QJ~B+EHAIni4on?mv3^wboq z&)~dBITS743bhhG5POY_mgeoV7$`@0ge~)21tccMOUdl zWUoY?jX3(3_C+0Irwsx0Z}7=}e^CMBtpC459i8P69S)^3HYB0w!u~pLg37Rz#sxRGx~9`6@w5M5CjGhz<|Iu zm}n3JX9ffY5x|+zkAtlkgus9xFo*yK1h&CMgAh0~ATWpk&WwH>Y{ei11_XgY1TY}5 z4JI0dz?lJoK?HDS^y6SF1|cvY2n-^C0fB8W(I5oQ3()5=ycF z0G?z30Dpr306zYO{+= z`#)iNB7*;dI9u@&smm%52-`WD5U|p*&@mA4K@kuT@HiTqaw&<3{ZI3M9xsu(v$H)H zJ-wTo8=V_7ot>i@JtHS4Cp`lbJrfh{KLo9lhpn@LJFTq~@qd&2e>@^4PDYLv_Rbb| zwgms-H88Yuapol=`j626KL6cMXA9H+U&+?#f71GwApL)8=o#r4=>M1dUsIm{M7iW0 zElmC?|A(KCk>|gV|4;0Hba?3hBmRGN=D#!jujs#2`Ji~{|99BpLU({{-@k>*_x#(BuFtR1o$JtXp?|R z=^VL{<|gasfdG?YFkny+5|QHO|Hp14fk{G$mlo0+AN}X>V2n|_%sual!tMVdLT=~D z&MQhnfX1Um#FiD-Xpf-5?^GtvI+ z4$(<|?`ktQ3g0h1{3+@XtYfisR{x^vUA@(!)5GlahIh@2U^uH2DLKHADOK|riO8`W zgXX;l1UPu;FZF32>f}DLc)js37Y=6znA@w;TDKX#l7xu$1BDjYCmfE*I+Mv%^+NHs zAYX9MNnQI+4>4e=oJqRVxFph>kI(|2aNN|i;g7PANCaj->3XWZ00u&+ek%0|*JsX* zbbIoEr6X!#?+>O|!b_n=;0A>%m794AoetRX|A*C|&fGiz1FRI#r_fPNhOF>WK;ACTqI1l9d{w zvSNdU$E5Ol@Hl_(t?>gap0zH0g}t^2Q4-TDt%N?uxmXVaafw0f5}?br8Y zp=-2di3nm?Wv82f{QMlJ8`Ew9&X^VZj1v&Wjz&Z`Yy>S0=|HT2?p z%tr}_Y30|&CDiV_e#5Nx?ks(8WQUrJ&ju9Z#2$|`uR)o17Q)46zl3hU~4NoRT6zDRL_p~NzzC;O{jFZ zyhKoJE4rZ@X*#KFab5(DR_V056rcN;J;jGE$8tS|Qom{!&7I7NBogA!D}@${3HHE# zQ@${+O$m8>54Yry3C=9bl3G_ECblho|R} zf^}wY&beB|EG*I$Fdiw4GI2$H5=dxgC-;Q?bq4!7ST~0H>$anl%L8Gg^+lG~uhQ&HFz~)2@DfgZnlsBaMi&n+A!fJ|R``s#k)O!#j*bFp z7ZY3&j*z)T-g#@lek(uP-dwzq`A!)g(LlhyVvF?*bLQzVjuRo*>Z%BkRWLKT9O3nJ z?Ec9i@GiJ5;8NN8?9B6d`_1V!z=6<<5)f`HGazGqXB{t{zg0I1j*P7E$G!5ENb#QD z4s<{2w*qeiljWaS0hMMd2zY2hB+flafGOQSwDce)KQo+;vc3unKxJru+H+VNnpI z*tlT*IO_Ae&Zfu3uPSa@zmdVR?rIG-Bn9=cnxMew4(*AJr#au?@$tS}2ptO~Lpxcj z!opv;YaGgI#~=JbRo)ga{QX8B2@fcFpt3EyNNVRsBZOAcI1{~(G@mW2qOa%BsM|%p zv{jpJ$v)ofpY|Z0&bEQMzS$-aq5zO-7g#uF;+`p@1qxWKYrV-aW;qP}>^s~t1^F%Z zWMvB^RD5QrS|J`yHaiI5k`zpCS2OX65b{L&ml8#IJysMY(;4D)rx;kvl_qy)% z-x~Q=1NN;{bFk%6gQ~-c8*ouvnsfU&P8d{awnF+%>G;<(|1sEI$PN;XN zyBIG`c+gU(wPp5@XZS3#sbw{x_TwK2Sc+XRO#cKPRI5}IXC&gNd450__>F3z2 z>Z_yNyn=J&;sh;aA4>n3?v#sPpPhOp{ZZVEsPl?1Jln`8E&1*Nc#XjT;qQgAr&mUE zAh{tT@an4HlGJ8l$#(sUapC-m4Q1P=GoK=P3j^GBs7;3+^N8W2X5 z4L}yGDV8#a*l4!4<~RiLHCyZ-EKh2#5&(AOW&Y8Y@wX01r?_(3{9VGoZuj{UcNm&pwP+@m$br15&uU#2k*zJpin zglGqFWCYx$94Lb{F-=M0otf}X02^Iv9uOMxDl|*YMLjXFw zd9`-dYo;ouys!6p03p*Ec~M zI(&M^r|U3~`*$9deR~eNI)k0asH$I6AcnG29y*Vhz9+CMIdkuIyVIUn{A4Pwy_+6A z0y?+7CGc{y=G*3_YgmFWO^U9WseQ*X#Qsc&R?n1PCmItvh1lc*O5|EQm?9>U0VJ;g zJe9RSha*uou7OGJ0Oa$R@aiu^J@EY6Vg(?Tn@!B0X{`d00 zPzPi){h>JDkg{-O79p*kGSZ@rG1Nbw-@!5HCnDNTd!EUQO_Km_KJGtGo9{8oHB25C z)-axqG<7!Y-q<2}p85>ytIQHcKn26N+BiZEwQKgzAccoPD-+_V3}1gr5AWx- zdLQICmn_ji@sT=M=o(va2?#gI&Rb0trw`c5H=&!al9DGAQD1}oS}l`qvqmyVy%@Y# zB%ZqRd2_t=A-Gy>PGIT=rixlH@UposLNe>=-;N@4?;d)6k6biV>~O;$c1#?;jL}~3 zrib=Xv!>;sS`WCYIuvli^+{jR72y!`cgl9z!UCBvHWOnFj+l3ijzawmue1j3eR}c4 
z%+0{80NZ%`OGtR|inaqGUIN{G2D#56Y|ujDpma@zI3xy|oxTMLMC-yEk*z=e1ZvdP zZv*=%mRHb&=kQOgREK?+W4;|-r+-cNW8cE|PYZt#gh?|bYs9&lmB=Y+A@iqZJfxg^ zR!h$7x?Rsgy4zl6BAV& zP05oqWpr&{Ccxzgl99g+Qg$M~PT|0VD6DRP}==B0WlG%+4(t1&; ziKr|)maUdEDXk;w`T6qr>NQ~Ic6st=u-8~|1VIxIHOm!GAx%P6m?k`TKKR4`g@p{W zR>ej2=2@F%u6NnT8<>F8V+R_%wTbj>t|01iWH`@k)yR}4$ZFyr9&`QCZh^ftwHuXl zq&K+$&sTiZH_n0Si@MKz^8(6^=>`|JYnvEzrOAPc&|mGS)@=oS{k{~Y`EVfxH)jQT z^Q_CFsal`tPp8=WC>Z^#X|741(5MeVRmW_Bn-d=&4y?T}e6tm}n>tG5>Wky*#Hm=J zOHcsZrBGocYrWxwGHB-wX_XsM+;vuhV}P5kcfjFi4w{(&7Dq#s>*d1IgT)%m5oXwv zY#$NmHf`(3vpJlfELOQQZ@$^qkq!V<_LDRssJ$#`N#c655h-eQSm*uWdbQlpB1T{t z4_&odkL8LxLm-21updF|ex2Tlpi^kM4;k+Nbnv_88uvvM4?>in{g)fEG==dG1uf6o z3+!QoaK&0Jne0%Yoo^^O+tlc7vt^#ETP;VZ0#5N-6lKSs11~Yavm*BnN4os%DQRFOxeh=XA&P z=6t|_?y8G`V4ac#ky0GZtnVn1+(VF=-g_GJ2gppEi-e@K!^^DD780#wgZ-rsuKjbe z&3xQI4#I>^QwU0=W#UJXijI))m2&5JL+wE1?pOHQF!!EfFH?oeoyhgey!x{ik7kt* z+#@+(@Jn?j%HOynS62t*V+o3xtb7u$7on%C9}v_B!Lr4Cu3Nx=`Ce6p>~^>P<$JPELRu3_c`Dlm$NTSq0%B znjY)!S(heUwONzvHPgPyz8PXLaPqNb(qg>PepX0|?IJ2MzMasAXwxhUqNu*MsK_9_ z-O5I@91cPMOF`=mq{wn+^!-8h6MbmR6U%C01>Q06n|_!3+<1ONHzlYY-+MseFpx@Y zR*F+8ERN0gp5Etu_WHe4m!N7qkWIvf!_HK4pyThmr^wYpuhWz=)x@A!X%h{Tj)H|2 zFQ{hpO{2r;sMF(DQzI*wX7p)l;;~YifsNIXqS_S0N4ktqzc-FlT~>N*LI;~rohB62 zdk#`qGmRQwg+OCcMBL_yEz!vj@BS<2af>LYfK)?b)qdv6??fWQ+U|t z9h!qD6*LpEtS%K__cDB2IJ-#Oaq+wD?N>s>C)BiukHEGHE$}AmgS8}yc*P!V&c>ph zHdZv$a4$Q#H7QfZ@hDiBlbQG5w%^S2uiCqEDv@RVlce4Kk^bxT!ou>rL5CB#r>=MP zAzV&Rm|x#(W!EW4oVe$>aVhCPbzUAkA22pC@VlMp#I_fqVoCa$8esulU4cpj<|j!6 zv_w@%8_kvh{@a;^U7ny*Z6pBx^R`Rom@)D@-Jvw^1dT6M7<;E> zS)e$|H&>O^?1i3*go?11i6ph>9xlLH6h1Ic6!v~Qu{2dDI=c`_buP1b*Q z>k?=7c-VE5I}Ge;{@T3f>*(fmKxpmGIX=2Wk?KD z)l@AX<5NarUBOu^)dW-FrXj6_jZ?U!@*k;^AnEKr1UQZboXBeh2ljo==1LI5bL3-i z1^|-2M_WE`hPp`z?BblXsRb2*md>H5MX4 z*mZ(=zWI{x>hQs~?JS@a>XKTdJ4zy~F=MevfG`hW4F}N#33ke8r=O8Oo#8!A_fOtU zw|mRZ$u`EHThQ1tzs!g?A2gYAxs4i3kKi+x0-|;b$322G*&x6P4{IvX9#0&xSb&54 zvwYt5=0w2a7a1J}0RZR}!IY$!8$B8+?`H}Tb}n&_vQV=f$-ay4Z-$30sN)fx)tuJD zt+!xAZpU62Zy{{qoJwtJw4Oq{#>S86YtRExa}=F)U9R5d!|ds10p`bMsvDFgD{?|a zK!j0_Dn;zzN7eJ~z^TxWyG-@z7&0UD)sWqGTEFvVe*_%TiAY`c^$@B`o0ECPo$+0% zkWd4t*(?U&`{9fS_mLz9V1nk4D453KibbUiu+b&S8`#=)cXEH#ewH@f;{bRmwKv?KXas@%CINV=&XrCH6LX zzVj=a?6jtn)XWGCjO$3k$M*!vHu~7AB{wZ z2ys8`kI|82OCwY}bJ+qG-w~^d4Q4cQk^%16o~GTPU{vNEhApmZ_C8G(+>MIBF3M3U zR3z-`R0ZY*)C`{k_(8q3C(VS~iJ{iOlV?;(k(aa&C~(V=Dx^DLf}3Z zD>HOC$`Y6g1u(xQv{ZX`e~rn#`}=g}{Sv>}(F9OKd^UfU+>S?XxphAc&{BZ~FN-Z= z%>C_jKcOA_TVmXsy?)f^kz3QPN+=rxVwRH({;dCMf%hqI$)_|3Lja*3n%#~LFdj438O)BoDv4NPe_^EXOxfE_JJ`TIxx26 z!rUl0)_3P?#OQkE6otApK>S^%GMR!_^9!jY_y>{9lxP@siv2?UDanjd=jp>@dm=Qq zfmDv-jwGwAT|WTs12^nMr(|qZ)~%*x?p)ts@f*AtgX_+B@F4s;k#$Ki{epj%1arr}cv`>v;iLR$hb!+~k5L=WpaRoW0VD0Xp zE~$#y6dQ*((*fs5$&C)70(5Q6$jsHcm)m{mubw5V9T2z1*V%9?yt%B zI0~}%*Qz?xc@5dTy$=mx4EzlvrEBd#6DdMd1spuT8wxzmw+!x!WwI~Cnx^ai33_;^ z9cHvTHa?$-Ik|qkftNKNs9x9|!~t8tJl}U~)Ov@KX-@~4;LA3OYM(ZJ!zq*??#tq2G_f#+`d`PV89>U5r?uu3LzVm zBC7q<@V*vHA08#J$Ob*%{dVT~%E{JTsTT|0Hr!!)W<0{2%G{4N_8yFNkoX_v zwgJi;)gF5M{xQ6ILSOwbB7dI5j!hLw^miS{Md^}TFpG*q?v93!Jroqm!~+JGLPz!P|M<>Z^ml_*5T@AS8!F+ z{yu(fU+wjTZTd}NUIFI2-*@9%G!X;Va3x-@n&m>@i?^r1+|vmIB?K9`xefZb!MH*| ztVXoV&-_%_t6^~jsGe_Bb{Z_DZI-7?u~b}m(gN4Ju29Kje3W|F-kkx}^(Y%nWW5j8 zC%(m?Pk1G>GGPyu?4>fk=8LxMO~ElG&WrvaR#_4kUykv1y^cfRsCQ2i)XO>(2wX0_ z@?qMk>-Cemlgj+0yzm>tBH+q!f;(h%k>TM-w*#x`tdAd&t_4j>OHxf9)C*^pyuELF zMVF9ggHTryt=msj;^Du8E|*)n0qtizXm{{YjkVLN4UCU7pV;@|^H%a@Z#MVk^~mA> zx_^;acizHzW9=Xj8sx9_@S^cYW&h@|3hUt~x&D)uZE(3bZ&{A+9D z;IsP2l=49SU9|B`LL^wJPyVoh!Lp_TWT7r$h~yaH+oBBiV*F$G->M7j`C{l4_3pB^ z*J!0_J?;0?nRdBtPf5-_xhSd?#4XNlBGE|%BrsFsS 
z2gor=8Y_K8OURLKOPh%NVRfahao)A28Sld!3qkH0l=9+((`(L7-D``b4Ic4kiX?R- z-nv-3+}Nl?(eL+z*4Y>FxnR(UlWs9`UVvhCdti7uBdk0V6u`9?kmF)#F zFYo65GxP*vl!X{YbpfER@hk4qYq8c4x#!&-(y|$zwej`aPJSd=XS0CJ%qvn}Itz5L zpxEl}k{!B?v%J5Od99HM@7LF*j8T8_v4DVJLR`|u*Z|jakX60KlDg;J81Z1ta5Ca; zZXV`Py~L$Ap)HPGH!Zl~SvVTT1lPYtD}D}lTPsVt4AO62J?%9 zt?Bk}n$Kvwfb=#97>*|EEqE2wjU16oCPoabLRk#3mtuVuxvu94p-SO9u(>JOYGipY zeaT!C3sr)XguMLe!|o`;rLg!yyvqZK7)%eCi`6R2a%6Pm=-F)VXchj+qDZ(%PJ{R} zyi|_GDm(b<)7f7Zq=>O7qqb8h%`ZL={tN|D-^FJ&qdwN{=>1F%8;jH;jRa5L<2l3- zvND%<4#d?MzR(o6JX!JTu?LPcvcnP?9(ympk0cHtoK1w1mkKH~Z8frfxjwU9r*5V* z#@tJtV>7d`ob3&xy|9?Ic4vMKu^D$0;P#o%Kq{G0XLZNmtt4rPcCLWfo!ykM*xdG= zs`h7HU3I74?lnM)39_B6!bNS|2Bxzmb*)bi+U*PXFI9DCh$h`tmRw;ymiJ2s65Tb%qp9uo?@r;}(=H4ru20`l11#sQ z!!#uUjAnA$@nfux?1df*>#hjc1A=+i0Q=#>GVUnh46m~tr%49AjklX_w1oGCgv-@| zUW~CSg9V_=^r0I3)$Bi5RJl(l^pYNb3K(2ATU3^=)WLbjASoanMArdzpj+PAEY!h& z-8;Y&)X4i2abfR_s=`P|creRi-l(e4Ki$sl2DP|3jO>NggLvyZdkH|y;1~?n*z;u== z^z~jhCovvxl#^tg+1L=!Zk&9z!@xPelXj={x)OqXDPQJWN~kzP{k40x1R`}YA1Hk| z9k^_B?nyJ)AwG#4TXbMvk4ZH1=@CnG`#9#HYRt=8sWo7S*9wYc){oy_DuUeT-O&*=rWWkZJ&5yTYPSxh)iR(77#$ zi%dMI9{DGZv!mdS@rVpBqq>>KinNPO!q{)bXm;Lh6Bq@MYZ^HECt6MS7gXPT2UsSg znCfy{Egq(c(H5(hZmCEKH6YWK0x>S5YriR`=5O}v5csaYtv!Ka;L4xy?KTl4&56IjDgzMNU z@NGaGJ6f2K0qa~o;jYx{P@4;>2*#T_u|dmi=qIpNRm%SQ(bz84JlsQ;6gMW^?Q`8h zr)_)aI(>9r(DWCof4`dd)VXLyg?}UJwj*)c_*EU}<-9($wv~u#WtCXIvhX{Zog$Tz zRb$n{#7|R7tUt5{XK6*VHQz6;owt3*r$(D~8b;7)w~Oe+#I!E9?TvC@4b z4UE`BF5b?ZmTklwx5lq!$lS;ebY~4YIZ0}ej%GvHVmcCP45?b2@OBB~MtNA!q*QO4 z_oxfYAG(S8iG$A&i%IbOU8#YZE>i%JQpz}zO{G8{pPp$hv@5V`Zu{HkXrP`e@o2A^ zgjcjUU(uWy{j?6_%pKBrNxciOxVOGaX7(9F=e(=y)OWf4#T9lFE-}7FR%XcQYfOC| zI81F3I3AKZTD#MjEsvSH&t9;nNj7#SeVr#~9*%^M;$g_D9s+@?z}>->p}P)HITpQC zejGb3v6r`H(dA3)mJiqLd~7-(_y#)tLfrPA(=uYb4k#x<)kET~k%jC7<~NY&l*<=V zvMrB^xt8)AI#(smvYewh46eY(3C#!2W_B`lnjG0v?5eboAd{8ri>t}bIzPtuV+nMV z_eZ#52cKf;Dz^y_@EPk6q(SUS?>qe`$r+ z9a~5e#RQQ0@o5?+`M!VY*!?&qXH5)xC-mnT&G4jJc$&?ntb-JAK4XjZeu1))#mA-W z+JFxWIcuc+g*T9)R>?gR`AQv1cXNJ^CZR=!UCEbp(TCK5F8X4bMLVv>31;6pKKtL~ zhaiNTYuP8s-W5wU=KMQeK9e0l+}mJ_!4rj+hjNCHy)JsnOHsS^^}zRi+3fRME1%`9 zE>|5vvC9E4Zh^v}dOt!>?WRnZU>GK`Q(HtQciPb`7HW^&NOb#n(DUaj2JZ@@(svVJ zSFE2VhLOOPZ0>_$vg%c}hscOlyETwc_&7=@EDJ@8rDnJdDMn$@_2D3B8A(?w1rwS^ zVMOwh4Vew8O*w&cezn|bQuFD2tXYskuFZNUV0OUt7WG&Wm?`i=VZD58f;}yM5bWt% z@^rL2ix5khP500Xl6=Q(s|?bXrw_F1^1RLF z{v-SdCiM;>`%``uDO@9Z4NKtfxIb>R#c!mYcN9PQ3^8E&Ekax)N8FW+Mo6lI6G5sp zoy2!I#sR-slCeYiVz~>44i=dqr0RlAChO7L9W%bx%pd8MEE4@}!d$0Ir0_5+Te%wx zi}SS(;Q&HSuLNua0~Esn%2bNWnKQ*x2{Q84tC{+vG2=+DeonBOasol*BAn4p7fpy!e2uTngxAOkMej0VUm7 z@K~d-F3@o#;Kf8Ux=Q1eSf+ARQHNXW3*V`8o~ z_nY1LjvLGfFBZle7yX;Ec#?t>JZsuM6)a5;ULx%ktyESL*|@hfh3+-q0!o}{bB}!; zz}XD326dY99r8y)Ai@$O9f4dI-&GsL95m6;PXK?li4g18&(2WD;8&tM&p0*L2j75` z$OJ;o9h~nt6py+NJ```-`}J>3;;_bGT;xbyr!nty8OBI{Z zfuG32=xzbZ~E!|}^|V50^C)w@r!wJm1p*c1dj z^2{VhuIO&Ysmb9ET-j*_$&bowzrNL|epb#P=G8C1GECF(9%uDoD z4a1NVAn<&!@*|DScqxI9o>c(e7x!eGvuzQPM3e+P~)8LIr`o%HZe9Pi;wBjdu1 z*-`zeXbt-v&ieb?%xsvdvFwECGq|bW8WMxHuQV}aHl27~d``N2&i%$M7_{nhTwR+y zI#Q~5tM1hGTk3B!#3kcElm8amGE$7qXS6q#bH)OX~3aDzX7E)ZT$YB{PRf!jp z_CHPw!os9m&2-J&U^QI-+?S$X*Qj8vnv2O76T=Ta&R5>wCd`=D>VQ{rN4GCTQ`Uu! 
[GIT binary patch data omitted]
diff --git a/docs/images/whiteEPCCtransp.png b/docs/images/whiteEPCCtransp.png new file mode 100644 index 0000000000000000000000000000000000000000..f4734a39714d2f1693a1fe65ab29f3122fb1750b GIT binary patch literal 11332
[GIT binary patch data omitted]
diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..118bf288 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,39 @@ +image

# Cirrus

Cirrus is an HPC and data science service hosted and run by
[EPCC](http://www.epcc.ed.ac.uk) at [The University of
Edinburgh](http://www.ed.ac.uk). It is one of the
[EPSRC](http://www.epsrc.ac.uk) Tier-2 National HPC Services.

Cirrus is available to industry and academic researchers. For
information on how to get access to the system please see the [Cirrus
website](http://www.cirrus.ac.uk).

The Cirrus facility is based around an SGI ICE XA system. There are 280
standard compute nodes and 38 GPU compute nodes. Each standard compute
node has 256 GiB of memory and contains two 2.1 GHz, 18-core Intel Xeon
(Broadwell) processors. Each GPU compute node has 384 GiB of memory,
contains two 2.4 GHz, 20-core Intel Xeon (Cascade Lake) processors and
four NVIDIA Tesla V100-SXM2-16GB (Volta) GPU accelerators connected to
the host processors and each other via PCIe. All nodes are connected
using a single InfiniBand fabric. This documentation covers:

- Cirrus User Guide: general information on how to use Cirrus
- Software Applications: notes on using specific software applications
  on Cirrus
- Software Libraries: notes on compiling against specific libraries on
  Cirrus. Most libraries work *as expected*, so no additional notes are
  required; however, a small number require specific documentation
- Software Tools: information on using tools such as debuggers and
  profilers on Cirrus

Information on using the SAFE web interface for managing and reporting
on your usage on Cirrus can be found in the [Tier-2 SAFE
Documentation](http://tier2-safe.readthedocs.io/en/latest/).

This documentation draws on the documentation for the [ARCHER2 National
Supercomputing Service](http://docs.archer2.ac.uk).

diff --git a/docs/software-libraries/hdf5.md b/docs/software-libraries/hdf5.md new file mode 100644 index 00000000..5bb926eb --- /dev/null +++ b/docs/software-libraries/hdf5.md @@ -0,0 +1,16 @@ +# HDF5

Serial and parallel versions of HDF5 are available on Cirrus.
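The available builds are listed in the table below. As an illustrative
sketch only (assuming the HPE MPT 2.22 / Intel 18 build from the table;
the exact compiler and MPI modules to load alongside it, and any
include/library paths, should be checked on the system), a parallel
HDF5 build could be made available and linked against as follows:

    # Load MPT and a matching parallel HDF5 module (name taken from the table below)
    module load mpt
    module load hdf5parallel/1.10.6-intel18-mpt222

    # Link an MPI Fortran code against parallel HDF5; add -I/-L paths from
    # the environment set up by the module if they are not found automatically
    mpif90 -o h5demo.x h5demo.f90 -lhdf5_fortran -lhdf5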
+ +| Module name | Library version | Compiler | MPI library | +|------------------------------------|-----------------|-----------|--------------| +| hdf5parallel/1.10.4-intel18-impi18 | 1.10.4 | Intel 18 | Intel MPI 18 | +| hdf5parallel/1.10.6-intel18-mpt222 | 1.10.6 | Intel 18 | HPE MPT 2.22 | +| hdf5parallel/1.10.6-intel19-mpt222 | 1.10.6 | Intel 19 | HPE MPT 2.22 | +| hdf5parallel/1.10.6-gcc6-mpt222 | 1.10.6 | GCC 6.3.0 | HPE MPT 2.22 | + + + +Instructions to install a local version of HDF5 can be found on this +repository: + diff --git a/docs/software-libraries/intel_mkl.md b/docs/software-libraries/intel_mkl.md new file mode 100644 index 00000000..7784066d --- /dev/null +++ b/docs/software-libraries/intel_mkl.md @@ -0,0 +1,112 @@ +# Intel MKL: BLAS, LAPACK, ScaLAPACK + +The Intel Maths Kernel Libraries (MKL) contain a variety of optimised +numerical libraries including BLAS, LAPACK, and ScaLAPACK. In general, +the exact commands required to build against MKL depend on the details +of compiler, environment, requirements for parallelism, and so on. The +Intel MKL link line advisor should be consulted. + +See + + +Some examples are given below. Note that loading the appropriate intel +tools module will provide the environment variable +MKLROOT which holds the location of the +various MKL components. + +## Intel Compilers + +### BLAS and LAPACK + +To use MKL libraries with the Intel compilers you just need to load the +relevant Intel compiler module, and the Intel `cmkl` module, e.g.: + + module load intel-20.4/fc + module load intel-20.4/cmkl + +To include MKL you specify the `-mkl` option on your compile and link +lines. For example, to compile a simple Fortran program with MKL you +could use: + + ifort -c -mkl -o lapack_prb.o lapack_prb.f90 + ifort -mkl -o lapack_prb.x lapack_prb.o + +The `-mkl` flag without any options builds against the threaded version +of MKL. If you wish to build against the serial version of MKL, you +would use `-mkl=sequential`. + +### ScaLAPACK + +The distributed memory linear algebra routines in ScaLAPACK require MPI +in addition to the compiler and MKL libraries. Here we use Intel MPI +via: + + module load intel-20.4/fc + module load intel-20.4/mpi + module load intel-20.4/cmkl + +ScaLAPACK requires the Intel versions of BLACS at link time in addition +to ScaLAPACK libraries; remember also to use the MPI versions of the +compilers: + + mpiifort -c -o linsolve.o linsolve.f90 + mpiifort -o linsolve.x linsolve.o -L${MKLROOT}/lib/intel64 \ + -lmkl_scalapack_lp64 -lmkl_intel_lp64 -lmkl_sequential -lmkl_core \ + -lmkl_blacs_intelmpi_lp64 -lpthread -lm -ldl + +## GNU Compiler + +### BLAS and LAPACK + +To use MKL libraries with the GNU compiler you first need to load the +GNU compiler module and Intel MKL module, e.g.,: + + module load gcc + module load intel-20.4/cmkl + +To include MKL you need to link explicitly against the MKL libraries. +For example, to compile a single source file Fortran program with MKL +you could use: + + gfortran -c -o lapack_prb.o lapack_prb.f90 + gfortran -o lapack_prb.x lapack_prb.o -L$MKLROOT/lib/intel64 \ + -lmkl_gf_lp64 -lmkl_core -lmkl_sequential + +This will build against the serial version of MKL; to build against the +threaded version use: + + gfortran -c -o lapack_prb.o lapack_prb.f90 + gfortran -fopenmp -o lapack_prb.x lapack_prb.o -L$MKLROOT/lib/intel64 \ + -lmkl_gf_lp64 -lmkl_core -lmkl_gnu_thread + +### ScaLAPACK + +The distributed memory linear algebra routines in ScaLAPACK require MPI +in addition to the MKL libraries. 
On Cirrus, this is usually provided by
SGI MPT.

    module load gcc
    module load mpt
    module load intel-20.4/cmkl

Once you have the modules loaded you need to link against two additional
libraries to include ScaLAPACK. Note we use here the relevant
`mkl_blacs_sgimpt_lp64` version of the BLACS library. Remember to use
the MPI versions of the compilers:

    mpif90 -f90=gfortran -c -o linsolve.o linsolve.f90
    mpif90 -f90=gfortran -o linsolve.x linsolve.o -L${MKLROOT}/lib/intel64 \
      -lmkl_scalapack_lp64 -lmkl_intel_lp64 -lmkl_sequential -lmkl_core \
      -lmkl_blacs_sgimpt_lp64 -lpthread -lm -ldl

### ILP vs LP interface layer

Many applications will use 32-bit (4-byte) integers. This means the MKL
32-bit integer interface should be selected (which gives the `_lp64`
extensions seen in the examples above).

For applications which require, e.g., very large array indices (greater
than 2^31-1 elements), the 64-bit integer interface is required. This
gives rise to `_ilp64` appended to library names. This may also require
`-DMKL_ILP64` at the compilation stage. Check the Intel link line
advisor for specific cases. diff --git a/docs/software-packages/Ansys.md b/docs/software-packages/Ansys.md new file mode 100644 index 00000000..7f76bc16 --- /dev/null +++ b/docs/software-packages/Ansys.md @@ -0,0 +1,107 @@ +# ANSYS Fluent

[ANSYS Fluent](http://www.ansys.com/Products/Fluids/ANSYS-Fluent) is a
computational fluid dynamics (CFD) tool. Fluent includes well-validated
physical modelling capabilities to deliver fast, accurate results across
the widest range of CFD and multi-physics applications.

## Useful Links

 - [ANSYS Fluent User
   Guides](http://www.ansys.com/Products/Fluids/ANSYS-Fluent)

## Using ANSYS Fluent on Cirrus

**ANSYS Fluent on Cirrus is only available to researchers who bring
their own licence. Other users cannot access the version
centrally-installed on Cirrus.**

If you have any questions regarding ANSYS Fluent on Cirrus please
contact the [Cirrus Helpdesk](http://www.cirrus.ac.uk/support/).

## Running parallel ANSYS Fluent jobs

The following batch file starts Fluent in command line mode (no GUI)
and runs the Fluent batch file "inputfile.fl". One parameter that
requires particular attention is "-t504". In this example 14 Cirrus
nodes (14 \* 72 = 1008 cores) are allocated, where half of the 1008
cores are physical and the other half are virtual. To run Fluent
optimally on Cirrus, only the physical cores should be employed. As
such, Fluent's `-t` flag should reflect the number of physical cores: in
this example, "-t504" is employed.

    #!/bin/bash

    # Slurm job options (name, compute nodes, job time)
    #SBATCH --job-name=ANSYS_test
    #SBATCH --time=0:20:0
    #SBATCH --exclusive
    #SBATCH --nodes=14
    #SBATCH --tasks-per-node=36
    #SBATCH --cpus-per-task=1

    # Replace [budget code] below with your budget code (e.g. t01)
    #SBATCH --account=[budget code]
    # Replace [partition name] below with your partition name (e.g. standard,gpu)
    #SBATCH --partition=[partition name]
    # Replace [qos name] below with your qos name (e.g. standard,long,gpu)
    #SBATCH --qos=[qos name]

    # Set the number of threads to 1
    # This prevents any threaded system libraries from automatically
    # using threading.
+ export OMP_NUM_THREADS=1 + + export HOME=${HOME/home/work} + + scontrol show hostnames $SLURM_NODELIST > ~/fluent.launcher.host.txt + + # Launch the parallel job + ./fluent 3ddp -g -i inputfile.fl \ + -pinfiniband -alnamd64 -t504 -pib \ + -cnf=~/fluent.launcher.host.txt \ + -ssh >& outputfile.txt + +Below is the Fluent "inputfile.fl" batch script. Anything that starts +with a ";" is a comment. This script does the following: + + - Starts a transcript (i.e. Fluent output is redirected to a file + \[transcript_output_01.txt\]) + - Reads a case file \[a case file in Fluent is a model\] + - Reads a data file \[a data file in Fluent is the current state of a + simulation (i.e. after X iterations)\] + - Prints latency and bandwidth statistics + - Prints and resets timers + - Run 50 iterations of the simulation + - Prints and resets timers + - Save the data file (so that you can continue the simulation) + - Stops the transcript + - Exits Fluent + +## Actual Fluent script ("inputfile.fl"): + +Replace \[Your Path To \] before running + + ; Start transcript + /file/start-transcript [Your Path To ]/transcript_output_01.txt + ; Read case file + rc [Your Path To ]/200M-CFD-Benchmark.cas + ; Read data file + /file/read-data [Your Path To ]/200M-CFD-Benchmark-500.dat + ; Print statistics + /parallel/bandwidth + /parallel/latency + /parallel/timer/usage + /parallel/timer/reset + ; Calculate 50 iterations + it 50 + ; Print statistics + /parallel/timer/usage + /parallel/timer/reset + ; Write data file + wd [Your Path To ]/200M-CFD-Benchmark-500-new.dat + ; Stop transcript + /file/stop-transcript + ; Exit Fluent + exit + yes diff --git a/docs/software-packages/MATLAB.md b/docs/software-packages/MATLAB.md new file mode 100644 index 00000000..e7f806b3 --- /dev/null +++ b/docs/software-packages/MATLAB.md @@ -0,0 +1,602 @@ +# MATLAB + +[MATLAB](https://uk.mathworks.com) combines a desktop environment tuned +for iterative analysis and design processes with a programming language +that expresses matrix and array mathematics directly. + +## Useful Links + +- [MATLAB Documentation](https://uk.mathworks.com/help/index.html) + +## Using MATLAB on Cirrus + +MATLAB R2018b is available on Cirrus. + +This installation of MATLAB on Cirrus is covered by an Academic +License - for use in teaching, academic research, and meeting course +requirements at degree granting institutions only. Not for government, +commercial, or other organizational use. + +**If your use of MATLAB is not covered by this license then please do +not use this installation.** Please contact the [Cirrus +Helpdesk](http://www.cirrus.ac.uk/support/) to arrange use of your own +MATLAB license on Cirrus. 
+ +This is MATLAB Version 9.5.0.1033004 (R2018b) Update 2 and provides the +following toolboxes : + +``` none +MATLAB Version 9.5 +Simulink Version 9.2 +5G Toolbox Version 1.0 +Aerospace Blockset Version 4.0 +Aerospace Toolbox Version 3.0 +Antenna Toolbox Version 3.2 +Audio System Toolbox Version 1.5 +Automated Driving System Toolbox Version 1.3 +Bioinformatics Toolbox Version 4.11 +Communications Toolbox Version 7.0 +Computer Vision System Toolbox Version 8.2 +Control System Toolbox Version 10.5 +Curve Fitting Toolbox Version 3.5.8 +DO Qualification Kit Version 3.6 +DSP System Toolbox Version 9.7 +Database Toolbox Version 9.0 +Datafeed Toolbox Version 5.8 +Deep Learning Toolbox Version 12.0 +Econometrics Toolbox Version 5.1 +Embedded Coder Version 7.1 +Filter Design HDL Coder Version 3.1.4 +Financial Instruments Toolbox Version 2.8 +Financial Toolbox Version 5.12 +Fixed-Point Designer Version 6.2 +Fuzzy Logic Toolbox Version 2.4 +GPU Coder Version 1.2 +Global Optimization Toolbox Version 4.0 +HDL Coder Version 3.13 +HDL Verifier Version 5.5 +IEC Certification Kit Version 3.12 +Image Acquisition Toolbox Version 5.5 +Image Processing Toolbox Version 10.3 +Instrument Control Toolbox Version 3.14 +LTE HDL Toolbox Version 1.2 +LTE Toolbox Version 3.0 +MATLAB Coder Version 4.1 +MATLAB Distributed Computing Server Version 6.13 +MATLAB Report Generator Version 5.5 +Mapping Toolbox Version 4.7 +Model Predictive Control Toolbox Version 6.2 +Optimization Toolbox Version 8.2 +Parallel Computing Toolbox Version 6.13 +Partial Differential Equation Toolbox Version 3.1 +Phased Array System Toolbox Version 4.0 +Polyspace Bug Finder Version 2.6 +Polyspace Code Prover Version 9.10 +Powertrain Blockset Version 1.4 +Predictive Maintenance Toolbox Version 1.1 +RF Blockset Version 7.1 +RF Toolbox Version 3.5 +Risk Management Toolbox Version 1.4 +Robotics System Toolbox Version 2.1 +Robust Control Toolbox Version 6.5 +Sensor Fusion and Tracking Toolbox Version 1.0 +Signal Processing Toolbox Version 8.1 +SimBiology Version 5.8.1 +SimEvents Version 5.5 +Simscape Version 4.5 +Simscape Driveline Version 2.15 +Simscape Electrical Version 7.0 +Simscape Fluids Version 2.5 +Simscape Multibody Version 6.0 +Simulink 3D Animation Version 8.1 +Simulink Check Version 4.2 +Simulink Code Inspector Version 3.3 +Simulink Coder Version 9.0 +Simulink Control Design Version 5.2 +Simulink Coverage Version 4.2 +Simulink Design Optimization Version 3.5 +Simulink Report Generator Version 5.5 +Simulink Requirements Version 1.2 +Simulink Test Version 2.5 +Stateflow Version 9.2 +Statistics and Machine Learning Toolbox Version 11.4 +Symbolic Math Toolbox Version 8.2 +System Identification Toolbox Version 9.9 +Text Analytics Toolbox Version 1.2 +Trading Toolbox Version 3.5 +Vehicle Dynamics Blockset Version 1.1 +Vehicle Network Toolbox Version 4.1 +Vision HDL Toolbox Version 1.7 +WLAN Toolbox Version 2.0 +Wavelet Toolbox Version 5.1 +``` + +## Running MATLAB jobs + +On Cirrus, MATLAB is intended to be used on the compute nodes within +Slurm job scripts. Use on the login nodes should be restricted to +setting preferences, accessing help, and launching MDCS jobs. It is +recommended that MATLAB is used without a GUI on the compute nodes, as +the interactive response is slow. 
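For a quick, lightweight check on a login node (for example, confirming
the version and toolboxes listed above), a minimal sketch is shown
below; it assumes only the `matlab` module used in the job scripts later
on this page:

``` bash
# Light use on a login node only: start MATLAB without the GUI,
# print the version/toolbox information, then exit
module load matlab
matlab -nodisplay -nosplash -r "ver; exit"
```

Anything computationally significant should be run on the compute nodes
through a Slurm job script, as described in the sections below.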
+ +## Running parallel MATLAB jobs using the *local* cluster + +The license for this installation of MATLAB provides only 32 workers via +MDCS but provides 36 workers via the local cluster profile (there are 36 +cores on a Cirrus compute node), so we only recommend the use of MDCS to +test the configuration of distributed memory parallel computations for +eventual use of your own MDCS license. + +The *local* cluster should be used within a Slurm job script - you +submit a job that runs MATLAB and uses the *local* cluster, which is the +compute node that the job is running on. + +MATLAB will normally use up to the total number of cores on a node for +multi-threaded operations (e.g. matrix inversions) and for parallel +computations. It also make no restriction on its memory use. These +features are incompatible with the shared use of nodes on Cirrus. For +the *local* cluster, a wrapper script is provided to limit the number of +cores and amount of memory used, in proportion to the number of CPUs +selected in the Slurm job script. Please use this wrapper instead of +using MATLAB directly. + +Say you have a job that requires 3 workers, each running 2 threads. As +such, you should employ 3x2=6 cores. An example job script for this +particular case would be : + +``` bash +#SBATCH --job-name=Example_MATLAB_Job +#SBATCH --time=0:20:0 +#SBATCH --nodes=1 +#SBATCH --tasks-per-node=6 +#SBATCH --cpus-per-task=1 + + +# Replace [budget code] below with your project code (e.g. t01) +#SBATCH --account=[budget code] +# Replace [partition name] below with your partition name (e.g. standard,gpu) +#SBATCH --partition=[partition name] +# Replace [qos name] below with your qos name (e.g. standard,long,gpu) +#SBATCH --qos=[qos name] + +module load matlab + +matlab_wrapper -nodisplay < /lustre/sw/cse-matlab/examples/testp.m > testp.log +``` + +Note, for MATLAB versions R2019 and later, the +matlab_wrapper_2019 script may be +required (see 2019 section below). + +This would run the *testp.m* script, without a display, and exit when +*testp.m* has finished. 6 CPUs are selected, which correspond to 6 +cores, and the following limits would be set initially : + +``` none +ncores = 6 +memory = 42GB + +Maximum number of computational threads (maxNumCompThreads) = 6 +Preferred number of workers in a parallel pool (PreferredNumWorkers) = 6 +Number of workers to start on your local machine (NumWorkers) = 6 +Number of computational threads to use on each worker (NumThreads) = 1 +``` + +The *testp.m* program sets *NumWorkers* to 3 and *NumThreads* to 2 : + +``` matlab +cirrus_cluster = parcluster('local'); +ncores = cirrus_cluster.NumWorkers * cirrus_cluster.NumThreads; +cirrus_cluster.NumWorkers = 3; +cirrus_cluster.NumThreads = 2; +fprintf("NumWorkers = %d NumThreads = %d ncores = %d\n",cirrus_cluster.NumWorkers,cirrus_cluster.NumThreads,ncores); +if cirrus_cluster.NumWorkers * cirrus_cluster.NumThreads > ncores + disp("NumWorkers * NumThreads > ncores"); + disp("Exiting"); + exit(1); +end +saveProfile(cirrus_cluster); +clear cirrus_cluster; + + +n = 3; +A = 3000; + +a=zeros(A,A,n); +b=1:n; + +parpool; + +tic +parfor i = 1:n + a(:,:,i) = rand(A); +end +toc +tic +parfor i = 1:n + b(i) = max(abs(eig(a(:,:,i)))); +end +toc +``` + +Note that *PreferredNumWorkers*, *NumWorkers* and *NumThreads* persist +between MATLAB sessions but will be updated correctly if you use the +wrapper each time. 
+ +*NumWorkers* and *NumThreads* can be changed (using *parcluster* and +*saveProfile*) but *NumWorkers* \* *NumThreads* should be less than or +equal to the number of cores (*ncores* above). If you wish a worker to +run a threaded routine in serial, you must set *NumThreads* to 1 (the +default). + +If you specify exclusive node access, then all the cores and memory will +be available. On the login nodes, a single core is used and memory is +not limited. + +## MATLAB 2019 versions + +There has been a change of configuration options for MATLAB from version +R2019 and onwards that means the -r flag +has been replaced with the -batch flag. +To accommodate that a new job wrapper script is required to run +applications. For these versions of MATLAB, if you need to use the +-r or +-batch flag replace this line in your +Slurm script, i.e.: + +``` matlab +matlab_wrapper -nodisplay -nodesktop -batch "main_simulated_data_FINAL_clean("$ind","$gamma","$rw",'"$SLURM_JOB_ID"') +``` + +with: + +``` matlab +matlab_wrapper_2019 -nodisplay -nodesktop -batch "main_simulated_data_FINAL_clean("$ind","$gamma","$rw",'"$SLURM_JOB_ID"') +``` + +and this should allow scripts to run normally. + +## Running parallel MATLAB jobs using MDCS + +It is possible to use MATLAB on the login node to set up an MDCS Slurm +cluster profile and then launch jobs using that profile. However, this +does not give per-job control of the number of cores and walltime; these +are set once in the profile. + +This MDCS profile can be used in MATLAB on the login node - the MDCS +computations are done in Slurm jobs launched using the profile. + +### Configuration + +Start MATLAB on the login node. Configure MATLAB to run parallel jobs on +your cluster by calling *configCluster*. For each cluster, +*configCluster* only needs to be called once per version of MATLAB : + +``` matlab +configCluster +``` + +Jobs will now default to the cluster rather than submit to the local +machine (the login node in this case). + +### Configuring jobs + +Prior to submitting the job, you can specify various parameters to pass +to our jobs, such as walltime, e-mail, etc. Other than *ProjectCode* and +*WallTime*, none of these are required to be set. + +NOTE: Any parameters specified using this workflow will be persistent +between MATLAB sessions : + +``` matlab +% Get a handle to the cluster. +c = parcluster('cirrus'); + +% Assign the project code for the job. **[REQUIRED]** +c.AdditionalProperties.ProjectCode = 'project-code'; + +% Specify the walltime (e.g. 5 hours). **[REQUIRED]** +c.AdditionalProperties.WallTime = '05:00:00'; + +% Specify e-mail address to receive notifications about your job. +c.AdditionalProperties.EmailAddress = 'your_name@your_address'; + +% Request a specific reservation to run your job. It is better to +% use the queues rather than a reservation. +c.AdditionalProperties.Reservation = 'your-reservation'; + +% Set the job placement (e.g., pack, excl, scatter:excl). +% Usually the default of free is what you want. +c.AdditionalProperties.JobPlacement = 'pack'; + +% Request to run in a particular queue. Usually the default (no +% specific queue requested) will route the job to the correct queue. +c.AdditionalProperties.QueueName = 'queue-name'; + +% If you are using GPUs, request up to 4 GPUs per node (this will +% override a requested queue name and will use the 'gpu' queue). 
+c.AdditionalProperties.GpusPerNode = 4; +``` + +Save changes after modifying *AdditionalProperties* fields : + +``` matlab +c.saveProfile +``` + +To see the values of the current configuration options, call the +specific *AdditionalProperties* name : + +``` matlab +c.AdditionalProperties +``` + +To clear a value, assign the property an empty value (*''*, *\[\]*, or +*false*) : + +``` matlab +% Turn off email notifications. +c.AdditionalProperties.EmailAddress = ''; +``` + +### Interactive jobs + +To run an interactive pool job on the cluster, use *parpool* as before. +*configCluster* sets *NumWorkers* to 32 in the cluster to match the +number of MDCS workers available in our TAH licence. If you have your +own MDCS licence, you can change this by setting *c.NumWorkers* and +saving the profile. : + +``` matlab +% Open a pool of 32 workers on the cluster. +p = parpool('cirrus',32); +``` + +Rather than running locally on one compute node machine, this pool can +run across multiple nodes on the cluster : + +``` matlab +% Run a parfor over 1000 iterations. +parfor idx = 1:1000 + a(idx) = ... +end +``` + +Once you have finished using the pool, delete it : + +``` matlab +% Delete the pool +p.delete +``` + +### Serial jobs + +Rather than running interactively, use the *batch* command to submit +asynchronous jobs to the cluster. This is generally more useful on +Cirrus, which usually has long queues. The *batch* command will return a +job object which is used to access the output of the submitted job. See +the MATLAB documentation for more help on *batch* : + +``` matlab +% Get a handle to the cluster. +c = parcluster('cirrus'); + +% Submit job to query where MATLAB is running on the cluster. +j = c.batch(@pwd, 1, {}); + +% Query job for state. +j.State + +% If state is finished, fetch results. +j.fetchOutputs{:} + +% Delete the job after results are no longer needed. +j.delete +``` + +To retrieve a list of currently running or completed jobs, call +*parcluster* to retrieve the cluster object. The cluster object stores +an array of jobs that were run, are running, or are queued to run. This +allows you to fetch the results of completed jobs. Retrieve and view the +list of jobs as shown below : + +``` matlab +c = parcluster('cirrus'); +jobs = c.Jobs +``` + +Once you have identified the job you want, you can retrieve the results +as you have done previously. + +*fetchOutputs* is used to retrieve function output arguments; if using +batch with a script, use *load* instead. Data that has been written to +files on the cluster needs be retrieved directly from the file system. + +To view results of a previously completed job : + +``` matlab +% Get a handle on job with ID 2. +j2 = c.Jobs(2); +``` + +NOTE: You can view a list of your jobs, as well as their IDs, using the +above *c.Jobs* command : + +``` matlab +% Fetch results for job with ID 2. +j2.fetchOutputs{:} + +% If the job produces an error, view the error log file. +c.getDebugLog(j.Tasks(1)) +``` + +NOTE: When submitting independent jobs, with multiple tasks, you will +have to specify the task number. + +### Parallel jobs + +Users can also submit parallel workflows with batch. 
You can use the +following example (*parallel_example.m*) for a parallel job : + +``` matlab +function t = parallel_example(iter) + + if nargin==0, iter = 16; end + + disp('Start sim') + + t0 = tic; + parfor idx = 1:iter + A(idx) = idx; + pause(2); + end + t =toc(t0); + + disp('Sim completed.') +``` + +Use the *batch* command again, but since you are running a parallel job, +you also specify a MATLAB Pool : + +``` matlab +% Get a handle to the cluster. +c = parcluster('cirrus'); + +% Submit a batch pool job using 4 workers for 16 simulations. +j = c.batch(@parallel_example, 1, {}, 'Pool', 4); + +% View current job status. +j.State + +% Fetch the results after a finished state is retrieved. +j.fetchOutputs{:} + +ans = + +8.8872 +``` + +The job ran in 8.89 seconds using 4 workers. Note that these jobs will +always request N+1 CPU cores, since one worker is required to manage the +batch job and pool of workers. For example, a job that needs eight +workers will consume nine CPU cores. With a MDCS licence for 32 workers, +you will be able to have a pool of 31 workers. + +Run the same simulation but increase the Pool size. This time, to +retrieve the results later, keep track of the job ID. + +NOTE: For some applications, there will be a diminishing return when +allocating too many workers, as the overhead may exceed computation +time. : + +``` matlab +% Get a handle to the cluster. +c = parcluster('cirrus'); + +% Submit a batch pool job using 8 workers for 16 simulations. +j = c.batch(@parallel_example, 1, {}, 'Pool', 8); + +% Get the job ID +id = j.ID + +Id = + +4 +``` + +``` matlab +% Clear workspace, as though you have quit MATLAB. +clear j +``` + +Once you have a handle to the cluster, call the *findJob* method to +search for the job with the specified job ID : + +``` matlab +% Get a handle to the cluster. +c = parcluster('cirrus'); + +% Find the old job +j = c.findJob('ID', 4); + +% Retrieve the state of the job. +j.State + +ans + +finished + +% Fetch the results. +j.fetchOutputs{:}; + +ans = + +4.7270 + +% If necessary, retrieve an output/error log file. +c.getDebugLog(j) +``` + +The job now runs 4.73 seconds using 8 workers. Run code with different +number of workers to determine the ideal number to use. + +Alternatively, to retrieve job results via a graphical user interface, +use the Job Monitor (Parallel \> Monitor Jobs). + + +image1 + +### Debugging + +If a serial job produces an error, you can call the *getDebugLog* method +to view the error log file : + +``` matlab +j.Parent.getDebugLog(j.Tasks(1)) +``` + +When submitting independent jobs, with multiple tasks, you will have to +specify the task number. 
For Pool jobs, do not dereference into the job +object : + +``` matlab +j.Parent.getDebugLog(j) +``` + +The scheduler ID can be derived by calling *schedID* : + +``` matlab +schedID(j) + +ans + +25539 +``` + +### To learn more + +To learn more about the MATLAB Parallel Computing Toolbox, check out +these resources: + +- [Parallel Computing Coding + Examples](http://www.mathworks.com/products/parallel-computing/code-examples.html) +- [Parallel Computing + Documentation](http://www.mathworks.com/help/distcomp/index.html) +- [Parallel Computing + Overview](http://www.mathworks.com/products/parallel-computing/index.html) +- [Parallel Computing + Tutorials](http://www.mathworks.com/products/parallel-computing/tutorials.html) +- [Parallel Computing + Videos](http://www.mathworks.com/products/parallel-computing/videos.html) +- [Parallel Computing + Webinars](http://www.mathworks.com/products/parallel-computing/webinars.html) + +## GPUs + +Calculations using GPUs can be done using the `GPU nodes +<../user-guide/gpu>`. This can be done using MATLAB within a Slurm job +script, similar to `using the local cluster `, or can be done +using the `MDCS profile `. The GPUs are shared unless you request +exclusive access to the node (4 GPUs), so you may find that you share a +GPU with another user. diff --git a/docs/software-packages/altair_hw.md b/docs/software-packages/altair_hw.md new file mode 100644 index 00000000..cff10196 --- /dev/null +++ b/docs/software-packages/altair_hw.md @@ -0,0 +1,173 @@ +# Altair Hyperworks + +[Hyperworks](http://www.altairhyperworks.com/) includes best-in-class +modeling, linear and nonlinear analyses, structural and system-level +optimization, fluid and multi-body dynamics simulation, electromagnetic +compatibility (EMC), multiphysics analysis, model-based development, and +data management solutions. + +## Useful Links + +> - [Hyperworks 14 User +> Guide](http://www.altairhyperworks.com/hwhelp/Altair/hw14.0/help/altair_help/altair_help.htm?welcome_page.htm) + +## Using Hyperworks on Cirrus + +Hyperworks is licenced software so you require access to a Hyperworks +licence to access the software. For queries on access to Hyperworks on +Cirrus and to enable your access please contact the Cirrus helpdesk. + +The standard mode of using Hyperworks on Cirrus is to use the +installation of the Desktop application on your local workstation or +laptop to set up your model/simulation. Once this has been done you +would transsfer the required files over to Cirrus using SSH and then +launch the appropriate Solver program (OptiStruct, RADIOSS, +MotionSolve). + +Once the Solver has finished you can transfer the output back to your +local system for visualisation and analysis in the Hyperworks Desktop. + +## Running serial Hyperworks jobs + +Each of the Hyperworks Solvers can be run in serial on Cirrus in a +similar way. You should construct a batch submission script with the +command to launch your chosen Solver with the correct command line +options. + +For example, here is a job script to run a serial RADIOSS job on Cirrus: + +``` bash +#!/bin/bash + +# Slurm job options (name, compute nodes, job time) +#SBATCH --job-name=HW_RADIOSS_test +#SBATCH --time=0:20:0 +#SBATCH --exclusive +#SBATCH --nodes=1 +#SBATCH --tasks-per-node=1 +#SBATCH --cpus-per-task=1 + +# Replace [budget code] below with your budget code (e.g. t01) +#SBATCH --account=[budget code] +# Replace [partition name] below with your partition name (e.g. 
standard,gpu) +#SBATCH --partition=[partition name] +# Replace [qos name] below with your qos name (e.g. standard,long,gpu) +#SBATCH --qos=[qos name] + +# Set the number of threads to the CPUs per task +export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK + +# Load Hyperworks module +module load altair-hwsolvers/14.0.210 + +# Launch the parallel job +# Using 36 threads per node +#  srun picks up the distribution from the sbatch options +srun --cpu-bind=cores radioss box.fem +``` + +## Running parallel Hyperworks jobs + +Only the OptiStruct Solver currently supports parallel execution. +OptiStruct supports a number of parallel execution modes of which two +can be used on Cirrus: + +- Shared memory (SMP) mode uses multiple cores within a single node +- Distributed memory (SPMD) mode uses multiple cores across multiple + nodes via the MPI library + +### OptiStruct SMP + +- [OptiStruct SMP + documentation](http://www.altairhyperworks.com/hwhelp/Altair/hw14.0/help/hwsolvers/hwsolvers.htm?shared_memory_parallelization.htm) + +You can use up to 36 physical cores (or 72 virtual cores using +HyperThreading) for OptiStruct SMP mode as these are the maximum numbers +available on each Cirrus compute node. + +You use the `-nt` option to OptiStruct to specify the number of cores to +use. + +For example, to run an 18-core OptiStruct SMP calculation you could use +the following job script: + +``` bash +#!/bin/bash + +# Slurm job options (name, compute nodes, job time) +#SBATCH --job-name=HW_OptiStruct_SMP +#SBATCH --time=0:20:0 +#SBATCH --exclusive +#SBATCH --nodes=1 +#SBATCH --tasks-per-node=1 +#SBATCH --cpus-per-task=36 + +# Replace [budget code] below with your budget code (e.g. t01) +#SBATCH --account=[budget code] +# Replace [partition name] below with your partition name (e.g. standard,gpu) +#SBATCH --partition=[partition name] +# Replace [qos name] below with your qos name (e.g. standard,long,gpu) +#SBATCH --qos=[qos name] + +# Load Hyperworks module +module load altair-hwsolvers/14.0.210 + +# Launch the parallel job +# Using 36 threads per node +#  srun picks up the distribution from the sbatch options +srun --cpu-bind=cores --ntasks=18 optistruct box.fem -nt 18 +``` + +### OptiStruct SPMD (MPI) + +- [OptiStruct SPMD + documentation](http://www.altairhyperworks.com/hwhelp/Altair/hw14.0/help/hwsolvers/hwsolvers.htm?optistruct_spmd.htm) + +There are four different parallelisation schemes for SPMD OptStruct that +are selected by different flags: + +- Load decomposition (master/slave): `-mpimode` flag +- Domain decomposition: `-ddmmode` flag +- Multi-model optimisation: `-mmomode` flag +- Failsafe topology optimisation: `-fsomode` flag + +You should launch OptiStruct SPMD using the standard Intel MPI `mpirun` +command. + +*Note:* OptiStruct does not support the use of SGI MPT, you must use +Intel MPI. + +Example OptiStruct SPMD job submission script: + +``` bash +#!/bin/bash + +# Slurm job options (name, compute nodes, job time) +#SBATCH --job-name=HW_OptiStruct_SPMD +#SBATCH --time=0:20:0 +#SBATCH --exclusive +#SBATCH --nodes=2 +#SBATCH --tasks-per-node=36 +#SBATCH --cpus-per-task=1 + +# Replace [budget code] below with your budget code (e.g. t01) +#SBATCH --account=[budget code] +# Replace [partition name] below with your partition name (e.g. standard,gpu) +#SBATCH --partition=[partition name] +# Replace [qos name] below with your qos name (e.g. 
standard,long,gpu)
#SBATCH --qos=[qos name]

# Load Hyperworks module and Intel MPI
module load altair-hwsolvers/14.0.210
module load intel-mpi-17

# Set the number of threads to 1
# This prevents any threaded system libraries from automatically
# using threading.
export OMP_NUM_THREADS=1

# Run the OptiStruct SPMD Solver (domain decomposition mode)
# Use 72 cores, 36 on each node (i.e. all physical cores)
#  srun picks up the distribution from the sbatch options
srun --ntasks=72 $ALTAIR_HOME/hwsolvers/optistruct/bin/linux64/optistruct_14.0.211_linux64_impi box.fem -ddmmode
```
diff --git a/docs/software-packages/castep.md b/docs/software-packages/castep.md new file mode 100644 index 00000000..f42eafde --- /dev/null +++ b/docs/software-packages/castep.md @@ -0,0 +1,67 @@ +# CASTEP

[CASTEP](http://www.castep.org) is a leading code for calculating the
properties of materials from first principles. Using density functional
theory, it can simulate a wide range of materials properties, including
energetics, structure at the atomic level, vibrational properties,
electronic response properties, etc. In particular it has a wide range
of spectroscopic features that link directly to experiment, such as
infra-red and Raman spectroscopies, NMR, and core level spectra.

## Useful Links

- [CASTEP User Guides](http://www.castep.org/CASTEP/Documentation)
- [CASTEP Tutorials](http://www.castep.org/CASTEP/OnlineTutorials)
- [CASTEP Licensing](http://www.castep.org/CASTEP/GettingCASTEP)

## Using CASTEP on Cirrus

**CASTEP is only available to users who have a valid CASTEP licence.**

If you have a CASTEP licence and wish to have access to CASTEP on Cirrus
please [submit a request through the
SAFE](https://epcced.github.io/safe-docs/safe-for-users/#how-to-request-access-to-a-package-group-licensed-software-or-restricted-features).


!!! Note

    CASTEP versions 19 and above require a separate licence from CASTEP
    versions 18 and below, so these are treated as two separate access
    requests.


## Running parallel CASTEP jobs

CASTEP can exploit multiple nodes on Cirrus and will generally be run in
exclusive mode over more than one node.

For example, the following script will run a CASTEP job using 4 nodes
(144 cores):

    #!/bin/bash

    # Slurm job options (name, compute nodes, job time)
    #SBATCH --job-name=CASTEP_Example
    #SBATCH --time=1:0:0
    #SBATCH --exclusive
    #SBATCH --nodes=4
    #SBATCH --tasks-per-node=36
    #SBATCH --cpus-per-task=1

    # Replace [budget code] below with your project code (e.g. t01)
    #SBATCH --account=[budget code]
    # Replace [partition name] below with your partition name (e.g. standard,gpu)
    #SBATCH --partition=[partition name]
    # Replace [qos name] below with your qos name (e.g. standard,long,gpu)
    #SBATCH --qos=[qos name]

    # Load CASTEP version 18 module
    module load castep/18

    # Set OMP_NUM_THREADS=1 to avoid unintentional threading
    export OMP_NUM_THREADS=1

    # Run using input in test_calc.in
    srun --distribution=block:block castep.mpi test_calc

diff --git a/docs/software-packages/cp2k.md b/docs/software-packages/cp2k.md new file mode 100644 index 00000000..805bd716 --- /dev/null +++ b/docs/software-packages/cp2k.md @@ -0,0 +1,92 @@ +# CP2K

[CP2K](https://www.cp2k.org/) is a quantum chemistry and solid state
physics software package that can perform atomistic simulations of solid
state, liquid, molecular, periodic, material, crystal, and biological
systems.
CP2K provides a general framework for different modelling +methods such as DFT using the mixed Gaussian and plane waves approaches +GPW and GAPW. Supported theory levels include DFTB, LDA, GGA, MP2, RPA, +semi-empirical methods (AM1, PM3, PM6, RM1, MNDO, …), and classical +force fields (AMBER, CHARMM, …). CP2K can do simulations of molecular +dynamics, metadynamics, Monte Carlo, Ehrenfest dynamics, vibrational +analysis, core level spectroscopy, energy minimisation, and transition +state optimisation using NEB or dimer method. + +## Useful Links + +- [CP2K Reference Manual](https://manual.cp2k.org/#gsc.tab=0) +- [CP2K HOWTOs](https://www.cp2k.org/howto) +- [CP2K FAQs](https://www.cp2k.org/faq) + +## Using CP2K on Cirrus + +CP2K is available through the `cp2k` module. MPI only `cp2k.popt` and +MPI/OpenMP Hybrid `cp2k.psmp` binaries are available. + +## Running parallel CP2K jobs - MPI Only + +To run CP2K using MPI only, load the `cp2k` module and use the +`cp2k.popt` executable. + +For example, the following script will run a CP2K job using 4 nodes (144 +cores): + + #!/bin/bash + + # Slurm job options (name, compute nodes, job time) + #SBATCH --job-name=CP2K_test + #SBATCH --time=0:20:0 + #SBATCH --exclusive + #SBATCH --nodes=4 + #SBATCH --tasks-per-node=36 + #SBATCH --cpus-per-task=1 + + # Replace [budget code] below with your budget code (e.g. t01) + #SBATCH --account=[budget code] + # Replace [partition name] below with your partition name (e.g. standard,gpu) + #SBATCH --partition=[partition name] + # Replace [qos name] below with your qos name (e.g. standard,long,gpu) + #SBATCH --qos=[qos name] + + # Load CP2K + module load cp2k + + #Ensure that no libraries are inadvertently using threading + export OMP_NUM_THREADS=1 + + # Run using input in test.inp + srun cp2k.popt -i test.inp + +## Running Parallel CP2K Jobs - MPI/OpenMP Hybrid Mode + +To run CP2K using MPI and OpenMP, load the `cp2k` module and use the +`cp2k.psmp` executable. + +For example, the following script will run a CP2K job using 8 nodes, +with 2 OpenMP threads per MPI process: + + #!/bin/bash + + # Slurm job options (name, compute nodes, job time) + #SBATCH --job-name=CP2K_test + #SBATCH --time=0:20:0 + #SBATCH --exclusive + #SBATCH --nodes=8 + #SBATCH --tasks-per-node=18 + #SBATCH --cpus-per-task=2 + + # Replace [budget code] below with your budget code (e.g. t01) + #SBATCH --account=[budget code] + # Replace [partition name] below with your partition name (e.g. standard,gpu) + #SBATCH --partition=[partition name] + # Replace [qos name] below with your qos name (e.g. standard,long,gpu) + #SBATCH --qos=[qos name] + + # Load CP2K + module load cp2k + + # Set the number of threads to 2 + export OMP_NUM_THREADS=2 + + # Run using input in test.inp + srun cp2k.psmp -i test.inp diff --git a/docs/software-packages/elements.md b/docs/software-packages/elements.md new file mode 100644 index 00000000..dcf69b5a --- /dev/null +++ b/docs/software-packages/elements.md @@ -0,0 +1,105 @@ +# ELEMENTS + +ELEMENTS is a computational fluid dynamics (CFD) software tool based on +the HELYX® package developed by ENGYS. The software features an advanced +open-source CFD simulation engine and a client-server GUI to provide a +flexible and cost-effective HPC solver platform for automotive and +motorsports design applications, including a dedicated virtual wind +tunnel wizard for external vehicle aerodynamics and other proven methods +for UHTM, HVAC, aeroacoustics, etc. 
+ +## Useful Links + +- [Information about ELEMENTS](https://engys.com/products/elements) +- [Information about ENGYS](https://engys.com/about-us) + +## Using ELEMENTS on Cirrus + +ELEMENTS is only available on Cirrus to authorised users with a valid +license of the software. For any queries regarding ELEMENTS on Cirrus, +please [contact ENGYS](https://engys.com/contact-us) or the [Cirrus +Helpdesk](mailto:support@cirrus.ac.uk). + +ELEMENTS applications can be run on Cirrus in two ways: + +- Manually from the command line, using a SSH terminal to access the + cluster's master node. +- Interactively from within the ELEMENTS GUI, using the dedicated + client-server node to connect remotely to the cluster. + +A complete user's guide to access ELEMENTS on demand via Cirrus is +provided by ENGYS as part of this service. + +## Running ELEMENTS Jobs in Parallel + +The standard execution of ELEMENTS applications on Cirrus is handled +through the command line using a submission script to control Slurm. A +basic submission script for running multiple ELEMENTS applications in +parallel using the SGI-MPT (Message Passing Toolkit) module is included +below. In this example the applications `helyxHexMesh`, `caseSetup` and +`helyxAero` are run sequentially using 4 nodes (144 cores). + + #!/bin/bash --login + + # Slurm job options (name, compute nodes, job time) + #SBATCH --job-name=Test + #SBATCH --time=1:00:00 + #SBATCH --exclusive + #SBATCH --nodes=4 + #SBATCH --ntasks-per-node=36 + #SBATCH --cpus-per-task=1 + #SBATCH --output=test.out + + # Replace [budget code] below with your budget code (e.g. t01) + #SBATCH --account=t01 + + # Replace [partition name] below with your partition name (e.g. standard) + #SBATCH --partition=standard + + + # Replace [QoS name] below with your QoS name (e.g. commercial) + #SBATCH --qos=commercial + + # Load any required modules + module load gcc + module load mpt + + # Load the HELYX-Core environment v3.5.0 (select version as needed, e.g. 3.5.0) + source /scratch/sw/elements/v3.5.0/CORE/HELYXcore-3.5.0/platforms/activeBuild.shrc + + # Launch ELEMENTS applications in parallel + export myoptions="-parallel" + jobs="helyxHexMesh caseSetup helyxAero" + + for job in `echo $jobs` + do + + case "$job" in + * ) options="$myoptions" ;; + esac + + srun $job $myoptions 2>&1 | tee log/$job.$SLURM_JOB_ID.out + + done + +Alternatively, the user can execute most ELEMENTS applications on Cirrus +interactively via the GUI by following these simple steps: + +1. Launch ELEMENTS GUI in your local Windows or Linux machine. +2. Create a client-server connection to Cirrus using the dedicated node + provided for this service in the GUI. Enter your Cirrus user login + details and the total number of processors to be employed in the + cluster for parallel execution. +3. Use the GUI in the local machine to access the remote file system in + Cirrus to load a geometry, create a computational grid, set up a + simulation, solve the flow, and post-process the results using the + HPC resources available in the cluster. The Slurm scheduling + associated with every ELEMENTS job is handled automatically by the + client-server. +4. Visualise the remote data from your local machine, perform changes + to the model and complete as many flow simulations in Cirrus as + required, all interactively from within the GUI. +5. 
Disconnect the client-server at any point during execution, leave a
   utility or solver running in the cluster, and resume the connection
   to Cirrus from another client machine to reload an existing case in
   the GUI when needed. diff --git a/docs/software-packages/flacs.md b/docs/software-packages/flacs.md new file mode 100644 index 00000000..cdc1fb9a --- /dev/null +++ b/docs/software-packages/flacs.md @@ -0,0 +1,297 @@ +# FLACS

[FLACS](http://www.gexcon.com/index.php?/flacs-software/article/FLACS-Overview)
from [Gexcon](http://www.gexcon.com) is the industry standard for CFD
explosion modelling and one of the best-validated tools for modeling
flammable and toxic releases in a technical safety context.

The Cirrus cluster is ideally suited to run multiple FLACS simulations
simultaneously, via its [batch system](../../user-guide/batch/).
Short-lasting simulations (typically up to a few hours of computing time
each) can be processed efficiently and you could get a few hundred done
in a day or two. In contrast, the Cirrus cluster is not particularly
suited for running single big FLACS simulations with many threads: each
node on Cirrus has 2x4 memory channels, and for memory-bound
applications like FLACS multi-threaded execution will not scale linearly
beyond eight cores. On most systems, FLACS will not scale well to more
than four cores (cf. the FLACS User's Manual), and therefore multi-core
hardware is normally best used by increasing the number of simulations
running in parallel rather than by increasing the number of cores per
simulation.

Gexcon has two different service offerings on Cirrus: FLACS-Cloud and
FLACS-HPC. FLACS-Cloud is the preferable way to exploit the HPC cluster,
directly from the FLACS graphical user interfaces. For users who are
familiar with accessing remote Linux HPC systems manually, FLACS-HPC may
be an option. Both services are presented below.

## FLACS-Cloud

FLACS-Cloud is a high performance computing service available right from
the FLACS-Risk user interface, as well as from the FLACS RunManager. It
allows you to run FLACS simulations on the high performance cloud
computing infrastructure of Gexcon's partner EPCC straight from the
graphical user interfaces of FLACS -- no need to manually log in,
transfer data, or start jobs!

By using the FLACS-Cloud service, you can run a large number of
simulations very quickly, without having to invest in in-house
computing hardware. The FLACS-Cloud service scales to your demand
and facilitates running projects with rapid development cycles.

The workflow for using FLACS-Cloud is described in the FLACS User's
Manual and in the FLACS-Risk documentation; you can also find basic
information in the knowledge base of the [FLACS User
Portal](https://gexcon.freshdesk.com/solution/categories/14000072843)
(accessible for FLACS license holders).

## FLACS-HPC

Compared to FLACS-Cloud, the FLACS-HPC service is built on more
traditional ways of accessing and using a remote Linux cluster.
Therefore, the user experience is more basic, and FLACS has to be run
manually. For an experienced user, however, this way of exploiting the
HPC system can be at least as efficient as FLACS-Cloud.

Follow the steps below to use the FLACS-HPC facilities on Cirrus.

*Note:* The instructions below assume you have a valid account on
Cirrus.
To get an account please first get in touch with FLACS support +at and then see the instructions in the [Tier-2 SAFE +Documentation](https://tier2-safe.readthedocs.io). + +*Note:* In the instructions below you should substitute "username" by +your actual Cirrus username. + +### Log into Cirrus + +Log into Cirrus following the instructions at +`../user-guide/connecting`. + +### Upload your data to Cirrus + +Transfer your data to Cirrus by following the instructions at +`../user-guide/data`. + +For example, to copy the scenario definition files from the current +directory to the folder `project_folder` in your home directory on +Cirrus run the following command on your local machine: + + rsync -avz c*.dat3 username@cirrus.epcc.ed.ac.uk:project_folder + +Note that this will preserve soft links as such; the link targets are +not copied if they are outside the current directory. + +### FLACS license manager + +In order to use FLACS a valid license is required. To check the +availability of a license, a license manager is used. To be able to +connect to the license manager from the batch system, users wishing to +use FLACS should add the following file as `~/.hasplm/hasp_104628.ini` +(that is, in their home directory) + + ; copy this file (vendor is gexcon) to ~/.hasplm/hasp_104628.ini + aggressive = 0 + broadcastsearch = 0 + serveraddr = cirrus-services1 + disable_IPv6 = 1 + +### Submit a FLACS job to the queue + +To run FLACS on Cirrus you must first change to the directory where your +FLACS jobs are located, use the `cd` (change directory) command for +Linux. For example + + cd projects/sim + +The usual way to submit work to the queue system is to write a +submission script, which would be located in the working directory. This +is a standard bash shell script, a simple example of which is given +here: + + #!/bin/bash --login + + #SBATCH --job-name=test_flacs_1 + #SBATCH --ntasks=1 + #SBATCH --cpus-per-task=1 + #SBATCH --time=02:00:00 + #SBATCH --partition=standard + #SBATCH --qos=standard + + module load flacs-cfd/21.2 + + run_runflacs 012345 + +The script has a series of special comments (introduced by +\#SBATCH) which give information to the +queue system to allow the system to allocate space for the job and to +execute the work. These are discussed in more detail below. + +The `flacs` module is loaded to make the application available. Note +that you should specify the specific version you require: + + module load flacs-cfd/21.2 + +(Use `module avail flacs` to see which versions are available.) The +appropriate FLACS commands can then be executed in the usual way. + +Submit your FLACS jobs using the `sbatch` command, e.g.: + + $ sbatch --account=i123 script.sh + Submitted batch job 157875 + +The `--account=i123` option is obligatory and states that account `i123` +will be used to record the CPU time consumed by the job, and result in +billing to the relevant customer. You will need your project account +code here to replace `i123`. You can check your account details in SAFE. + +The name of the submission script here is `script.sh`. The queue system +returns a unique job id (here `157875`) to identify the job. For +example, the standard output here will appear in a file named +`slurm-157875.out` in the current working directory. + +### Options for FLACS jobs + +The `#SBATCH` lines in the script above set various parameters which +control execution of the job. The first is `--job-name` just provides a +label which will be associated with the job. 
+
+The parameter `--ntasks=1` is the number of tasks or processes involved
+in the job. For a serial FLACS job you would use `--ntasks=1`.
+
+The maximum length of time (i.e. wall clock time) you want the job to
+run is specified with the `--time=hh:mm:ss` option. After this time,
+your job will be terminated by the job scheduler. The default time limit
+is 12 hours. It is useful to have an estimate of how long your job will
+take to be able to specify the correct limit (which can take some
+experience). Note that shorter jobs can sometimes be scheduled more
+quickly by the system.
+
+Multithreaded FLACS simulations can be run on Cirrus with the following
+job submission, schematically:
+
+    #SBATCH --ntasks=1
+    #SBATCH --cpus-per-task=4
+    ...
+
+    run_runflacs -dir projects/sim 010101 NumThreads=4
+
+When submitting multithreaded FLACS simulations the `--cpus-per-task`
+option should be used in order for the queue system to allocate the
+correct resources (here 4 threads running on 4 cores). In addition, one
+must also specify the number of threads used by the simulation with the
+`NumThreads=4` option to run_runflacs.
+
+One can also specify the OpenMP version of FLACS explicitly via, e.g.,
+
+    export OMP_NUM_THREADS=20
+
+    run_runflacs version _omp NumThreads=20
+
+See the FLACS
+[manual](https://www3.gexcon.com/files/manual/flacs/pdf/flacs-users-manual.pdf)
+for further details.
+
+### Monitor your jobs
+
+You can monitor the progress of your jobs with the `squeue` command.
+This will list all jobs that are running or queued on the system. To
+list only your jobs use:
+
+    squeue -u username
+
+### Submitting many FLACS jobs as a job array
+
+Running many related scenarios with the FLACS simulator is ideally
+suited for using [job arrays](../../user-guide/batch/#job-arrays), i.e.
+running the simulations as part of a single job.
+
+Note you must determine ahead of time the number of scenarios involved.
+This determines the number of array elements, which must be specified at
+the point of job submission. The number of array elements is specified
+by the `--array` argument to `sbatch`.
+
+A job script for running a job array with 128 FLACS scenarios that are
+located in the current directory could look like this:
+
+    #!/bin/bash --login
+
+    # Recall that the resource specification is per element of the array,
+    # so this would give 128 instances of one task (with one thread per
+    # task --cpus-per-task=1).
+
+    #SBATCH --array=1-128
+
+    #SBATCH --ntasks=1
+    #SBATCH --cpus-per-task=1
+    #SBATCH --time=02:00:00
+    #SBATCH --account=z04
+
+    #SBATCH --partition=standard
+    #SBATCH --qos=commercial
+
+    # Abbreviate some SLURM variables for brevity/readability
+
+    TASK_MIN=${SLURM_ARRAY_TASK_MIN}
+    TASK_MAX=${SLURM_ARRAY_TASK_MAX}
+    TASK_ID=${SLURM_ARRAY_TASK_ID}
+    TASK_COUNT=${SLURM_ARRAY_TASK_COUNT}
+
+    # Form a list of relevant files, and check the number of array elements
+    # matches the number of cases with 6-digit identifiers.
+
+    CS_FILES=(`ls -1 cs??????.dat3`)
+
+    if test "${#CS_FILES[@]}" -ne "${TASK_COUNT}";
+    then
+        printf "Number of files is: %s\n" "${#CS_FILES[@]}"
+        printf "Number of array tasks is: %s\n" "${TASK_COUNT}"
+        printf "Do not match!\n"
+    fi
+
+    # All tasks loop through the entire list to find their specific case.
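+    # Each task compares its own ${TASK_ID} against the loop index: the
+    # matching case file sits at offset (TASK_ID - TASK_MIN) in CS_FILES,
+    # and its 6-digit identifier is cut out of the file name
+    # cs<identifier>.dat3 by the ${my_file:2:6} substring expansion.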
+ + for (( jid = $((${TASK_MIN})); jid <= $((${TASK_MAX})); jid++ )); + do + if test "${TASK_ID}" -eq "${jid}"; + then + # File list index with offset zero + file_id=$((${jid} - ${TASK_MIN})) + # Form the substring file_id (recall syntax is :offset:length) + my_file=${CS_FILES[${file_id}]} + my_file_id=${my_file:2:6} + fi + done + + printf "Task %d has file %s id %s\n" "${TASK_ID}" "${my_file}" "${my_file_id}" + + module load flacs-cfd/21.2 + `which run_runflacs` ${my_file_id} + +### Transfer data from Cirrus to your local system + +After your simulations are finished, transfer the data back from Cirrus +following the instructions at `../user-guide/data`. + +For example, to copy the result files from the directory +`project_folder` in your home directory on Cirrus to the folder `/tmp` +on your local machine use: + + rsync -rvz --include='r[13t]*.*' --exclude='*' username@cirrus.epcc.ed.ac.uk:project_folder/ /tmp + +### Billing for FLACS-HPC use on Cirrus + +CPU time on Cirrus is measured in CPUh for each job run on a compute +node, based on the number of physical cores employed. Only jobs +submitted to compute nodes via `sbatch` are charged. Any processing on a +login node is not charged. However, using login nodes for computations +other than simple pre- or post-processing is strongly discouraged. + +Gexcon normally bills monthly for the use of FLACS-Cloud and FLACS-HPC, +based on the Cirrus CPU usage logging. + +## Getting help + +Get in touch with FLACS Support by email to if you +encounter any problems. For specific issues related to Cirrus rather +than FLACS contact the [Cirrus +helpdesk](http://www.cirrus.ac.uk/support/). diff --git a/docs/software-packages/gaussian.md b/docs/software-packages/gaussian.md new file mode 100644 index 00000000..f5b75f0f --- /dev/null +++ b/docs/software-packages/gaussian.md @@ -0,0 +1,116 @@ +# Gaussian + +[Gaussian](http://www.gaussian.com/) is a general-purpose computational +chemistry package. + +## Useful Links + + - [Gaussian User Guides](http://gaussian.com/techsupport/) + +## Using Gaussian on Cirrus + +**Gaussian on Cirrus is only available to University of Edinburgh +researchers through the University's site licence. Users from other +institutions cannot access the version centrally-installed on Cirrus.** + +If you wish to have access to Gaussian on Cirrus please [request access +via +SAFE](https://epcced.github.io/safe-docs/safe-for-users/#how-to-request-access-to-a-package-group-licensed-software-or-restricted-features) + +Gaussian cannot run across multiple nodes. This means that the maximum +number of cores you can use for Gaussian jobs is 36 (the number of cores +on a compute node). In reality, even large Gaussian jobs will not be +able to make effective use of more than 8 cores. You should explore the +scaling and performance of your calculations on the system before +running production jobs. + +## Scratch Directories + +You will typically add lines to your job submission script to create a +scratch directory on the solid state storage for temporary Gaussian +files. e.g.: + + export GAUSS_SCRDIR="/scratch/space1/x01/auser/$SLURM_JOBID.tmp" + mkdir -p $GAUSS_SCRDIR + +You should also add a line at the end of your job script to remove the +scratch directory. e.g.: + + rm -r $GAUSS_SCRDIR + +## Running serial Gaussian jobs + +In many cases you will use Gaussian in serial mode. The following +example script will run a serial Gaussian job on Cirrus (before using, +ensure you have created a Gaussian scratch directory as outlined above). 
+ + #!/bin/bash + + # job options (name, compute nodes, job time) + #SBATCH --job-name=G16_test + #SBATCH --ntasks=1 + #SBATCH --time=0:20:0 + + # Replace [budget code] below with your project code (e.g. t01) + #SBATCH --account=[budget code] + # Replace [partition name] below with your partition name (e.g. standard,gpu) + #SBATCH --partition=[partition name] + # Replace [qos name] below with your qos name (e.g. standard,long,gpu) + #SBATCH --qos=[qos name] + + # Load Gaussian module + module load gaussian + + # Setup the Gaussian environment + source $g16root/g16/bsd/g16.profile + + # Location of the scratch directory + export GAUSS_SCRDIR="/scratch/space1/x01/auser/$SLURM_JOBID.tmp" + mkdir -p $GAUSS_SCRDIR + + # Run using input in "test0027.com" + g16 test0027 + + # Remove the temporary scratch directory + rm -r $GAUSS_SCRDIR + +## Running parallel Gaussian jobs + +Gaussian on Cirrus can use shared memory parallelism through OpenMP by +setting the OMP_NUM_THREADS environment +variable. The number of cores requested in the job should also be +modified to match. + +For example, the following script will run a Gaussian job using 4 cores. + + #!/bin/bash --login + + # job options (name, compute nodes, job time) + #SBATCH --job-name=G16_test + #SBATCH --ntasks=1 + #SBATCH --cpus-per-task=4 + #SBATCH --time=0:20:0 + + # Replace [budget code] below with your project code (e.g. t01) + #SBATCH --account=[budget code] + # Replace [partition name] below with your partition name (e.g. standard,gpu) + #SBATCH --partition=[partition name] + # Replace [qos name] below with your qos name (e.g. standard,long,gpu) + #SBATCH --qos=[qos name] + + # Load Gaussian module + module load gaussian + + # Setup the Gaussian environment + source $g16root/g16/bsd/g16.profile + + # Location of the scratch directory + export GAUSS_SCRDIR="/scratch/space1/x01/auser/$SLURM_JOBID.tmp" + mkdir -p $GAUSS_SCRDIR + + # Run using input in "test0027.com" + export OMP_NUM_THREADS=4 + g16 test0027 + + # Remove the temporary scratch directory + rm -r $GAUSS_SCRDIR diff --git a/docs/software-packages/gromacs.md b/docs/software-packages/gromacs.md new file mode 100644 index 00000000..1553f329 --- /dev/null +++ b/docs/software-packages/gromacs.md @@ -0,0 +1,136 @@ +# GROMACS + +[GROMACS](http://www.gromacs.org/) GROMACS is a versatile package to +perform molecular dynamics, i.e. simulate the Newtonian equations of +motion for systems with hundreds to millions of particles. It is +primarily designed for biochemical molecules like proteins, lipids and +nucleic acids that have a lot of complicated bonded interactions, but +since GROMACS is extremely fast at calculating the nonbonded +interactions (that usually dominate simulations) many groups are also +using it for research on non-biological systems, e.g. polymers. + +## Useful Links + +- [GROMACS User Guides](http://manual.gromacs.org/documentation/) +- [GROMACS Tutorials](http://www.gromacs.org/Documentation/Tutorials) + +## Using GROMACS on Cirrus + +GROMACS is Open Source software and is freely available to all Cirrus +users. A number of versions are available: + +- Serial/shared memory, single precision: gmx +- Parallel MPI/OpenMP, single precision: gmx_mpi +- GPU version, single precision: gmx + +## Running parallel GROMACS jobs: pure MPI + +GROMACS can exploit multiple nodes on Cirrus and will generally be run +in exclusive mode over more than one node. + +For example, the following script will run a GROMACS MD job using 2 +nodes (72 cores) with pure MPI. 
+
+    #!/bin/bash --login
+
+    # Slurm job options (name, compute nodes, job time)
+    #SBATCH --job-name=gmx_test
+    #SBATCH --nodes=2
+    #SBATCH --tasks-per-node=36
+    #SBATCH --time=0:25:0
+    # Make sure you are not sharing nodes with other users
+    #SBATCH --exclusive
+
+    # Replace [budget code] below with your project code (e.g. t01)
+    #SBATCH --account=[budget code]
+    # Replace [partition name] below with your partition name (e.g. standard,gpu)
+    #SBATCH --partition=[partition name]
+    # Replace [qos name] below with your qos name (e.g. standard,long,gpu)
+    #SBATCH --qos=[qos name]
+
+    # Load GROMACS module
+    module load gromacs
+
+    # Run using input in test_calc.tpr
+    export OMP_NUM_THREADS=1
+    srun gmx_mpi mdrun -s test_calc.tpr
+
+## Running parallel GROMACS jobs: hybrid MPI/OpenMP
+
+The following script will run a GROMACS MD job using 2 nodes (72 cores)
+with 6 MPI processes per node (12 MPI processes in total) and 6 OpenMP
+threads per MPI process.
+
+    #!/bin/bash --login
+
+    # Slurm job options (name, compute nodes, job time)
+    #SBATCH --job-name=gmx_test
+    #SBATCH --nodes=2
+    #SBATCH --tasks-per-node=6
+    #SBATCH --cpus-per-task=6
+    #SBATCH --time=0:25:0
+    # Make sure you are not sharing nodes with other users
+    #SBATCH --exclusive
+
+    # Replace [budget code] below with your project code (e.g. t01)
+    #SBATCH --account=[budget code]
+    # Replace [partition name] below with your partition name (e.g. standard,gpu)
+    #SBATCH --partition=[partition name]
+    # Replace [qos name] below with your qos name (e.g. standard,long,gpu)
+    #SBATCH --qos=[qos name]
+
+    # Load GROMACS and MPI modules
+    module load gromacs
+
+    # Run using input in test_calc.tpr
+    export OMP_NUM_THREADS=6
+    srun gmx_mpi mdrun -s test_calc.tpr
+
+## GROMACS GPU jobs
+
+The following script will run a GROMACS GPU MD job using 1 node (40
+cores and 4 GPUs). The job is set up to run on [number of MPI tasks]
+MPI processes and [number of OpenMP threads per task] OpenMP threads --
+you will need to set these values when running your script.
+
+!!! Note
+
+    Unlike the base version of GROMACS, the GPU version comes with only
+    MDRUN installed. For any pre- and post-processing, you will need to use
+    the non-GPU version of GROMACS.
+
+```
+#!/bin/bash --login
+
+# Slurm job options (name, compute nodes, job time)
+#SBATCH --job-name=gmx_test
+#SBATCH --nodes=1
+#SBATCH --time=0:25:0
+#SBATCH --exclusive
+
+# Replace [budget code] below with your project code (e.g. t01)
+#SBATCH --account=[budget code]
+# Replace [partition name] below with your partition name (e.g. standard,gpu)
+#SBATCH --partition=[partition name]
+# Replace [qos name] below with your qos name (e.g. standard,long,gpu)
+#SBATCH --qos=[qos name]
+#SBATCH --gres=gpu:4
+
+# Load GROMACS and MPI modules
+module load gromacs/2022.1-gpu
+
+# Run using input in test_calc.tpr
+export OMP_NUM_THREADS=[number of OpenMP threads per task]
+srun --ntasks=[number of MPI tasks] --cpus-per-task=[number of OpenMP threads per task] \
+    gmx_mpi mdrun -ntomp [number of OpenMP threads per task] -s test_calc.tpr
+```
+
+Information on how to assign different types of calculation to the CPU
+or GPU appears in the GROMACS documentation under [Getting good
+performance from
+mdrun](http://manual.gromacs.org/documentation/current/user-guide/mdrun-performance.html).
diff --git a/docs/software-packages/helyx.md b/docs/software-packages/helyx.md
new file mode 100644
index 00000000..5f5a2046
--- /dev/null
+++ b/docs/software-packages/helyx.md
@@ -0,0 +1,105 @@
+# HELYX®
+
+HELYX is a comprehensive, general-purpose, computational fluid dynamics
+(CFD) software package for engineering analysis and design optimisation
+developed by ENGYS.
The package features an advanced open-source CFD +simulation engine and a client-server GUI to provide a flexible and +cost-effective HPC solver platform for enterprise applications. + +## Useful Links + +- [Information about HELYX](https://engys.com/products/helyx) +- [Information about ENGYS](https://engys.com/about-us) + +## Using HELYX on Cirrus + +HELYX is only available on Cirrus to authorised users with a valid +license to use the software. For any queries regarding HELYX on Cirrus, +please [contact ENGYS](https://engys.com/contact-us) or the [Cirrus +Helpdesk](mailto:support@cirrus.ac.uk). + +HELYX applications can be run on Cirrus in two ways: + +- Manually from the command line, using a SSH terminal to access the + cluster’s master node. +- Interactively from within the HELYX GUI, using the dedicated + client-server node to connect remotely to the cluster. + +A complete user’s guide to access HELYX on demand via Cirrus is provided +by ENGYS as part of this service. + +## Running HELYX Jobs in Parallel + +The standard execution of HELYX applications on Cirrus is handled +through the command line using a submission script to control Slurm. A +basic submission script for running multiple HELYX applications in +parallel using the SGI-MPT (Message Passing Toolkit) module is included +below. In this example the applications `helyxHexMesh`, `caseSetup` and +`helyxSolve` are run sequentially using 4 nodes (144 cores). + + #!/bin/bash --login + + # Slurm job options (name, compute nodes, job time) + #SBATCH --job-name=Test + #SBATCH --time=1:00:00 + #SBATCH --exclusive + #SBATCH --nodes=4 + #SBATCH --ntasks-per-node=36 + #SBATCH --cpus-per-task=1 + #SBATCH --output=test.out + + # Replace [budget code] below with your budget code (e.g. t01) + #SBATCH --account=t01 + + # Replace [partition name] below with your partition name (e.g. standard) + #SBATCH --partition=standard + + # Replace [QoS name] below with your QoS name (e.g. commercial) + #SBATCH --qos=commercial + + # Load any required modules + module load gcc + module load mpt + + # Load the HELYX-Core environment v3.5.0 (select version as needed, e.g. 3.5.0) + source /scratch/sw/helyx/v3.5.0/CORE/HELYXcore-3.5.0/platforms/activeBuild.shrc + + # Set the number of threads to 1 + export OMP_NUM_THREADS=1 + + # Launch HELYX applications in parallel + export myoptions="-parallel" + jobs="helyxHexMesh caseSetup helyxSolve" + + for job in `echo $jobs` + do + + case "$job" in + * ) options="$myoptions" ;; + esac + + srun $job $myoptions 2>&1 | tee log/$job.$SLURM_JOB_ID.out + + done + +Alternatively, the user can execute most HELYX applications on Cirrus +interactively via the GUI by following these simple steps: + +1. Launch HELYX GUI in your local Windows or Linux machine. +2. Create a client-server connection to Cirrus using the dedicated node + provided for this service in the GUI. Enter your Cirrus user login + details and the total number of processors to be employed in the + cluster for parallel execution. +3. Use the GUI in the local machine to access the remote file system in + Cirrus to load a geometry, create a computational grid, set up a + simulation, solve the flow, and post-process the results using the + HPC resources available in the cluster. The Slurm scheduling + associated with every HELYX job is handled automatically by the + client-server. +4. 
Visualise the remote data from your local machine, perform changes
+   to the model and complete as many flow simulations in Cirrus as
+   required, all interactively from within the GUI.
+5. Disconnect the client-server at any point during execution, leave a
+   utility or solver running in the cluster, and resume the connection
+   to Cirrus from another client machine to reload an existing case in
+   the GUI when needed.
diff --git a/docs/software-packages/images/MATLAB_image1.png b/docs/software-packages/images/MATLAB_image1.png
new file mode 100644
index 0000000000000000000000000000000000000000..bc7e817b4e55cc7c146202d39f75f3ca07754dc6
GIT binary patch
zwL3n5UY1t|C^zveZVb1~bgnL{dW0a&2G|6w2+XiyqzmvJ(W6xo6eZW`DzfMt5gi9Z zrl!Uc@r{i9csglb-~Evej`iJ;uRr-kV8DI`WLuFSrHQrY8mdHhdjw%rxx)gDr}Tk2 zX|?bszsiO5Hw-$snHsd480_g+?tOH`$-u+hZ*2NvVrbHQhx%oT0j<(tzxbXS;VR;c zx!EBQvbUth6L4;!r@tSnE1Gd-7i)XHN;@*1tmdbIe}{fw0d7GoK&GnD9hb9IkLDeY zNLAzX>U=Hd0ilgQ89oIq$xwr_V^7*J)(9PI`kx=(??8^zL3LneY5g;5;IFFy^Al2%YW(Z4XXP%~W|Ml&&dS6DJR zPeE{!=NsG#!GE{$_xgtlp(fNajNdSTFO4~0zQmHRauN7xIEW+^#PAe8`YdzW%ST^1 z2(-Pn2oHa*>j;a9F^#n|tC6hK=djtBT+**U3aFHaqu0T+K12Ai%gR$8QRbwTEPfmw zLD8BJb283;?;l>McwEGjE)+>kW%YSF-$h+PoaUsQ{RQM`&(N=bXb2h-QsmExx)N8M zXkj!6k}iz&$@;XG-2qBy-VT75T4jPbO@rEpbeS~4&5can0F{US83C0R&f5K}2v! zR&HcCcdODvx=DneT8{@uUVu3SgO)*%DH0$^!eXZ$Zon&odk@6u{4o-9KSJeze>0Nz zpoyv6ar=y!EH#>9vO~f9-w#^W`ylYpD6pjoq3zAC0SnhJkm#{!=uypw*REn;APoQG zdchou_~n`K*}7W_HzMW1Z%LE=8T8?y-9!miY!D}9B>!D0(WRwv_34$wy1%$@rM=4; zYfTd(u43%vob#90)g2ya@2%&;QiADErk!qyvf13*-3|S-aecP6wK=!8{p`-ZX9%~s zxU#gdxbpPp=Dqc;jXm|Pzc&}NBB_GRoU8)PoUZ!*j54u12Q~3`gq*QT#GJ9QcYpRJ z-b^h#-pov*)y1rjgdzhaJ|VCksFA2+p=`LMPq1`-41X`^Sk&eliie91T@Q#T$#E9X zu@FHalBAytliLo!oh2JpA&V^4$jTTm%8fOiao4U~BBn-3rO^0V#n${Y9_JpP9(<0N zz#rQ_9{!)nm?1A}OQp;h6_i6oY~eH%lWGb{&awq7ij&S|NDt6YcWTm@$S~{7brLtkvd|lIKw$l; z0-Il+1Kdy*+E+I6w}>5kYVIvnXxe}>dx9KqzZ|u4sp?fCrb@?>(;#_b{*C~S(t|>{ zDb|iVyR59|oHS7seU))Hn|TZ|t~s?THL_Zr_%?Hs$bDpWM@yQTGq4`oQCML#8ifux z%cKjm2pfjFe}oUQC5dUFJ=g@}%QGJv=@I--#PP4|q@O(av993$i3Yoc=pFMc~`@QcOl$z|Bs{hg7e{S=7_cL1i2Tfaj>!HQ_UPfNW{%+_ra~n2K z@;1WJfq`FOf9C^@?^M8%8Cj~7FQr(%RR6A#|3zyxB#IbE^2VFq2CY-szc2!76XJ#}BUR}mGPTYP1uVlKlVEAFmL2E52n4^O2$7<@Sr(5LRLg&k z$_)2U29IIS?eSw*)M?C2kV{M|c_3Z-d|^xbS7pWA*!Ow|-2eZxx<9Mq&fd)K&d$Nl zo9V<944C%+#=LQ~7PTM79;fJzd0u~kf(RE(K-FXo;1T~dRKSwoV+oLT#X&<@mJ=C} zNXXxuC%RIY8KG{;qn4Ca$CybUqEucXnJ^W|{=M}>eXG+q+JW!X%h@)`8VHByC~Lxf z$HiqcmZd4s;Z<3YyxduQN$sn{qEh)mBn=ZM45-wF*DU>tSU*V&rIN;ch^Q!_oJQ%_WzNpM zz{4&3K%TMvUD%R=UexrRhz<{Bwwfwz1bQjC;6o%%f$HChLK!<1k50qQ4(st=BxPxg zav#B^8Y(W`afUN z4HdW4J{;S>j{+>WH0Xbh7efU&^1sfU3G(k0bH=!e{r*wh0-j8J}qp9N1+Xx zTp*QEeUbr%pi|rdw^`DN2)cnwj|n51N0}IA*ng+7Kd~E!OX3qLAy*oiuB?nLZ(Ka> zDPvSwi9l%?lZ#PV0NN%xX)?hC`&UGu^<3QvE$=IjMxx~LhpnqP&s1fh~ zNE*P-HiZ6n&4Ar74eUQRHhiV-e>OvZH64ynmX*KPZKzN@W`ojehtU7$kif?K_iq1( zCODNnhS3y2ygm%4pg=H_pWc zPOef^%;Dp(%7Dd5=VJRhh4L9oiT8pUmF&l(u4Z$3UZyiY(k$5$n3gS~cg?}+r`i2y z5lqN43Q8?QPJ{kaZ=VgyWcIa$!2MJ9z_V_MY0!J?kT;>%;xsGCU-2H=c7gmd% zT+u{WE}xVflP*gNTg9At76l}s7o-9-(Z;`mL>S&Bo@Q~DKpQr>*YGE|;SwQ0gNTOB@?;PENlh!)=tbp&=ptEGr~`g_?ii|853h6}CY0 zuc+GUf5X6!nU^V>XEVEI{arvDw#BT`w7;tTUxSwaxvl+wj*cb-7PW7RoIr{ANuLi} zr}E8^$8Zp_w&zno_*B++zsfPkeNHEYn){70CMYeL1Ik!f?TOnFIOsLV`FD5NAKtBl z_=?`xp6e+bPJJhV*nn5gU-bXXMb#>eMKE~JuM?NtZ%sNLt9h_z<@V`+PO4u}*;lEN zlRIaQCzJT>BZ6!Zo%6~SD%uoc9DF=dtIf@_g%Z&bw%&pS08R`C#?^Q1Gm zt!HI2*VBwieG0pN@kOjZ9i$d@w&ZK;qIt>|2p`y)tE(@(b?$V}Y1Ual<0dnvUkP1V zI*qY9lk4o}0>4#%3|f|zzEIkCT*7GCmAEKP=gZ-1G7|PKTO<;+uyU@7>E)~9U9&cy z&fx(zL%?m${&zWku2mEJES=syUHjH5wsdL!jpF#LuBUqxP6;yVo(_2V#6~J$u9x=3 zZ56ssRUW(Lb4DV2+uluFk`1-`29OkYx4T7RjD zE$dhttfEeD01kv~S)?5p7kiJiTz*e_ZZe#H@>iQXPS|a@!}e3c$dxq7E0djF z9v`$%4gc=)sr_KqT9%8?xL!^QTN6;;nz-9?i~GesAz0euF`Dh&J>m2Wz0DV+oLg0@ z)-FG`Xz#O8 z=3e@}EI;!tId&g*9ZtC?xnJKjQ$IP~0lQXZT zZm|KnE?w55V@3AblwCqsIWDPAoF!N%@>W9-X1>GCIo#sOFdq}wJt?-3ITrz}1^L and from . + +You can query the versions of OpenFOAM are currently available on Cirrus +from the command line with `module avail openfoam`. + +Versions from are typically `v8` etc, while +versions from are typically `v2006` +(released June 2020). 
+
+## Useful Links
+
+- [OpenFOAM Documentation](https://www.openfoam.com/documentation/)
+
+## Using OpenFOAM on Cirrus
+
+Any batch script which intends to use OpenFOAM should first load the
+appropriate `openfoam` module. You then need to source the `etc/bashrc`
+file provided by OpenFOAM to set all the relevant environment variables.
+The relevant command is printed to screen when the module is loaded. For
+example, for OpenFOAM v8:
+
+    module add openfoam/v8.0
+    source ${FOAM_INSTALL_PATH}/etc/bashrc
+
+You should then be able to use OpenFOAM in the usual way.
+
+## Example Batch Submission
+
+The following example batch submission script would run OpenFOAM on two
+nodes, with 36 MPI tasks per node.
+
+    #!/bin/bash
+
+    #SBATCH --nodes=2
+    #SBATCH --ntasks-per-node=36
+    #SBATCH --exclusive
+    #SBATCH --time=00:10:00
+
+    #SBATCH --partition=standard
+    #SBATCH --qos=standard
+
+    # Load the openfoam module and source the bashrc file
+
+    module load openfoam/v8.0
+    source ${FOAM_INSTALL_PATH}/etc/bashrc
+
+    # Compose OpenFOAM work in the usual way, except that parallel
+    # executables are launched via srun. For example:
+
+    srun interFoam -parallel
+
+A Slurm submission script would usually also contain an account token of
+the form
+
+    #SBATCH --account=your_account_here
+
+where your_account_here should be replaced by the relevant token for
+your account. This is available from SAFE with your budget details.
diff --git a/docs/software-packages/orca.md b/docs/software-packages/orca.md
new file mode 100644
index 00000000..e7356b3d
--- /dev/null
+++ b/docs/software-packages/orca.md
@@ -0,0 +1,75 @@
+# ORCA
+
+ORCA is an ab initio quantum chemistry program package that contains
+modern electronic structure methods including density functional theory,
+many-body perturbation theory, coupled cluster, multireference methods,
+and semi-empirical quantum chemistry methods. Its main field of
+application is larger molecules, transition metal complexes, and their
+spectroscopic properties. ORCA is developed in the research group of
+Frank Neese. The free version is available only for academic use at
+academic institutions.
+
+## Useful Links
+
+- [ORCA Forum](https://orcaforum.kofo.mpg.de/app.php/portal)
+
+## Using ORCA on Cirrus
+
+On Cirrus, ORCA is available for academic use only. If you wish to use
+ORCA for commercial applications, you must contact the ORCA developers.
+
+ORCA cannot use GPUs.
+
+## Running parallel ORCA jobs
+
+The following script will run an ORCA job on Cirrus using 4 MPI
+processes on a single node, with each MPI process placed on a separate
+physical core. It assumes that the input file is `h2o_2.inp`.
+
+    #!/bin/bash
+
+    # job options (name, compute nodes, job time)
+    #SBATCH --job-name=ORCA_test
+    #SBATCH --nodes=1
+    #SBATCH --tasks-per-node=4
+
+    #SBATCH --time=0:20:0
+
+    #SBATCH --account=[budget code]
+    #SBATCH --partition=standard
+    #SBATCH --qos=standard
+
+    # Load ORCA module
+    module load orca
+
+    # Launch the ORCA calculation
+    # * You must use "$ORCADIR/orca" so the application has the full executable path
+    # * Do not use "srun" to launch parallel ORCA jobs as they use internal ORCA routines to launch in parallel
+    # * Remember to change the name of the input file to match your file name
+    $ORCADIR/orca h2o_2.inp
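+
+Note that ORCA takes the number of parallel processes from the `%pal`
+block of the input file rather than from Slurm, so the `nprocs` value
+should match the number of cores requested with `--tasks-per-node`
+(4 in this example). The relevant lines of the input file (shown in
+full below) are:
+
+    %pal
+    nprocs 4
+    end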
+
+The example input file `h2o_2.inp` contains:
+
+    ! DLPNO-CCSD(T) cc-pVTZ cc-pVTZ/C cc-pVTZ/jk rijk verytightscf TightPNO LED
+    # Specify number of processors
+    %pal
+    nprocs 4
+    end
+    # Specify memory
+    %maxcore 12000
+    %mdci
+    printlevel 3
+    end
+    * xyz 0 1
+    O 1.327706 0.106852 0.000000
+    H 1.612645 -0.413154 0.767232
+    H 1.612645 -0.413154 -0.767232
+    O -1.550676 -0.120030 -0.000000
+    H -0.587091 0.053367 -0.000000
+    H -1.954502 0.759303 -0.000000
+    *
+    %geom
+    Fragments
+    2 {3:5} end
+    end
+    end
diff --git a/docs/software-packages/qe.md b/docs/software-packages/qe.md
new file mode 100644
index 00000000..89aa24ed
--- /dev/null
+++ b/docs/software-packages/qe.md
@@ -0,0 +1,47 @@
+# Quantum Espresso (QE)
+
+[Quantum Espresso](http://www.quantum-espresso.org/) is an integrated
+suite of Open-Source computer codes for electronic-structure
+calculations and materials modeling at the nanoscale. It is based on
+density-functional theory, plane waves, and pseudopotentials.
+
+## Useful Links
+
+- [QE User Guides](http://www.quantum-espresso.org/users-manual/)
+- [QE Tutorials](http://www.quantum-espresso.org/tutorials/)
+
+## Using QE on Cirrus
+
+QE is Open Source software and is freely available to all Cirrus users.
+
+## Running parallel QE jobs
+
+QE can exploit multiple nodes on Cirrus and will generally be run in
+exclusive mode over more than one node.
+
+For example, the following script will run a QE pw.x job using 4 nodes
+(144 cores).
+
+    #!/bin/bash
+    #
+    # Slurm job options (name, compute nodes, job time)
+    #SBATCH --job-name=pw_test
+    #SBATCH --nodes=4
+    #SBATCH --tasks-per-node=36
+    #SBATCH --time=0:20:0
+    # Make sure you are not sharing nodes with other users
+    #SBATCH --exclusive
+
+    # Replace [budget code] below with your project code (e.g. t01)
+    #SBATCH --account=[budget code]
+    # Replace [partition name] below with your partition name (e.g. standard,gpu)
+    #SBATCH --partition=[partition name]
+    # Replace [qos name] below with your qos name (e.g. standard,long,gpu)
+    #SBATCH --qos=[qos name]
+
+    # Load QE and MPI modules
+    module load quantum-espresso
+
+    # Run using input in test_calc.in
+    srun pw.x -i test_calc.in
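+
+QE's own parallelisation levels can also be set on the `pw.x` command
+line. As a sketch, assuming the 144 MPI processes above and a
+calculation with enough k-points to be split into 4 pools, the run line
+could become:
+
+    # Distribute the 144 MPI processes over 4 k-point pools (36 each)
+    srun pw.x -nk 4 -i test_calc.in
+
+See the QE user guide for the parallelisation options appropriate to
+your calculation.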
diff --git a/docs/software-packages/specfem3d.md b/docs/software-packages/specfem3d.md
new file mode 100644
index 00000000..b9007609
--- /dev/null
+++ b/docs/software-packages/specfem3d.md
@@ -0,0 +1,55 @@
+# SPECFEM3D Cartesian
+
+[SPECFEM3D Cartesian](https://geodynamics.org/cig/software/specfem3d/)
+simulates acoustic (fluid), elastic (solid), coupled acoustic/elastic,
+poroelastic or seismic wave propagation in any type of conforming mesh
+of hexahedra (structured or not). It can, for instance, model seismic
+waves propagating in sedimentary basins or any other regional geological
+model following earthquakes. It can also be used for non-destructive
+testing or for ocean acoustics.
+
+## Useful Links
+
+- [SPECFEM3D User
+  Resources](https://geodynamics.org/cig/software/specfem3d/#users/)
+- [SPECFEM3D
+  Wiki](https://wiki.geodynamics.org/software:specfem3d:start)
+
+## Using SPECFEM3D Cartesian on Cirrus
+
+SPECFEM3D Cartesian is freely available to all Cirrus users.
+
+## Running parallel SPECFEM3D Cartesian jobs
+
+SPECFEM3D can exploit multiple nodes on Cirrus and will generally be run
+in exclusive mode over more than one node. Furthermore, it can be run on
+the GPU nodes.
+
+For example, the following script will run a SPECFEM3D job using 4 nodes
+(144 cores) with pure MPI.
+
+    #!/bin/bash --login
+
+    # Slurm job options (name, compute nodes, job time)
+    #SBATCH --job-name=SPECFEM3D_Example
+    #SBATCH --time=1:0:0
+    #SBATCH --exclusive
+    #SBATCH --nodes=4
+    #SBATCH --tasks-per-node=36
+    #SBATCH --cpus-per-task=1
+
+    # Replace [budget code] below with your project code (e.g. t01)
+    #SBATCH --account=[budget code]
+    # Replace [partition name] below with your partition name (e.g. standard,gpu)
+    #SBATCH --partition=[partition name]
+    # Replace [qos name] below with your qos name (e.g. standard,long,gpu)
+    #SBATCH --qos=[qos name]
+
+    # Load SPECFEM3D module
+    module load specfem3d
+
+    # Launch the SPECFEM3D solver, xspecfem3D
+    srun xspecfem3D
diff --git a/docs/software-packages/starccm+.md b/docs/software-packages/starccm+.md
new file mode 100644
index 00000000..ecab97c3
--- /dev/null
+++ b/docs/software-packages/starccm+.md
@@ -0,0 +1,134 @@
+# STAR-CCM+
+
+STAR-CCM+ is a computational fluid dynamics (CFD) code with capabilities
+that extend beyond CFD. It provides a broad range of validated models to
+simulate disciplines and physics including CFD, computational solid
+mechanics (CSM), electromagnetics, heat transfer, multiphase flow,
+particle dynamics, reacting flow, electrochemistry, aero-acoustics and
+rheology; the simulation of rigid and flexible body motions with
+techniques including mesh morphing, overset mesh and six degrees of
+freedom (6DOF) motion; and the ability to combine and account for the
+interaction between the various physics and motion models in a single
+simulation to cover your specific application.
+
+## Useful Links
+
+- [Information about STAR-CCM+ by
+  Siemens](https://mdx.plm.automation.siemens.com/star-ccm-plus)
+
+## Licensing
+
+All users must provide their own licence for STAR-CCM+. Currently we
+only support Power on Demand (PoD) licences.
+
+For queries about other types of license options please contact the
+[Cirrus Helpdesk](mailto:support@cirrus.ac.uk) with the relevant
+details.
+
+## Using STAR-CCM+ on Cirrus: Interactive remote GUI Mode
+
+A fast and responsive way of running with a GUI is to install STAR-CCM+
+on your local Windows (7 or 10) or Linux workstation. You can then start
+your local STAR-CCM+ and connect to Cirrus in order to submit new jobs
+or query the status of running jobs.
+
+You will need to set up passwordless SSH connections to Cirrus.
+
+### Jobs using Power on Demand (PoD) licences
+
+You can then start the STAR-CCM+ server on the compute nodes. The
+following script starts the server:
+
+    #!/bin/bash
+
+    # Slurm job options (name, compute nodes, job time)
+    #SBATCH --job-name=STAR-CCM_test
+    #SBATCH --time=0:20:0
+    #SBATCH --exclusive
+    #SBATCH --nodes=14
+    #SBATCH --tasks-per-node=36
+    #SBATCH --cpus-per-task=1
+
+    # Replace [budget code] below with your budget code (e.g. t01)
+    #SBATCH --account=[budget code]
+    # Replace [partition name] below with your partition name (e.g. standard,gpu)
+    #SBATCH --partition=[partition name]
+    # Replace [qos name] below with your qos name (e.g. standard,long,gpu)
standard,long,gpu) + #SBATCH --qos=[qos name] + + # Load the default HPE MPI environment + module load mpt + module load starccm+ + + export SGI_MPI_HOME=$MPI_ROOT + export PATH=$STARCCM_EXE:$PATH + export LM_LICENSE_FILE=48001@192.168.191.10 + export CDLMD_LICENSE_FILE=48001@192.168.191.10 + + scontrol show hostnames $SLURM_NODELIST > ./starccm.launcher.host.$SLURM_JOB_ID.txt + starccm+ -clientldlibpath /scratch/sw/libnsl/1.3.0/lib/ -ldlibpath /scratch/sw/libnsl/1.3.0/lib/ -power -podkey -licpath 48001@192.168.191.10 -server -machinefile ./starccm.launcher.host.$SLURM_JOB_ID.txt -np 504 -rsh ssh + +You should replace "" with your PoD licence key. + +### Automatically load and start a Star-CCM+ simulation + +You can use the "-batch" option to automatically load and start a +Star-CCM+ simulation. + +Your submission script will look like this (the only difference with the +previous examples is the "starccm+" line) + + #!/bin/bash + + # Slurm job options (name, compute nodes, job time) + #SBATCH --job-name=STAR-CCM_test + #SBATCH --time=0:20:0 + #SBATCH --exclusive + #SBATCH --nodes=14 + #SBATCH --tasks-per-node=36 + #SBATCH --cpus-per-task=1 + + # Replace [budget code] below with your budget code (e.g. t01) + #SBATCH --account=[budget code] + # Replace [partition name] below with your partition name (e.g. standard,gpu) + #SBATCH --partition=[partition name] + # Replace [qos name] below with your qos name (e.g. standard,long,gpu) + #SBATCH --qos=[qos name] + + # Load the default HPE MPI environment + module load mpt + module load starccm+ + + export SGI_MPI_HOME=$MPI_ROOT + export PATH=$STARCCM_EXE:$PATH + export LM_LICENSE_FILE=48001@192.168.191.10 + export CDLMD_LICENSE_FILE=48001@192.168.191.10 + + scontrol show hostnames $SLURM_NODELIST > ./starccm.launcher.host.$SLURM_JOB_ID.txt + starccm+ -clientldlibpath /scratch/sw/libnsl/1.3.0/lib/ -ldlibpath /scratch/sw/libnsl/1.3.0/lib/ -power -podkey -licpath 48001@192.168.191.10 -batch simulation.java -machinefile ./starccm.launcher.host.$SLURM_JOB_ID.txt -np 504 -rsh ssh + +This script will load the file "simulation.java". You can find +instructions on how to write a suitable "simulation.java" +[here](https://mdx.plm.automation.siemens.com/star-ccm-plus) + +The file "simulation.java" must be in the same directory as your Slurm +submission script (or you can provide a full path). + +### Local Star-CCM+ client configuration + +Start your local STAR-CCM+ application and connect to your server. Click +on the File -\> "Connect to Server..." option and use the following +settings: + +- Host: name of first Cirrus compute node (use 'qtsat', e.g. r1i0n32) +- Port: the number that you specified in the submission script + +Select the "Connect through SSH tunnel" option and use: + +- SSH Tunnel Host: cirrus-login0.epcc.ed.ac.uk +- SSH Tunnel Host Username: use your Cirrus username +- SSH Options: -agent + +Your local STAR-CCM+ client should now be connected to the remote +server. You should be able to run a new simulation or interact with an +existing one. diff --git a/docs/software-packages/vasp.md b/docs/software-packages/vasp.md new file mode 100644 index 00000000..74e1dc4b --- /dev/null +++ b/docs/software-packages/vasp.md @@ -0,0 +1,125 @@ +# VASP + +The [Vienna Ab initio Simulation Package (VASP)](http://www.vasp.at) is +a computer program for atomic scale materials modelling, e.g. electronic +structure calculations and quantum-mechanical molecular dynamics, from +first principles. 
+ +VASP computes an approximate solution to the many-body Schrödinger +equation, either within density functional theory (DFT), solving the +Kohn-Sham equations, or within the Hartree-Fock (HF) approximation, +solving the Roothaan equations. Hybrid functionals that mix the +Hartree-Fock approach with density functional theory are implemented as +well. Furthermore, Green's functions methods (GW quasiparticles, and +ACFDT-RPA) and many-body perturbation theory (2nd-order Møller-Plesset) +are available in VASP. + +In VASP, central quantities, like the one-electron orbitals, the +electronic charge density, and the local potential are expressed in +plane wave basis sets. The interactions between the electrons and ions +are described using norm-conserving or ultrasoft pseudopotentials, or +the projector-augmented-wave method. + +To determine the electronic groundstate, VASP makes use of efficient +iterative matrix diagonalisation techniques, like the residual +minimisation method with direct inversion of the iterative subspace +(RMM-DIIS) or blocked Davidson algorithms. These are coupled to highly +efficient Broyden and Pulay density mixing schemes to speed up the +self-consistency cycle. + +## Useful Links + +- [VASP Manual](http://cms.mpi.univie.ac.at/vasp/vasp/vasp.html) +- [VASP + Licensing](http://www.vasp.at/index.php/faqs/71-how-can-i-purchase-a-vasp-license) + +## Using VASP on Cirrus + +CPU and GPU versions of VASP are available on Cirrus + +**VASP is only available to users who have a valid VASP licence. VASP 5 +and VASP 6 are separate packages on Cirrus and requests for access need +to be made separately for the two versions via SAFE.** + +If you have a VASP 5 or VASP 6 licence and wish to have access to VASP +on Cirrus please request access through SAFE: + +- [How to request access to package + groups](https://epcced.github.io/safe-docs/safe-for-users/#how-to-request-access-to-a-package-group-licensed-software-or-restricted-features) + +Once your access has been enabled, you access the VASP software using +the `vasp` modules in your job submission script. You can see which +versions of VASP are currently available on Cirrus with + + module avail vasp + +Once loaded, the executables are called: + +- vasp_std - Multiple k-point version +- vasp_gam - GAMMA-point only version +- vasp_ncl - Non-collinear version + +All executables include the additional MD algorithms accessed via the +`MDALGO` keyword. + +## Running parallel VASP jobs - CPU + +The CPU version of VASP can exploit multiple nodes on Cirrus and will +generally be run in exclusive mode over more than one node. + +The following script will run a VASP job using 4 nodes (144 cores). + + #!/bin/bash + + # job options (name, compute nodes, job time) + #SBATCH --job-name=VASP_CPU_test + #SBATCH --nodes=4 + #SBATCH --tasks-per-node=36 + #SBATCH --exclusive + #SBATCH --time=0:20:0 + + # Replace [budget code] below with your project code (e.g. t01) + #SBATCH --account=[budget code] + # Replace [partition name] below with your partition name (e.g. standard,gpu) + #SBATCH --partition=[partition name] + # Replace [qos name] below with your qos name (e.g. 
standard,long,gpu) + #SBATCH --qos=[qos name] + + # Load VASP version 6 module + module load vasp/6 + + # Set number of OpenMP threads to 1 + export OMP_NUM_THREADS=1 + + # Run standard VASP executable + srun --hint=nomultithread --distribution=block:block vasp_std + +## Running parallel VASP jobs - GPU + +The GPU version of VASP can exploit multiple GPU across multiple nodes, +you should benchmark your system to ensure you understand how many GPU +can be used in parallel for your calculations. + +The following script will run a VASP job using 2 GPU on 1 node. + + #!/bin/bash + + # job options (name, compute nodes, job time) + #SBATCH --job-name=VASP_GPU_test + #SBATCH --nodes=1 + #SBATCH --gres=gpu:2 + #SBATCH --time=0:20:0 + + # Replace [budget code] below with your project code (e.g. t01) + #SBATCH --account=[budget code] + #SBATCH --partition=gpu + #SBATCH --qos=gpu + + # Load VASP version 6 module + module load vasp/6/6.3.2-gpu-nvhpc22 + + # Set number of OpenMP threads to 1 + export OMP_NUM_THREADS=1 + + # Run standard VASP executable with 1 MPI process per GPU + srun --ntasks=2 --cpus-per-task=10 --hint=nomultithread --distribution=block:block vasp_std diff --git a/docs/software-tools/ddt.md b/docs/software-tools/ddt.md new file mode 100644 index 00000000..0838dbd4 --- /dev/null +++ b/docs/software-tools/ddt.md @@ -0,0 +1,137 @@ +# Debugging using Arm DDT + +The Arm Forge tool suite is installed on Cirrus. This includes DDT, +which is a debugging tool for scalar, multi-threaded and large-scale +parallel applications. To compile your code for debugging you will +usually want to specify the `-O0` option to turn off all code +optimisation (as this can produce a mismatch between source code line +numbers and debugging information) and `-g` to include debugging +information in the compiled executable. To use this package you will +need to log in to Cirrus with X11-forwarding enabled, load the Arm Forge +module and execute `forge`: + + module load forge + forge + +## Debugging runs on the login nodes + +You can execute and debug your MPI code on the login node which is +useful for immediate development work with short, small, simple runs to +avoid having to wait in the queue. Firstly ensure you have loaded the +`mpt` module and any other dependencies of your code, then start Forge +and click *Run*. Fill in the necessary details of your code under the +*Application* pane, then tick the *MPI* tick box, specify the number of +MPI processes you wish to run and ensure the implementation is set to +*HPE MPT (2.18+)*. If this is not set correctly then you can update the +configuration by clicking the *Change* button and selecting this option +on the *MPI/UPC Implementation* field of the system pane. When you are +happy with this hit *Run* to start. + +## Debugging runs on the compute nodes + +This involves DDT submitting your job to the queue, and as soon as the +compute nodes start executing you will drop into the debug session and +be able to interact with your code. Start Forge and click on *Run*, then +in the *Application* pane provide the details needed for your code. Then +tick the *MPI* box -- when running on the compute nodes, you must set +the MPI implementation to *Slurm (generic)*. You must also tick the +*Submit to Queue* box. Clicking the *Configure* button in this section, +you must now choose the submission template. One is provided for you at +`/mnt/lustre/indy2lfs/sw/arm/forge/latest/templates/cirrus.qtf` which +you should copy and modify to suit your needs. 
You will need to load any +modules required for your code and perform any other necessary setup, +such as providing extra sbatch options, i.e., whatever is needed for +your code to run in a normal batch job. + + + +!!! Note + + The current Arm Forge licence permits use on the Cirrus CPU nodes only. + The licence does not permit use of DDT/MAP for codes that run on the + Cirrus GPUs. + + +Back in the DDT run window, you can click on *Parameters* in the same +queue pane to set the partition and QoS to use, the account to which the +job should be charged, and the maximum walltime. You can also now look +at the *MPI* pane again and select the number of processes and nodes to +use. Finally, clicking *Submit* will place the job in the queue. A new +window will show you the queue until the job starts at which you can +start to debug. + +## Memory debugging with DDT + +If you are dynamically linking your code and debugging it on the login +node then this is fine (just ensure that the *Preload the memory +debugging library* option is ticked in the *Details* pane.) If you are +dynamically linking but intending to debug running on the compute nodes, +or statically linking then you need to include the compile option +`-Wl,--allow-multiple-definition` and explicitly link your executable +with Allinea's memory debugging library. The exactly library to link +against depends on your code; `-ldmalloc` (for no threading with C), +`-ldmallocth` (for threading with C), `-ldmallocxx` (for no threading +with C++) or `-ldmallocthcxx` (for threading with C++). The library +locations are all set up when the *forge* module is loaded so these +libraries should be found without further arguments. + +## Remote Client + +Arm Forge can connect to remote systems using SSH so you can run the +user interface on your desktop or laptop machine without the need for X +forwarding. Native remote clients are available for Windows, macOS and +Linux. You can download the remote clients from the [Arm +website](https://developer.arm.com/downloads/-/arm-forge). No licence +file is required by a remote client. + + + +!!! Note + + The same versions of Arm Forge must be installed on the local and remote + systems in order to use DDT remotely. + + + +To configure the remote client to connect to Cirrus, start it and then +click on the *Remote Launch* drop-down box and click on *Configure*. In +the new window, click *Add* to create a new login profile. For the +hostname you should provide `username@login.cirrus.ac.uk` where +`username` is your login username. For *Remote Installation Directory*\* +enter `/mnt/lustre/indy2lfs/sw/arm/forge/latest`. To ensure your SSH +private key can be used to connect, the SSH agent on your local machine +should be configured to provide it. You can ensure this by running +`ssh-add ~/.ssh/id_rsa_cirrus` before using the Forge client where you +should replace `~/.ssh/id_rsa_cirrus` with the path to the key you +normally use to log in to Cirrus. This should persist until your local +machine is restarted --only then should you have to re-run `ssh-add`. + +If you only intend to debug jobs on the compute nodes no further +configuration is needed. If however you want to use the login nodes, you +will likely need to write a short bash script to prepare the same +environment you would use if you were running your code interactively on +the login node -- otherwise, the necessary libraries will not be found +while running. 
For example, if using MPT, you might create a file in your home directory containing only one line:
+
+    module load mpt
+
+In your local Forge client you should then edit the *Remote Script* field in the Cirrus login details to contain the path to this script. When you log in the script will be sourced and the software provided by whatever modules it loads become usable.
+
+When you start the Forge client, you will now be able to select the Cirrus login from the Remote Launch drop-down box. After providing your usual login password the connection to Cirrus will be established and you will be able to start debugging.
+
+You can find more detailed information [here](https://developer.arm.com/documentation/101136/2011/Arm-Forge/Connecting-to-a-remote-system).
+
+## Getting further help on DDT
+
+- [DDT website](https://www.arm.com/products/development-tools/server-and-hpc/forge/ddt)
+- [DDT user guide](https://developer.arm.com/documentation/101136/22-1-3/DDT?lang=en)
diff --git a/docs/software-tools/intel-vtune.md b/docs/software-tools/intel-vtune.md new file mode 100644 index 00000000..14d8c407 --- /dev/null +++ b/docs/software-tools/intel-vtune.md @@ -0,0 +1,144 @@
+# Intel VTune
+
+## Profiling using VTune
+
+Intel VTune allows profiling of compiled codes, and is particularly suited to analysing high performance applications involving threads (OpenMP) and MPI (or some combination thereof).
+
+Using VTune is a two-stage process. First, an application is compiled using an appropriate Intel compiler and run in a "collection" phase. The results are stored to file, and may then be inspected interactively via the VTune GUI.
+
+## Collection
+
+Compile the application in the normal way, and run a batch job in exclusive mode to ensure the node is not shared with other jobs. An example is given below.
+
+Collection of performance data is based on a `collect` option, which defines which set of hardware counters is monitored in a given run. As not all counters are available at the same time, a number of different collections are available. A different one may be relevant if you are interested in different aspects of performance. Some standard options are:
+
+`vtune -collect=performance-snapshot` may be used to produce a text summary of performance (typically to standard output), which can be used as a basis for further investigation.
+
+`vtune -collect=hotspots` produces a more detailed analysis which can be used to inspect time taken per function and per line of code.
+
+`vtune -collect=hpc-performance` may be useful for HPC codes.
+
+`vtune -collect=memory-access` will provide figures for memory-related measures including application memory bandwidth.
+
+Use `vtune --help collect` for a full summary of collection options. Note that not all options are available (e.g., prefer NVIDIA profiling for GPU codes).
+
+### Example SLURM script
+
+Here we give an example of profiling an application which has been compiled with Intel 20.4 and requests the `memory-access` collection. We assume the application involves OpenMP threads, but no MPI.
+
+    #!/bin/bash
+
+    #SBATCH --time=00:10:00
+    #SBATCH --nodes=1
+    #SBATCH --exclusive
+
+    #SBATCH --partition=standard
+    #SBATCH --qos=standard
+
+    export OMP_NUM_THREADS=18
+
+    # Load relevant (cf. compile-time) Intel options
+    module load intel-20.4/compilers
+    module load intel-20.4/vtune
+
+    vtune -collect=memory-access -r results-memory ./my_application
+
+Profiling will generate a certain amount of additional text information; this appears on standard output. Detailed profiling data will be stored in various files in a sub-directory, the name of which can be specified using the `-r` option.
+
+Notes
+
+- Older Intel compilers use `amplxe-cl` instead of `vtune` as the command for collection. Some existing features still reflect this older name. Older versions do not offer the "performance-snapshot" collection option.
+
+- Extra time should be allowed in the wall clock time limit to allow for processing of the profiling data by `vtune` at the end of the run. In general, a short run of the application (a few minutes at most) should be tried first.
+
+- A warning may be issued:
+
+        amplxe: Warning: Access to /proc/kallsyms file is limited. Consider changing /proc/sys/kernel/kptr_restrict to 0 to enable resolution of OS kernel and kernel modules symbols.
+
+  This may be safely ignored.
+
+- A warning may be issued:
+
+        amplxe: Warning: The specified data limit of 500 MB is reached. Data collection is stopped. amplxe: Collection detached.
+
+  This can be safely ignored, as a working result will still be obtained. It is possible to increase the limit via the `-data-limit` option (500 MB is the default). However, larger data files can take an extremely long time to process in the report stage at the end of the run, and so the option is not recommended.
+
+- For Intel 20.4, the `-collect=hotspots` option has been observed to be problematic. We suggest it is not used.
+
+### Profiling an MPI code
+
+Intel VTune can also be used to profile MPI codes. It is recommended that the relevant Intel MPI module is used for compilation. The following example uses Intel 18 with the older `amplxe-cl` command:
+
+    #!/bin/bash
+
+    #SBATCH --time=00:10:00
+    #SBATCH --nodes=2
+    #SBATCH --exclusive
+
+    #SBATCH --partition=standard
+    #SBATCH --qos=standard
+
+    export OMP_NUM_THREADS=18
+
+    module load intel-mpi-18
+    module load intel-compilers-18
+    module load intel-vtune-18
+
+    mpirun -np 4 -ppn 2 amplxe-cl -collect hotspots -r vtune-hotspots \
+      ./my_application
+
+Note that the Intel MPI launcher `mpirun` is used, and this precedes the VTune command. The example runs a total of 4 MPI tasks (`-np 4`) with two tasks per node (`-ppn 2`). Each task runs 18 OpenMP threads.
+
+## Viewing the results
+
+We recommend that the latest version of the VTune GUI is used to view results; this can be run interactively with an appropriate X connection. The latest version is available via
+
+    $ module load oneapi
+    $ module load vtune/latest
+    $ vtune-gui
+
+From the GUI, navigate to the appropriate results file to load the analysis. Note that the latest version of VTune will be able to read results generated with previous versions of the Intel compilers.
diff --git a/docs/software-tools/scalasca.md b/docs/software-tools/scalasca.md new file mode 100644 index 00000000..b4b6dbea --- /dev/null +++ b/docs/software-tools/scalasca.md @@ -0,0 +1,56 @@
+# Profiling using Scalasca
+
+Scalasca, an open source performance profiling tool, is installed on Cirrus. Two versions are provided, using GCC 8.2.0 and the Intel 19 compilers; both use the HPE MPT library to provide MPI and SHMEM.
An +important distinction is that the GCC+MPT installation cannot be used to +profile Fortran code as MPT does not provide GCC Fortran module files. +To profile Fortran code, please use the Intel+MPT installation. + +Loading the one of the modules will autoload the correct compiler and +MPI library: + + module load scalasca/2.6-gcc8-mpt225 + +or + + module load scalasca/2.6-intel19-mpt225 + +Once loaded, the profiler may be run with the `scalasca` or `scan` +commands, but the code must first be compiled first with the Score-P +instrumentation wrapper tool. This is done by prepending the compilation +commands with `scorep`, e.g.: + + scorep mpicc -c main.c -o main + scorep mpif90 -openmp main.f90 -o main + +Advanced users may also wish to make use of the Score-P API. This allows +you to manually define function and region entry and exit points. + +You can then profile the execution during a Slurm job by prepending your +`srun` commands with one of the equivalent commands `scalasca -analyze` +or `scan -s`: + + scalasca -analyze srun ./main + scan -s srun ./main + +You will see some output from Scalasca to stdout during the run. +Included in that output will be the name of an experiment archive +directory, starting with *scorep\_*, which will be created in the +working directory. If you want, you can set the name of the directory by +exporting the `SCOREP_EXPERIMENT_DIRECTORY` environment variable in your +job script. + +There is an associated GUI called Cube which can be used to process and +examine the experiment results, allowing you to understand your code's +performance. This has been made available via a Singularity container. +To start it, run the command `cube` followed by the file in the +experiment archive directory ending in *.cubex* (or alternatively the +whole archive), as seen below: + + cube scorep_exp_1/profile.cubex + +The Scalasca quick reference guide found +[here](https://apps.fz-juelich.de/scalasca/releases/scalasca/2.6/docs/QuickReference.pdf) +provides a good overview of the toolset's use, from instrumentation and +use of the API to analysis with Cube. 
diff --git a/docs/stylesheets/cirrus - Copy.css b/docs/stylesheets/cirrus - Copy.css new file mode 100644 index 00000000..f94d5d40 --- /dev/null +++ b/docs/stylesheets/cirrus - Copy.css @@ -0,0 +1,53 @@ +:root { + --md-primary-fg-color: #33806E; + --md-accent-fg-color: #201F5E; +} + +[data-md-color-scheme=default] { + --md-default-fg-color:#000000de; + --md-default-fg-color--light:#0000008a; + --md-default-fg-color--lighter:#00000052; + --md-default-fg-color--lightest:#00000012; + --md-default-bg-color:#fff; + --md-default-bg-color--light:#ffffffb3; + --md-default-bg-color--lighter:#ffffff4d; + --md-default-bg-color--lightest:#ffffff1f; + --md-code-fg-color:#36464e; + --md-code-bg-color:#D3F5FD; + --md-code-hl-color:#ffff0080; + --md-code-hl-number-color:#d52a2a; + --md-code-hl-special-color:#db1457; + --md-code-hl-function-color:#a846b9; + --md-code-hl-constant-color:#6e59d9; + --md-code-hl-keyword-color:#3f6ec6; + --md-code-hl-string-color:#1c7d4d; + --md-code-hl-name-color:var(--md-code-fg-color); + --md-code-hl-operator-color:var(--md-default-fg-color--light); + --md-code-hl-punctuation-color:var(--md-default-fg-color--light); + --md-code-hl-comment-color:var(--md-default-fg-color--light); + --md-code-hl-generic-color:var(--md-default-fg-color--light); + --md-code-hl-variable-color:var(--md-default-fg-color--light); + --md-typeset-color:var(--md-default-fg-color); + --md-typeset-a-color:var(--md-primary-fg-color); + --md-typeset-mark-color:#ffff0080; + --md-typeset-del-color:#f5503d26; + --md-typeset-ins-color:#0bd57026; + --md-typeset-kbd-color:#fafafa; + --md-typeset-kbd-accent-color:#fff; + --md-typeset-kbd-border-color:#b8b8b8; + --md-typeset-table-color:#0000001f; + --md-typeset-table-color--light:rgba(0,0,0,.035); + --md-admonition-fg-color:var(--md-default-fg-color); + --md-admonition-bg-color:var(--md-default-bg-color); + --md-warning-fg-color:#000000de; + --md-warning-bg-color:#ff9; + --md-footer-fg-color:#fff; + --md-footer-fg-color--light:#ffffffb3; + --md-footer-fg-color--lighter:#ffffff73; + --md-footer-bg-color:#000000de; + --md-footer-bg-color--dark:#00000052; + --md-shadow-z1:0 0.2rem 0.5rem #0000000d,0 0 0.05rem #0000001a; + --md-shadow-z2:0 0.2rem 0.5rem #0000001a,0 0 0.05rem #00000040; + --md-shadow-z3:0 0.2rem 0.5rem #0003,0 0 0.05rem #00000059 +} + diff --git a/docs/stylesheets/cirrus.css b/docs/stylesheets/cirrus.css new file mode 100644 index 00000000..c0b835bf --- /dev/null +++ b/docs/stylesheets/cirrus.css @@ -0,0 +1,22 @@ +:root { + --md-primary-fg-color: #33806E; + --md-accent-fg-color: #201F5E; +} + +[data-md-color-scheme=default] { + --md-code-fg-color:#36464e; + --md-code-bg-color:#f0fcff; + --md-admonition-fg-color:var(--md-default-fg-color); + --md-admonition-bg-color:var(--md-default-bg-color); + --md-warning-fg-color:#000000de; + --md-warning-bg-color:#ff9; + --md-footer-fg-color:#fff; + --md-footer-fg-color--light:#ffffffb3; + --md-footer-fg-color--lighter:#ffffff73; + --md-footer-bg-color:#000000de; + --md-footer-bg-color--dark:#00000052; + --md-shadow-z1:0 0.2rem 0.5rem #0000000d,0 0 0.05rem #0000001a; + --md-shadow-z2:0 0.2rem 0.5rem #0000001a,0 0 0.05rem #00000040; + --md-shadow-z3:0 0.2rem 0.5rem #0003,0 0 0.05rem #00000059 +} + diff --git a/docs/user-guide/batch.md b/docs/user-guide/batch.md new file mode 100644 index 00000000..d6f9a94d --- /dev/null +++ b/docs/user-guide/batch.md @@ -0,0 +1,1026 @@ +# Running Jobs on Cirrus + +As with most HPC services, Cirrus uses a scheduler to manage access to +resources and ensure that the thousands of 
different users of system are +able to share the system and all get access to the resources they +require. Cirrus uses the Slurm software to schedule jobs. + +Writing a submission script is typically the most convenient way to +submit your job to the scheduler. Example submission scripts (with +explanations) for the most common job types are provided below. + +Interactive jobs are also available and can be particularly useful for +developing and debugging applications. More details are available below. + + + +!!! Hint + + If you have any questions on how to run jobs on Cirrus do not hesitate to contact the [Cirrus Service Desk](mailto:support@cirrus.ac.uk). + + + +You typically interact with Slurm by issuing Slurm commands from the +login nodes (to submit, check and cancel jobs), and by specifying Slurm +directives that describe the resources required for your jobs in job +submission scripts. + +## Basic Slurm commands + +There are three key commands used to interact with the Slurm on the +command line: + +- `sinfo` - Get information on the partitions and resources available +- `sbatch jobscript.slurm` - Submit a job submission script (in this + case called: `jobscript.slurm`) to the scheduler +- `squeue` - Get the current status of jobs submitted to the scheduler +- `scancel 12345` - Cancel a job (in this case with the job ID `12345`) + +We cover each of these commands in more detail below. + +### `sinfo`: information on resources + +`sinfo` is used to query information about available resources and +partitions. Without any options, `sinfo` lists the status of all +resources and partitions, e.g. + + [auser@cirrus-login3 ~]$ sinfo + + PARTITION AVAIL TIMELIMIT NODES STATE NODELIST + standard up infinite 280 idle r1i0n[0-35],r1i1n[0-35],r1i2n[0-35],r1i3n[0-35],r1i4n[0-35],r1i5n[0-35],r1i6n[0-35],r1i7n[0-6,9-15,18-24,27-33] + gpu up infinite 36 idle r2i4n[0-8],r2i5n[0-8],r2i6n[0-8],r2i7n[0-8] + +### `sbatch`: submitting jobs + +`sbatch` is used to submit a job script to the job submission system. +The script will typically contain one or more `srun` commands to launch +parallel tasks. + +When you submit the job, the scheduler provides the job ID, which is +used to identify this job in other Slurm commands and when looking at +resource usage in SAFE. + + [auser@cirrus-login3 ~]$ sbatch test-job.slurm + Submitted batch job 12345 + +### `squeue`: monitoring jobs + +`squeue` without any options or arguments shows the current status of +all jobs known to the scheduler. For example: + + [auser@cirrus-login3 ~]$ squeue + JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) + 1554 comp-cse CASTEP_a auser R 0:03 2 r2i0n[18-19] + +will list all jobs on Cirrus. + +The output of this is often overwhelmingly large. You can restrict the +output to just your jobs by adding the `-u $USER` option: + + [auser@cirrus-login3 ~]$ squeue -u $USER + +### `scancel`: deleting jobs + +`scancel` is used to delete a jobs from the scheduler. If the job is +waiting to run it is simply cancelled, if it is a running job then it is +stopped immediately. You need to provide the job ID of the job you wish +to cancel/stop. For example: + + [auser@cirrus-login3 ~]$ scancel 12345 + +will cancel (if waiting) or stop (if running) the job with ID `12345`. + +## Resource Limits + + + +!!! Note + + If you have requirements which do not fit within the current QoS, please contact the Service Desk and we can discuss how to accommodate your requirements. + + + +There are different resource limits on Cirrus for different purposes. 
+There are three different things you need to specify for each job: + +- The amount of *primary resource* you require (more information on this + below) +- The *partition* that you want to use - this specifies the nodes that + are eligible to run your job +- The *Quality of Service (QoS)* that you want to use - this specifies + the job limits that apply + +Each of these aspects are described in more detail below. + +The *primary resources* you request are *compute* resources: either CPU +cores on the standard compute nodes or GPU cards on the GPU compute +nodes. Other node resources: memory on the standard compute nodes; +memory and CPU cores on the GPU nodes are assigned pro rata based on the +primary resource that you request. + + + +!!! Warning + + + + On Cirrus, you cannot specify the memory for a job using the `--mem` + options to Slurm (e.g. `--mem`, `--mem-per-cpu`, `--mem-per-gpu`). The + amount of memory you are assigned is calculated from the amount of + primary resource you request. + + + +### Primary resources on standard (CPU) compute nodes + +The *primary resource* you request on standard compute nodes are CPU +cores. The maximum amount of memory you are allocated is computed as the +number of CPU cores you requested multiplied by 1/36th of the total +memory available (as there are 36 CPU cores per node). So, if you +request the full node (36 cores), then you will be allocated a maximum +of all of the memory (256 GB) available on the node; however, if you +request 1 core, then you will be assigned a maximum of 256/36 = 7.1 GB +of the memory available on the node. + +!!! Note + + Using the `--exclusive` option in jobs will give you access to the full + node memory even if you do not explicitly request all of the CPU cores + on the node. + + +!!! Warning + + Using the `--exclusive` option will charge your account for the usage of + the entire node, even if you don't request all the cores in your + scripts. + +!!! Note + + You will not generally have access to the full amount of memory resource + on the the node as some is retained for running the operating system and + other system processes. + + + +### Primary resources on GPU nodes + +The *primary resource* you request on standard compute nodes are GPU +cards. The maximum amount of memory and CPU cores you are allocated is +computed as the number of GPU cards you requested multiplied by 1/4 of +the total available (as there are 4 GPU cards per node). So, if you +request the full node (4 GPU cards), then you will be allocated a +maximum of all of the memory (384 GB) available on the node; however, if +you request 1 GPU card, then you will be assigned a maximum of 384/4 = +96 GB of the memory available on the node. + + + +!!! Note + + Using the `--exclusive` option in jobs will give you access to all of + the CPU cores and the full node memory even if you do not explicitly + request all of the GPU cards on the node. + + +!!! Warning + + In order to run jobs on the GPU nodes your budget must have positive GPU + hours *and* core hours associated with it. However, only your GPU hours + will be consumed when running these jobs. + + +!!! Warning + + + Using the `--exclusive` option will charge your account for the usage of + the entire node, *i.e.*, 4 GPUs, even if you don't request all the GPUs + in your submission script. + + + +### Partitions + +On Cirrus, compute nodes are grouped into partitions. You will have to +specify a partition using the `--partition` option in your submission +script. 
The following table has a list of active partitions on Cirrus. + +| Partition | Description | Total nodes available | Notes | +|-----------|--------------------------------------------------------------------------------|-----------------------|-------| +| standard | CPU nodes with 2x 18-core Intel Broadwell processors | 352 | | +| gpu | GPU nodes with 4x Nvidia V100 GPU and 2x 20-core Intel Cascade Lake processors | 36 | | + +Cirrus Partitions + +You can list the active partitions using + + sinfo + + + +!!! Note + + you may not have access to all the available partitions. + + + +### Quality of Service (QoS) + +On Cirrus Quality of Service (QoS) is used alongside partitions to set +resource limits. The following table has a list of active QoS on Cirrus. + +| QoS Name | Jobs Running Per User | Jobs Queued Per User | Max Walltime | Max Size | Applies to Partitions | Notes | +|--------------|-----------------------|----------------------|--------------|-----------------------------------------|-----------------------|-------| +| standard | No limit | 500 jobs | 4 days | 88 nodes (3168 cores/25%) | standard | | +| largescale | 1 job | 4 jobs | 24 hours | 228 nodes (8192+ cores/65%) or 144 GPUs | standard, gpu | | +| long | 5 jobs | 20 jobs | 14 days | 16 nodes or 8 GPUs | standard, gpu | | +| highpriority | 10 jobs | 20 jobs | 4 days | 140 nodes | standard | | +| gpu | No limit | 128 jobs | 4 days | 64 GPUs (16 nodes/40%) | gpu | | +| short | 1 job | 2 jobs | 20 minutes | 2 nodes or 4 GPUs | standard, gpu | | +| lowpriority | No limit | 100 jobs | 2 days | 36 nodes (1296 cores/10%) or 16 GPUs | standard, gpu | | + +#### Cirrus QoS + +You can find out the QoS that you can use by running the following +command: + + sacctmgr show assoc user=$USER cluster=cirrus format=cluster,account,user,qos%50 + +## Troubleshooting + +### Slurm error handling + +#### MPI jobs + +Users of MPI codes may wish to ensure termination of all tasks on the +failure of one individual task by specifying the `--kill-on-bad-exit` +argument to `srun`. E.g., + +``` bash +srun -n 36 --kill-on-bad-exit ./my-mpi-program +``` + +This can prevent effective "hanging" of the job until the wall time +limit is reached. + +#### Automatic resubmission + +Jobs that fail are not automatically resubmitted by Slurm on Cirrus. +Automatic resubmission can be enabled for a job by specifying the +`--requeue` option to `sbatch`. + +### Slurm error messages + +An incorrect submission will cause Slurm to return an error. Some common +problems are listed below, with a suggestion about the likely cause: + +- `sbatch: unrecognized option ` + + - One of your options is invalid or has a typo. `man sbatch` to help. + +- `error: Batch job submission failed: No partition specified or system default partition` + + > A `--partition=` option is missing. You must specify the partition + > (see the list above). This is most often `--partition=standard`. + +- `error: invalid partition specified: ` + + > `error: Batch job submission failed: Invalid partition name specified` + > + > Check the partition exists and check the spelling is correct. + +- `error: Batch job submission failed: Invalid account or account/partition combination specified` + + > This probably means an invalid account has been given. Check the + > `--account=` options against valid accounts in SAFE. + +- `error: Batch job submission failed: Invalid qos specification` + + > A QoS option is either missing or invalid. 
Check the script has a + > `--qos=` option and that the option is a valid one from the table + > above. (Check the spelling of the QoS is correct.) + +- `error: Your job has no time specification (--time=)...` + + > Add an option of the form `--time=hours:minutes:seconds` to the + > submission script. E.g., `--time=01:30:00` gives a time limit of 90 + > minutes. + +- `error: QOSMaxWallDurationPerJobLimit` + `error: Batch job submission failed: Job violates accounting/QOS policy` + `(job submit limit, user's size and/or time limits)` + + The script has probably specified a time limit which is too long for + the corresponding QoS. E.g., the time limit for the short QoS is 20 + minutes. + +### Slurm queued reasons + +The `squeue` command allows users to view information for jobs managed +by Slurm. Jobs typically go through the following states: PENDING, +RUNNING, COMPLETING, and COMPLETED. The first table provides a +description of some job state codes. The second table provides a +description of the reasons that cause a job to be in a state. + +| Status | Code | Description | +|---------------|------|-----------------------------------------------------------------------------------------------------------------| +| PENDING | PD | Job is awaiting resource allocation. | +| RUNNING | R | Job currently has an allocation. | +| SUSPENDED | S | Job currently has an allocation. | +| COMPLETING | CG | Job is in the process of completing. Some processes on some nodes may still be active. | +| COMPLETED | CD | Job has terminated all processes on all nodes with an exit code of zero. | +| TIMEOUT | TO | Job terminated upon reaching its time limit. | +| STOPPED | ST | Job has an allocation, but execution has been stopped with SIGSTOP signal. CPUS have been retained by this job. | +| OUT_OF_MEMORY | OOM | Job experienced out of memory error. | +| FAILED | F | Job terminated with non-zero exit code or other failure condition. | +| NODE_FAIL | NF | Job terminated due to failure of one or more allocated nodes. | +| CANCELLED | CA | Job was explicitly cancelled by the user or system administrator. The job may or may not have been initiated. | + +Slurm Job State codes + +For a full list of see [Job State +Codes](https://slurm.schedmd.com/squeue.html#lbAG) + +| Reason | Description | +|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Priority | One or more higher priority jobs exist for this partition or advanced reservation. | +| Resources | The job is waiting for resources to become available. | +| BadConstraints | The job's constraints can not be satisfied. | +| BeginTime | The job's earliest start time has not yet been reached. | +| Dependency | This job is waiting for a dependent job to complete. | +| Licenses | The job is waiting for a license. | +| WaitingForScheduling | No reason has been set for this job yet. Waiting for the scheduler to determine the appropriate reason. | +| Prolog | Its PrologSlurmctld program is still running. | +| JobHeldAdmin | The job is held by a system administrator. | +| JobHeldUser | The job is held by the user. | +| JobLaunchFailure | The job could not be launched. 
This may be due to a file system problem, invalid program name, etc. | +| NonZeroExitCode | The job terminated with a non-zero exit code. | +| InvalidAccount | The job's account is invalid. | +| InvalidQOS | The job's QOS is invalid. | +| QOSUsageThreshold | Required QOS threshold has been breached. | +| QOSJobLimit | The job's QOS has reached its maximum job count. | +| QOSResourceLimit | The job's QOS has reached some resource limit. | +| QOSTimeLimit | The job's QOS has reached its time limit. | +| NodeDown | A node required by the job is down. | +| TimeLimit | The job exhausted its time limit. | +| ReqNodeNotAvail | Some node specifically required by the job is not currently available. The node may currently be in use, reserved for another job, in an advanced reservation, DOWN, DRAINED, or not responding. Nodes which are DOWN, DRAINED, or not responding will be identified as part of the job's "reason" field as "UnavailableNodes". Such nodes will typically require the intervention of a system administrator to make available. | + +Slurm Job Reasons + +For a full list of see [Job +Reasons](https://slurm.schedmd.com/squeue.html#lbAF) + +## Output from Slurm jobs + +Slurm places standard output (STDOUT) and standard error (STDERR) for +each job in the file `slurm_.out`. This file appears in the job's +working directory once your job starts running. + + +!!! Note + + + This file is plain text and can contain useful information to help + debugging if a job is not working as expected. The Cirrus Service Desk + team will often ask you to provide the contents of this file if you + contact them for help with issues. + + + +## Specifying resources in job scripts + +You specify the resources you require for your job using directives at +the top of your job submission script using lines that start with the +directive `#SBATCH`. + + + +!!! Note + + Options provided using `#SBATCH` directives can also be specified as + command line options to `srun`. + + + +If you do not specify any options, then the default for each option will +be applied. As a minimum, all job submissions must specify the budget +that they wish to charge the job too, the partition they wish to use and +the QoS they want to use with the options: + + - `--account=` your budget ID is usually something like + `t01` or `t01-test`. You can see which budget codes you can charge + to in SAFE. + - `--partition=` The partition specifies the set of nodes + you want to run on. More information on available partitions is + given above. + - `--qos="QoS"` The QoS specifies the limits to apply to your job. + More information on available QoS are given above. + +Other common options that are used are: + + - `--time=` the maximum walltime for your job. *e.g.* For a + 6.5 hour walltime, you would use `--time=6:30:0`. + - `--job-name=` set a name for the job to help identify it in + Slurm command output. + +Other not so common options that are used are: + + - `--switches=max-switches{@max-time-to-wait}` optimum switches and + max time to wait for them. The scheduler will wait indefinitely when + attempting to place these jobs. Users can override this indefinite + wait. The scheduler will deliberately place work to clear space for + these jobs, so we don't foresee the indefinite wait nature to be an + issue. + +In addition, parallel jobs will also need to specify how many nodes, +parallel processes and threads they require. 
+ + - `--exclusive` to ensure that you have exclusive access to a compute + node + - `--nodes=` the number of nodes to use for the job. + - `--tasks-per-node=` the number of parallel + processes (e.g. MPI ranks) per node. + - `--cpus-per-task=` the number of threads per + parallel process (e.g. number of OpenMP threads per MPI task for + hybrid MPI/OpenMP jobs). **Note:** you must also set the + `OMP_NUM_THREADS` environment variable if using OpenMP in your job + and usually add the `--cpu-bind=cores` option to `srun` + + + +!!! Note + + For parallel jobs, you should request exclusive node access with the + `--exclusive` option to ensure you get the expected resources and + performance. + + + +## `srun`: Launching parallel jobs + +If you are running parallel jobs, your job submission script should +contain one or more `srun` commands to launch the parallel executable +across the compute nodes. As well as launching the executable, `srun` +also allows you to specify the distribution and placement (or *pinning*) +of the parallel processes and threads. + +If you are running MPI jobs that do not also use OpenMP threading, then +you should use `srun` with no additional options. `srun` will use the +specification of nodes and tasks from your job script, `sbatch` or +`salloc` command to launch the correct number of parallel tasks. + +If you are using OpenMP threads then you will generally add the +`--cpu-bind=cores` option to `srun` to bind threads to cores to obtain +the best performance. + + +!!! Note + + See the example job submission scripts below for examples of using + `srun` for pure MPI jobs and for jobs that use OpenMP threading. + + + +## Example parallel job submission scripts + +A subset of example job submission scripts are included in full below. + + + +!!! Hint + + Do not replace `srun` with `mpirun` in the following examples. Although + this might work under special circumstances, it is not guaranteed and + therefore not supported. + + + +### Example: job submission script for MPI parallel job + +A simple MPI job submission script to submit a job using 4 compute nodes +and 36 MPI ranks per node for 20 minutes would look like: + +``` bash +#!/bin/bash + +# Slurm job options (name, compute nodes, job time) +#SBATCH --job-name=Example_MPI_Job +#SBATCH --time=0:20:0 +#SBATCH --exclusive +#SBATCH --nodes=4 +#SBATCH --tasks-per-node=36 +#SBATCH --cpus-per-task=1 + +# Replace [budget code] below with your budget code (e.g. t01) +#SBATCH --account=[budget code] +# We use the "standard" partition as we are running on CPU nodes +#SBATCH --partition=standard +# We use the "standard" QoS as our runtime is less than 4 days +#SBATCH --qos=standard + +# Load the default HPE MPI environment +module load mpt + +# Change to the submission directory +cd $SLURM_SUBMIT_DIR + +# Set the number of threads to 1 +# This prevents any threaded system libraries from automatically +# using threading. +export OMP_NUM_THREADS=1 + +# Launch the parallel job +# Using 144 MPI processes and 36 MPI processes per node +#  srun picks up the distribution from the sbatch options +srun ./my_mpi_executable.x +``` + +This will run your executable "my_mpi_executable.x" in parallel on 144 +MPI processes using 4 nodes (36 cores per node, i.e. not using +hyper-threading). Slurm will allocate 4 nodes to your job and srun will +place 36 MPI processes on each node (one per physical core). + +By default, srun will launch an MPI job that uses all of the cores you +have requested via the "nodes" and "tasks-per-node" options. 
If you want +to run fewer MPI processes than cores you will need to change the +script. + +For example, to run this program on 128 MPI processes you have two +options: + + - set `--tasks-per-node=32` for an even distribution across nodes + (this may not always be possible depending on the exact combination + of nodes requested and MPI tasks required) + - set the number of MPI tasks explicitly using `#SBATCH --ntasks=128` + + +!!! Note + + If you specify `--ntasks` explicitly and it is not compatible with the + value of `tasks-per-node` then you will get a warning message from + srun such as `srun: Warning: can't honor --ntasks-per-node set to 36`. + + In this case, srun does the sensible thing and allocates MPI processes + as evenly as it can across nodes. For example, the second option above + would result in 32 MPI processes on each of the 4 nodes. + + +See above for a more detailed discussion of the different `sbatch` +options. + +#### Note on MPT task placement + +By default, `mpt` will distribute processss to physical cores (cores +0-17 on socket 0, and cores 18-35 on socket 1) in a cyclic fashion. That +is, rank 0 would be placed on core 0, task 1 on core 18, rank 2 on core +1, and so on (in a single-node job). This may be undesirable. Block, +rather than cyclic, distribution can be obtained with + +``` bash +#SBATCH --distribution=block:block +``` + +The `block:block` here refers to the distribution on both nodes and +sockets. This will distribute rank 0 for core 0, rank 1 to core 1, rank +2 to core 2, and so on. + +### Example: job submission script for MPI+OpenMP (mixed mode) parallel job + +Mixed mode codes that use both MPI (or another distributed memory +parallel model) and OpenMP should take care to ensure that the shared +memory portion of the process/thread placement does not span more than +one node. This means that the number of shared memory threads should be +a factor of 36. + +In the example below, we are using 4 nodes for 6 hours. There are 8 MPI +processes in total (2 MPI processes per node) and 18 OpenMP threads per +MPI process. This results in all 36 physical cores per node being used. + + + +!!! Note + + the use of the `--cpu-bind=cores` option to generate the correct + affinity settings. + + + +``` bash +#!/bin/bash + +# Slurm job options (name, compute nodes, job time) +#SBATCH --job-name=Example_MPI_Job +#SBATCH --time=0:20:0 +#SBATCH --exclusive +#SBATCH --nodes=4 +#SBATCH --ntasks=8 +#SBATCH --tasks-per-node=2 +#SBATCH --cpus-per-task=18 + +# Replace [budget code] below with your project code (e.g. 
t01) +#SBATCH --account=[budget code] +# We use the "standard" partition as we are running on CPU nodes +#SBATCH --partition=standard +# We use the "standard" QoS as our runtime is less than 4 days +#SBATCH --qos=standard + +# Load the default HPE MPI environment +module load mpt + +# Change to the submission directory +cd $SLURM_SUBMIT_DIR + +# Set the number of threads to 18 +# There are 18 OpenMP threads per MPI process +export OMP_NUM_THREADS=18 + +# Launch the parallel job +# Using 8 MPI processes +# 2 MPI processes per node +# 18 OpenMP threads per MPI process + +srun --cpu-bind=cores ./my_mixed_executable.x arg1 arg2 +``` + +### Example: job submission script for OpenMP parallel job + +A simple OpenMP job submission script to submit a job using 1 compute +nodes and 36 threads for 20 minutes would look like: + +``` bash +#!/bin/bash + +# Slurm job options (name, compute nodes, job time) +#SBATCH --job-name=Example_OpenMP_Job +#SBATCH --time=0:20:0 +#SBATCH --exclusive +#SBATCH --nodes=1 +#SBATCH --tasks-per-node=1 +#SBATCH --cpus-per-task=36 + +# Replace [budget code] below with your budget code (e.g. t01) +#SBATCH --account=[budget code] +# We use the "standard" partition as we are running on CPU nodes +#SBATCH --partition=standard +# We use the "standard" QoS as our runtime is less than 4 days +#SBATCH --qos=standard + +# Load any required modules +module load mpt + +# Change to the submission directory +cd $SLURM_SUBMIT_DIR + +# Set the number of threads to the CPUs per task +export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK + +# Launch the parallel job +# Using 36 threads per node +#  srun picks up the distribution from the sbatch options +srun --cpu-bind=cores ./my_openmp_executable.x +``` + +This will run your executable "my_openmp_executable.x" in parallel on 36 +threads. Slurm will allocate 1 node to your job and srun will place 36 +threads (one per physical core). + +See above for a more detailed discussion of the different `sbatch` +options + +## Job arrays + +The Slurm job scheduling system offers the *job array* concept, for +running collections of almost-identical jobs. For example, running the +same program several times with different arguments or input data. + +Each job in a job array is called a *subjob*. The subjobs of a job array +can be submitted and queried as a unit, making it easier and cleaner to +handle the full set, compared to individual jobs. + +All subjobs in a job array are started by running the same job script. +The job script also contains information on the number of jobs to be +started, and Slurm provides a subjob index which can be passed to the +individual subjobs or used to select the input data per subjob. + +### Job script for a job array + +As an example, the following script runs 56 subjobs, with the subjob +index as the only argument to the executable. Each subjob requests a +single node and uses all 36 cores on the node by placing 1 MPI process +per core and specifies 4 hours maximum runtime per subjob: + +``` bash +#!/bin/bash +# Slurm job options (name, compute nodes, job time) + +#SBATCH --name=Example_Array_Job +#SBATCH --time=04:00:00 +#SBATCH --exclusive +#SBATCH --nodes=1 +#SBATCH --tasks-per-node=36 +#SBATCH --cpus-per-task=1 +#SBATCH --array=0-55 + +# Replace [budget code] below with your budget code (e.g. 
t01) +#SBATCH --account=[budget code] +# We use the "standard" partition as we are running on CPU nodes +#SBATCH --partition=standard +# We use the "standard" QoS as our runtime is less than 4 days +#SBATCH --qos=standard + +# Load the default HPE MPI environment +module load mpt + +# Change to the submission directory +cd $SLURM_SUBMIT_DIR + +# Set the number of threads to 1 +# This prevents any threaded system libraries from automatically +# using threading. +export OMP_NUM_THREADS=1 + +srun /path/to/exe $SLURM_ARRAY_TASK_ID +``` + +### Submitting a job array + +Job arrays are submitted using `sbatch` in the same way as for standard +jobs: + + sbatch job_script.pbs + +## Job chaining + +Job dependencies can be used to construct complex pipelines or chain +together long simulations requiring multiple steps. + + + +!!! Note + + The `--parsable` option to `sbatch` can simplify working with job + dependencies. It returns the job ID in a format that can be used as the + input to other commands. + + + +For example: + + jobid=$(sbatch --parsable first_job.sh) + sbatch --dependency=afterok:$jobid second_job.sh + +or for a longer chain: + + jobid1=$(sbatch --parsable first_job.sh) + jobid2=$(sbatch --parsable --dependency=afterok:$jobid1 second_job.sh) + jobid3=$(sbatch --parsable --dependency=afterok:$jobid1 third_job.sh) + sbatch --dependency=afterok:$jobid2,afterok:$jobid3 last_job.sh + +## Interactive Jobs + +When you are developing or debugging code you often want to run many +short jobs with a small amount of editing the code between runs. This +can be achieved by using the login nodes to run small/short MPI jobs. +However, you may want to test on the compute nodes (e.g. you may want to +test running on multiple nodes across the high performance +interconnect). One way to achieve this on Cirrus is to use an +interactive jobs. + +Interactive jobs via SLURM take two slightly different forms. The first +uses `srun` directly to allocate resource to be used interactively; the +second uses both `salloc` and `srun`. + +### Using srun + +An interactive job via `srun` allows you to execute commands directly +from the command line without using a job submission script, and to see +the output from your program directly in the terminal. + +A convenient way to do this is as follows. + + [user@cirrus-login1]$ srun --exclusive --nodes=1 --time=00:20:00 --partition=standard --qos=standard --account=z04 --pty /usr/bin/bash --login + [user@r1i0n14]$ + +This requests the exclusive use of one node for the given time (here, 20 +minutes). The `--pty /usr/bin/bash --login` requests an interactive +login shell be started. (Note the prompt has changed.) Interactive +commands can then be used as normal and will execute on the compute +node. When no longer required, you can type `exit` or CTRL-D to release +the resources and return control to the front end shell. + + [user@r1i0n14]$ exit + logout + [user@cirrus-login1]$ + +Note that the new interactive shell will reflect the environment of the +original login shell. If you do not wish this, add the `--export=none` +argument to `srun` to provide a clean login environment. + +Within an interactive job, one can use `srun` to launch parallel jobs in +the normal way, e.g., + + [user@r1i0n14]$ srun -n 2 ./a.out + +In this context, one could also use `mpirun` directly. Note we are +limited to the 36 cores of our original `--nodes=1` `srun` request. 
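+
+As a short illustrative sketch (the module, executable and account shown are examples only), a clean-environment interactive session combining the options above might look like:
+
+    [user@cirrus-login1]$ srun --export=none --exclusive --nodes=1 --time=00:20:00 --partition=standard --qos=standard --account=t01 --pty /usr/bin/bash --login
+    [user@r1i0n14]$ module load mpt
+    [user@r1i0n14]$ srun -n 4 ./a.out
+    [user@r1i0n14]$ exit
+    logout
+    [user@cirrus-login1]$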
+ +### Using `salloc` with `srun` + +This approach uses the`salloc` command to reserve compute nodes and then +`srun` to launch relevant work. + +To submit a request for a job reserving 2 nodes (72 physical cores) for +1 hour you would issue the command: + +``` bash +[user@cirrus-login1]$ salloc --exclusive --nodes=2 --tasks-per-node=36 --cpus-per-task=1 --time=01:00:00 --partition=standard --qos=standard --account=t01 +salloc: Granted job allocation 8699 +salloc: Waiting for resource configuration +salloc: Nodes r1i7n[13-14] are ready for job +[user@cirrus-login1]$ +``` + +Note that this starts a new shell on the login node associated with the +allocation (the prompt has not changed). The allocation may be released +by exiting this new shell. + + [user@cirrus-login1]$ exit + salloc: Relinquishing job allocation 8699 + [user@cirrus-login1]$ + +While the allocation lasts you will be able to run parallel jobs on the +compute nodes by issuing the `srun` command in the normal way. The +resources available are those specified in the original `salloc` +command. For example, with the above allocation, + + $ srun ./mpi-code.out + +will run 36 MPI tasks per node on two nodes. + +If your allocation reaches its time limit, it will automatically be +termintated and the associated shell will exit. To check that the +allocation is still running, use `squeue`: + + [user@cirrus-login1]$ squeue -u user + JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) + 8718 standard bash user R 0:07 2 r1i7n[18-19] + +Choose a time limit long enough to allow the relevant work to be +completed. + +The `salloc` method may be useful if one wishes to associate operations +on the login node (e.g., via a GUI) with work in the allocation itself. + +## Reservations + +Reservations are available on Cirrus. These allow users to reserve a +number of nodes for a specified length of time starting at a particular +time on the system. + +Reservations require justification. They will only be approved if the +request could not be fulfilled with the standard queues. For example, +you require a job/jobs to run at a particular time e.g. for a +demonstration or course. + + + +!!! Note + + Reservation requests must be submitted at least 120 hours in advance of + the reservation start time. We cannot guarantee to meet all reservation + requests due to potential conflicts with other demands on the service + but will do our best to meet all requests. + + + +Reservations will be charged at 1.5 times the usual rate and our policy +is that they will be charged the full rate for the entire reservation at +the time of booking, whether or not you use the nodes for the full time. +In addition, you will not be refunded the compute time if you fail to +use them due to a job crash unless this crash is due to a system +failure. + +To request a reservation you complete a form on SAFE: + + 1. \[Log into SAFE\]() + 2. Under the "Login accounts" menu, choose the "Request reservation" + option + +On the first page, you need to provide the following: + + - The start time and date of the reservation. + - The end time and date of the reservation. + - Your justification for the reservation -- this must be provided or + the request will be rejected. + - The number of nodes required. + +On the second page, you will need to specify which username you wish the +reservation to be charged against and, once the username has been +selected, the budget you want to charge the reservation to. 
+selected username will be charged for the reservation but the
+reservation can be used by all members of the selected budget.)
+
+Your request will be checked by the Cirrus User Administration team and,
+if approved, you will be provided with a reservation ID which can be
+used on the system. To submit jobs to a reservation, you need to add the
+`--reservation=[reservation ID]` and `--qos=reservation` options to your
+job submission script or Slurm job submission command.
+
+
+!!! Note
+
+    You must have at least 1 CPUh - and 1 GPUh for reservations on GPU
+    nodes - to be able to submit jobs to reservations.
+
+
+
+!!! Tip
+
+    You can submit jobs to a reservation as soon as the reservation has been
+    set up; jobs will remain queued until the reservation starts.
+
+
+## Serial jobs
+
+Unlike parallel jobs, serial jobs will generally not need to specify the
+number of nodes and exclusive access (unless they need access to all of
+the memory on a node). You usually only need the `--ntasks=1` specifier.
+For example, a serial job submission script could look like:
+
+``` bash
+#!/bin/bash
+
+# Slurm job options (name, compute nodes, job time)
+#SBATCH --job-name=Example_Serial_Job
+#SBATCH --time=0:20:0
+#SBATCH --ntasks=1
+
+# Replace [budget code] below with your budget code (e.g. t01)
+#SBATCH --account=[budget code]
+# We use the "standard" partition as we are running on CPU nodes
+#SBATCH --partition=standard
+# We use the "standard" QoS as our runtime is less than 4 days
+#SBATCH --qos=standard
+
+# Change to the submission directory
+cd $SLURM_SUBMIT_DIR
+
+# Enforce threading to 1 in case underlying libraries are threaded
+export OMP_NUM_THREADS=1
+
+# Launch the serial job
+# Using 1 thread
+srun --cpu-bind=cores ./my_serial_executable.x
+```
+
+
+
+!!! Note
+
+    Remember that you will be allocated memory based on the number of tasks
+    (i.e. CPU cores) that you request. You will get ~7.1 GB per task/core.
+    If you need more than this for your serial job then you should ask for
+    the number of tasks you need for the required memory (or use the
+    `--exclusive` option to get access to all the memory on a node) and
+    launch specifying a single task using
+    `srun --ntasks=1 --cpu-bind=cores`.
+
+
+
+## Temporary files and `/tmp` in batch jobs
+
+Applications which normally read and write temporary files from `/tmp`
+may require some care in batch jobs on Cirrus. As the size of `/tmp` on
+backend nodes is relatively small (\< 150 MB), applications should use a
+different location to prevent possible failures. This is relevant for
+both CPU and GPU nodes.
+
+Note also that the default value of the variable `TMPDIR` in batch jobs
+is a memory-resident file system location specific to the current job
+(typically in the `/dev/shm` directory). Files here reduce the available
+capacity of main memory on the node.
+
+It is recommended that applications with significant temporary file
+space requirements should use the solid state storage (see the Solid
+state storage section of this user guide). E.g., a submission script
+might contain:
+
+    export TMPDIR="/scratch/space1/x01/auser/$SLURM_JOBID.tmp"
+    mkdir -p $TMPDIR
+
+to set the standard temporary directory to a unique location in the
+solid state storage. You will also probably want to add a line to clean
+up the temporary directory at the end of your job script, e.g.
+
+    rm -r $TMPDIR
+
+
+
+!!! Tip
+
+    Applications should not hard-code specific locations such as `/tmp`.
+    Parallel applications should further ensure that there are no collisions
+    in temporary file names on separate processes/nodes.
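+
+Putting these pieces together, one possible pattern (a sketch only; the
+path and executable name follow the illustrative example above) is to
+set up the temporary directory near the top of the job script and use a
+`trap` so it is removed even if the application exits with an error:
+
+``` bash
+export TMPDIR="/scratch/space1/x01/auser/$SLURM_JOBID.tmp"
+mkdir -p "$TMPDIR"
+
+# Remove the temporary directory when the job script exits, even on failure
+trap 'rm -rf "$TMPDIR"' EXIT
+
+srun ./my_application.x
+```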
+ + diff --git a/docs/user-guide/connecting-totp.md b/docs/user-guide/connecting-totp.md new file mode 100644 index 00000000..df74bc65 --- /dev/null +++ b/docs/user-guide/connecting-totp.md @@ -0,0 +1,480 @@ +# Connecting to Cirrus + +On the Cirrus system, interactive access can be achieved via SSH, either +directly from a command line terminal or using an SSH client. In +addition data can be transferred to and from the Cirrus system using +`scp` from the command line or by using a file transfer client. + +Before following the process below, we assume you have set up an account +on Cirrus through the EPCC SAFE. Documentation on how to do this can be +found at: + +[SAFE Guide for Users](https://epcced.github.io/safe-docs/safe-for-users/) + +This section covers the basic connection methods. + +## Access credentials + +To access Cirrus, you need to use two credentials: your SSH +key pair protected by a passphrase **and** a Time-based one-time password. You +can find more detailed instructions on +how to set up your credentials to access Cirrus from Windows, macOS and Linux +below. + +### SSH Key Pairs + +You will need to generate an SSH key pair protected by a passphrase to +access Cirrus. + +Using a terminal (the command line), set up a key pair that contains +your e-mail address and enter a passphrase you will use to unlock the +key: + + $ ssh-keygen -t rsa -C "your@email.com" + ... + -bash-4.1$ ssh-keygen -t rsa -C "your@email.com" + Generating public/private rsa key pair. + Enter file in which to save the key (/Home/user/.ssh/id_rsa): [Enter] + Enter passphrase (empty for no passphrase): [Passphrase] + Enter same passphrase again: [Passphrase] + Your identification has been saved in /Home/user/.ssh/id_rsa. + Your public key has been saved in /Home/user/.ssh/id_rsa.pub. + The key fingerprint is: + 03:d4:c4:6d:58:0a:e2:4a:f8:73:9a:e8:e3:07:16:c8 your@email.com + The key's randomart image is: + +--[ RSA 2048]----+ + | . ...+o++++. | + | . . . =o.. | + |+ . . .......o o | + |oE . . | + |o = . S | + |. +.+ . | + |. oo | + |. . | + | .. | + +-----------------+ + +(remember to replace "" with your e-mail address). + +### Upload public part of key pair to SAFE + +You should now upload the public part of your SSH key pair to the SAFE +by following the instructions at: + +[Login to SAFE](https://safe.epcc.ed.ac.uk/). Then: + + 1. Go to the Menu *Login accounts* and select the Cirrus account you want to add the SSH key to + 2. On the subsequent Login account details page click the *Add Credential* button + 3. Select *SSH public key* as the Credential Type and click *Next* + 4. Either copy and paste the public part of your SSH key into the *SSH Public key* box or use the button to select the public key file on your computer. + 5. Click *Add* to associate the public SSH key part with your account + +Once you have done this, your SSH key will be added to your Cirrus +account. + +Remember, you will need to use both an SSH key and Time-based one-time password to log into Cirrus so you will +also need to set up your TBOT before you can log into Cirrus. We cover this next. + +### Time-based one-time password + + +[Login to SAFE](https://safe.epcc.ed.ac.uk/). Then: + +1. Go to the Menu 'Login accounts' and select your Cirrus account. Set MFA-token. A scanable QR code will be displayed. +2. Install a suitable smart-phone app such as google-authenticator (android, ios) on your phone or mobile device. +3. 
Follow the app instructions to add a new account and scan the displayed QR code (or type in the displayed 26-character key).
+4. Type the verification code generated by the app into the Verification code box.
+5. Click Set
+
+Time-based one-time password (TOTP) two-factor authentication is now enabled on your Cirrus account.
+
+Each time you log in to Cirrus, in addition to being asked for your ssh key passphrase, you will also be asked for the current 6-digit authentication code from the app.
+
+
+
+## SSH Clients
+
+Interaction with Cirrus is done remotely, over an encrypted
+communication channel, Secure Shell version 2 (SSH-2). This allows
+command-line access to one of the Cirrus login nodes, from which
+you can run commands or use a command-line text editor to edit files.
+SSH can also be used to run graphical programs such as GUI text editors
+and debuggers when used in conjunction with an X client.
+
+### Logging in from Linux and MacOS
+
+Linux distributions and MacOS each come with a terminal application
+installed that can be used for SSH access to the login nodes. Linux
+users will have different terminals depending on their distribution and
+window manager (e.g. GNOME Terminal in GNOME, Konsole in KDE). Consult
+your Linux distribution's documentation for details on how to load a
+terminal.
+
+MacOS users can use the Terminal application, located in the Utilities
+folder within the Applications folder.
+
+You can use the following command from the terminal window to log in to
+Cirrus:
+
+    ssh username@login.cirrus.ac.uk
+
+You will first be prompted for the passphrase associated with your SSH
+key pair. Once you have entered your passphrase successfully, you will
+then be prompted for your TOTP code. You need to enter both correctly to
+be able to access Cirrus.
+
+
+!!! Note
+
+    If your SSH key pair is not stored in the default location (usually `~/.ssh/id_rsa`) on your local system, you may need to specify the path to the private part of the key with the `-i` option to `ssh`. For example, if your key is in a file called `keys/id_rsa_cirrus` you would use the command `ssh -i keys/id_rsa_cirrus username@login.cirrus.ac.uk` to log in.
+
+
+To allow remote programs, especially graphical applications, to control
+your local display, such as being able to open up a new GUI window (such
+as for a debugger), use:
+
+    ssh -X username@login.cirrus.ac.uk
+
+Some sites recommend using the `-Y` flag. While this can fix some
+compatibility issues, the `-X` flag is more secure.
+
+Current MacOS systems do not have an X window system. Users should
+install the XQuartz package to allow for SSH with X11 forwarding on
+MacOS systems:
+
+- [XQuartz website](http://www.xquartz.org/)
+
+### Logging in from Windows using MobaXterm
+
+A typical Windows installation will not include a terminal client,
+though there are various clients available. We recommend that all our
+Windows users download and install MobaXterm to access Cirrus. It is
+very easy to use and includes an integrated X server with SSH client to
+run any graphical applications on Cirrus.
+
+You can download MobaXterm Home Edition (Installer Edition) from the
+following link:
+
+- [Install
+  MobaXterm](http://mobaxterm.mobatek.net/download-home-edition.html)
+
+Double-click the downloaded Microsoft Installer file (.msi), and the
+Windows wizard will automatically guide you through the installation
+process. Note, you might need to have administrator rights to install on
+some Windows systems.
Also make sure to check whether Windows Firewall hasn't +blocked any features of this program after installation. + +Start MobaXterm using, for example, the icon added to the Start menu +during the installation process. + +If you would like to run any small remote GUI applications, then make +sure to use -X option along with the ssh command (see above) to enable +X11 forwarding, which allows you to run graphical clients on your local +X server. + +## Making access more convenient using the SSH configuration file + +Typing in the full command to login or transfer data to Cirrus can +become tedious as it often has to be repeated many times. You can use +the SSH configuration file, usually located on your local machine at +`.ssh/config` to make things a bit more convenient. + +Each remote site (or group of sites) can have an entry in this file +which may look something like: + + Host cirrus + HostName login.cirrus.ac.uk + User username + +(remember to replace `username` with your actual username!). + +The `Host cirrus` line defines a short name for the entry. In this case, +instead of typing `ssh username@login.cirrus.ac.uk` to access the Cirrus +login nodes, you could use `ssh cirrus` instead. The remaining lines +define the options for the `cirrus` host. + +- `Hostname login.cirrus.ac.uk` - defines the full address of the host +- `User username` - defines the username to use by default for this host (replace `username` with your own username on the remote host) + +Now you can use SSH to access Cirrus without needing to enter your +username or the full hostname every time: + + -bash-4.1$ ssh cirrus + +You can set up as many of these entries as you need in your local +configuration file. Other options are available. See the ssh_config man +page (or `man ssh_config` on any machine with SSH installed) for a +description of the SSH configuration file. You may find the +`IdentityFile` option useful if you have to manage multiple SSH key +pairs for different systems as this allows you to specify which SSH key +to use for each system. + + + +!!! Note + + There is a known bug with Windows ssh-agent. If you get the error message: + `Warning: agent returned different signature type ssh-rsa (expected rsa-sha2-512)`, + you will need to either specify the path to your ssh key in the command + line (using the `-i` option as described above) or add the path to your + SSH config file by using the `IdentityFile` option. + + + +## Accessing Cirrus from more than 1 machine + +It is common for users to want to access Cirrus from more than one local +machine (e.g. a desktop linux, and a laptop) - this can be achieved +through use of an `~/.ssh/authorized_keys` file on Cirrus to hold the +additional keys you generate. Note that if you want to access Cirrus via +another remote service, see the next section, SSH forwarding. + +You need to consider one of your local machines as your primary +machine - this is the machine you should connect to Cirrus with using +the instructions further up this page, adding your public key to SAFE. + +On your second local machine, generate a new SSH key pair. Copy the +public key to your primary machine (e.g. by email, USB stick, or cloud +storage); the default location for this on a Linux or MacOS machine will +be `~/.ssh/id_rsa.pub`. If you are a Windows user using MobaXTerm, you +should export the public key it generates to OpenSSH format +(`Conversions > Export OpenSSH Key`). You should never move the private +key off the machine on which it was generated. 
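+
+For example, on the secondary machine you might generate the new pair
+under a distinctive (purely illustrative) name:
+
+``` bash
+# Generate a second key pair just for this machine; choose a passphrase when prompted
+ssh-keygen -t rsa -C "your@email.com" -f ~/.ssh/id_rsa_cirrus_laptop
+```
+
+If you use a non-default name like this, substitute it for `id_rsa.pub`
+in the `scp` command below and point the `IdentityFile` option in that
+machine's `~/.ssh/config` (described above) at the private part of the
+key.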
+ +Once back on your primary machine, you should copy the public key from +your secondary machine to Cirrus using: + + scp id_rsa.pub @login.cirrus.ac.uk:id_secondary.pub + +You should then log into Cirrus, as normal: +`ssh @login.cirrus.ac.uk`, and then: + +- check to see if the `.ssh` directory exists, using `ls -la ~` +- if it doesn't, create it, and apply appropriate permissions: + + + + mkdir ~/.ssh + chmod 700 ~/.ssh + +- and then create an authorized_keys file, and add the public key from + your secondary machine in one go: + + + + cat ~/id_secondary.pub >> ~/.ssh/authorized_keys + chmod 600 ~/.ssh/authorized_keys + rm ~/id_secondary.pub + +You can then repeat this process for any more local machines you want to +access Cirrus from, omitting the `mkdir` and `chmod` lines as the +relevant files and directories will already exist with the correct +permissions. You don't need to add the public key from your primary +machine in your authorized_keys file, +because Cirrus can find this in SAFE. + +Note that the permissions on the `.ssh` directory must be set to 700 +(Owner can read, can write and can execute but group and world do not +have access) and on the `authorized_keys` file must be 600 (Owner can +read and write but group and world do not have access). Keys will be +ignored if this is not the case. + +## SSH forwarding (to use Cirrus from a second remote machine) + +If you want to access Cirrus from a machine you already access remotely +(e.g. to copy data from Cirrus onto a different cluster), you can +*forward* your local Cirrus SSH keys so that you don't need to create a +new key pair on the intermediate machine. + +If your local machine is MacOS or Linus, add your Cirrus SSH key to the +SSH Agent: + + eval "$(ssh-agent -s)" + ssh-add ~/.ssh/id_rsa + +(If you created your key with a different name, replace `id_rsa` in the +command with the name of your private key file). You will be prompted +for your SSH key's passphrase. + +You can then use the `-A` flag when connecting to your intermediate +cluster: + + ssh -A @ + +Once on the intermediate cluster, you should be able to SSH to Cirrus +directly: + + ssh @login.cirrus.ac.uk + +## SSH debugging tips + +If you find you are unable to connect via SSH there are a number of ways +you can try and diagnose the issue. Some of these are collected below - +if you are having difficulties connecting we suggest trying these before +contacting the Cirrus service desk. + +### Can you connect to the login node? + +Try the command `ping -c 3 login.cirrus.ac.uk`. If you successfully +connect to the login node, the output should include: + + --- login.dyn.cirrus.ac.uk ping statistics --- + 3 packets transmitted, 3 received, 0% packet loss, time 38ms + +(the ping time '38ms' is not important). If not all packets are received +there could be a problem with your internet connection, or the login +node could be unavailable. + +### SSH key + +If you get the error message `Permission denied (publickey)` this can +indicate a problem with your SSH key. Some things to check: + +- Have you uploaded the key to SAFE? Please note that if the same key + is reuploaded SAFE will not map the "new" key to cirrus. If for some + reason this is required, please delete the key first, then reupload. + +- Is ssh using the correct key? You can check which keys are being + found and offered by ssh using `ssh -vvv`. If your private key has a + non-default name you can use the `-i` flag to provide it to ssh, + i.e. `ssh -i path/to/key username@login.cirrus.ac.uk`. 
+ +- Are you entering the passphrase correctly? You will be asked for + your private key's passphrase first. If you enter it incorrectly you + will usually be asked to enter it again, and usually up to three + times in total, after which ssh will fail with + `Permission denied (publickey)`. If you would like to confirm your + passphrase without attempting to connect, you can use + `ssh-keygen -y -f /path/to/private/key`. If successful, this command + will print the corresponding public key. You can also use this to + check it is the one uploaded to SAFE. + +- Are permissions correct on the ssh key? One common issue is that the + permissions are incorrect on the either the key file, or the + directory it's contained in. On Linux/MacOS for example, if your + private keys are held in `~/.ssh/` you can check this with + `ls -al ~/.ssh`. This should give something similar to the following + output: + +``` + $ ls -al ~/.ssh/ + drwx------. 2 user group 48 Jul 15 20:24 . + drwx------. 12 user group 4096 Oct 13 12:11 .. + -rw-------. 1 user group 113 Jul 15 20:23 authorized_keys + -rw-------. 1 user group 12686 Jul 15 20:23 id_rsa + -rw-r--r--. 1 user group 2785 Jul 15 20:23 id_rsa.pub + -rw-r--r--. 1 user group 1967 Oct 13 14:11 known_hosts +``` + + The important section here is the string of letters and dashes at + the start, for the lines ending in `.`, `id_rsa`, and `id_rsa.pub`, + which indicate permissions on the containing directory, private key, + and public key respectively. If your permissions are not correct, + they can be set with `chmod`. Consult the table below for the + relevant `chmod` command. On Windows, permissions are handled + differently but can be set by right-clicking on the file and + selecting Properties \> Security \> Advanced. The user, SYSTEM, and + Administrators should have `Full control`, and no other permissions + should exist for both public and private key files, and the + containing folder. + + + + +| Target | Permissions | chmod Code | +| --- | --- | --- | +| Directory | drwx------ | 700 | +| Private Key | -rw------- | 600 | +|Public Key | -rw-r--r-- | 644 | + +`chmod` can be used to set permissions on the target in the following +way: `chmod `. So for example to set correct permissions +on the private key file `id_rsa_cirrus` one would use the command +`chmod 600 id_rsa_cirrus`. + + + +!!! Note + + Unix file permissions can be understood in the following way. There are three groups that can have file permissions: (owning) *users*, (owning) *groups*, and *others*. The available permissions are *read*, *write*, and *execute*. + + The first character indicates whether the target is a file `-`, or directory `d`. The next three characters indicate the owning user's permissions. The first character is `r` if they have read permission, `-` if they don't, the second character is `w` if they have write permission, `-` if they don't, the third character is `x` if they have execute permission, `-` if they don't. This pattern is then repeated for *group*, and *other* permissions. + + For example the pattern `-rw-r--r--` indicates that the owning user can read and write the file, members of the owning group can read it, and anyone else can also read it. The `chmod` codes are constructed by treating the user, group, and owner permission strings as binary numbers, then converting them to decimal. For example the permission string `-rwx------` becomes `111 000 000` -\> `700`. 
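+
+If you need to correct all three in one go on Linux or MacOS, the
+following commands apply the permissions from the table (the key
+filenames here are the defaults; adjust them if yours differ):
+
+``` bash
+chmod 700 ~/.ssh
+chmod 600 ~/.ssh/id_rsa
+chmod 644 ~/.ssh/id_rsa.pub
+```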
+ + + +### Password + +If you are having trouble entering your password consider using a +password manager, from which you can copy and paste it. This will also +help you generate a secure password. If you need to reset your password, +instructions for doing so can be found +[here](https://epcced.github.io/safe-docs/safe-for-users/#reset_machine). + +Windows users please note that `Ctrl+V` does not work to paste in to +PuTTY, MobaXterm, or PowerShell. Instead use `Shift+Ins` to paste. +Alternatively, right-click and select 'Paste' in PuTTY and MobaXterm, or +simply right-click to paste in PowerShell. + +### SSH verbose output + +Verbose debugging output from `ssh` can be very useful for diagnosing +the issue. In particular, it can be used to distinguish between problems +with the SSH key and password - further details are given below. To +enable verbose output add the `-vvv` flag to your SSH command. For +example: + + ssh -vvv username@login.cirrus.ac.uk + +The output is lengthy, but somewhere in there you should see lines +similar to the following: + + debug1: Next authentication method: publickey + debug1: Offering public key: RSA SHA256: + debug3: send_pubkey_test + debug3: send packet: type 50 + debug2: we sent a publickey packet, wait for reply + debug3: receive packet: type 60 + debug1: Server accepts key: pkalg ssh-rsa vlen 2071 + debug2: input_userauth_pk_ok: fp SHA256: + debug3: sign_and_send_pubkey: RSA SHA256: + Enter passphrase for key '': + debug3: send packet: type 50 + debug3: receive packet: type 51 + Authenticated with partial success. + +Most importantly, you can see which files ssh has checked for private +keys, and you can see if any key is accepted. The line +`Authenticated with partial success` indicates that the SSH key has been +accepted, and you will next be asked for your password. By default ssh +will go through a list of standard private key files, as well as any you +have specified with `-i` or a config file. This is fine, as long as one +of the files mentioned is the one that matches the public key uploaded +to SAFE. + +If you do not see `Authenticated with partial success` anywhere in the +verbose output, consider the suggestions under *SSH key* above. If you +do, but are unable to connect, consider the suggestions under *Password* +above. + +The equivalent information can be obtained in PuTTY or MobaXterm by +enabling all logging in settings. + +## Default shell environment + +Usually, when a new login shell is created, the commands on +`$HOME/.bashrc` are executed. This tipically includes setting +user-defined alias, changing environment variables, and, in the case of +an HPC system, loading modules. + +Cirrus does not currently read the `$HOME/.bashrc` file, but it does +read the `$HOME/.bash_profile` file, so, if you wish to read a +`$HOME/.bashrc` file, you can add the following to your +`$HOME/.bash_profile` file (or create one, if it doesn't exist): + + # $HOME/.bash_profile + # load $HOME/.bashrc, if it exists + if [ -f $HOME/.bashrc ]; then + . $HOME/.bashrc + fi diff --git a/docs/user-guide/connecting.md b/docs/user-guide/connecting.md new file mode 100644 index 00000000..a8ca55b8 --- /dev/null +++ b/docs/user-guide/connecting.md @@ -0,0 +1,501 @@ +# Connecting to Cirrus + +On the Cirrus system, interactive access can be achieved via SSH, either +directly from a command line terminal or using an SSH client. In +addition data can be transferred to and from the Cirrus system using +`scp` from the command line or by using a file transfer client. 
+ +Before following the process below, we assume you have set up an account +on Cirrus through the EPCC SAFE. Documentation on how to do this can be +found at: + +[SAFE Guide for Users](https://epcced.github.io/safe-docs/safe-for-users/) + +This section covers the basic connection methods. + +## Access credentials + +To access Cirrus, you need to use two credentials: your password **and** +an SSH key pair protected by a passphrase. You can find more detailed +instructions on how to set up your credentials to access Cirrus from +Windows, macOS and Linux below. + +### SSH Key Pairs + +You will need to generate an SSH key pair protected by a passphrase to +access Cirrus. + +Using a terminal (the command line), set up a key pair that contains +your e-mail address and enter a passphrase you will use to unlock the +key: + + $ ssh-keygen -t rsa -C "your@email.com" + ... + -bash-4.1$ ssh-keygen -t rsa -C "your@email.com" + Generating public/private rsa key pair. + Enter file in which to save the key (/Home/user/.ssh/id_rsa): [Enter] + Enter passphrase (empty for no passphrase): [Passphrase] + Enter same passphrase again: [Passphrase] + Your identification has been saved in /Home/user/.ssh/id_rsa. + Your public key has been saved in /Home/user/.ssh/id_rsa.pub. + The key fingerprint is: + 03:d4:c4:6d:58:0a:e2:4a:f8:73:9a:e8:e3:07:16:c8 your@email.com + The key's randomart image is: + +--[ RSA 2048]----+ + | . ...+o++++. | + | . . . =o.. | + |+ . . .......o o | + |oE . . | + |o = . S | + |. +.+ . | + |. oo | + |. . | + | .. | + +-----------------+ + +(remember to replace "" with your e-mail address). + +### Upload public part of key pair to SAFE + +You should now upload the public part of your SSH key pair to the SAFE +by following the instructions at: + +[Login to SAFE](https://safe.epcc.ed.ac.uk/). Then: + + 1. Go to the Menu *Login accounts* and select the Cirrus account you want to add the SSH key to + 2. On the subsequent Login account details page click the *Add Credential* button + 3. Select *SSH public key* as the Credential Type and click *Next* + 4. Either copy and paste the public part of your SSH key into the *SSH Public key* box or use the button to select the public key file on your computer. + 5. Click *Add* to associate the public SSH key part with your account + +Once you have done this, your SSH key will be added to your Cirrus +account. + +Remember, you will need to use both an SSH key and password to log into +Cirrus so you will also need to collect your initial password before you +can log into Cirrus. We cover this next. + +### Initial passwords + +The SAFE web interface is used to provide your initial password for +logging onto Cirrus (see the [SAFE +Documentation](https://epcced.github.io/safe-docs/) for more details on +requesting accounts and picking up passwords). + +### Changing passwords + +You may now change your password on the Cirrus machine itself using the +*passwd* command or when you are prompted the first time you login. This +change will not be reflected in the SAFE. If you forget your password, +you should use the SAFE to request a new one-shot password. + + + +!!! Note + + When you first log into Cirrus, you will be prompted to change your initial password. This is a three step process: + + 1. When promoted to enter your *ldap password*: Re-enter the password you retrieved from SAFE + 2. When prompted to enter your new password: type in a new password + 3. 
When prompted to re-enter the new password: re-enter the new + password + + Your password has now been changed + + + +### Password Expiration + +Passwords on Cirrus will expire after two years. When this happens, you +will be required to update your password. This will be done by following +the same steps as above. + +**Note:** You will still be prompted to enter your current password +first before changing your password + +## SSH Clients + +Interaction with Cirrus is done remotely, over an encrypted +communication channel, Secure Shell version 2 (SSH-2). This allows +command-line access to one of the login nodes of a Cirrus, from which +you can run commands or use a command-line text editor to edit files. +SSH can also be used to run graphical programs such as GUI text editors +and debuggers when used in conjunction with an X client. + +### Logging in from Linux and MacOS + +Linux distributions and MacOS each come installed with a terminal +application that can be use for SSH access to the login nodes. Linux +users will have different terminals depending on their distribution and +window manager (e.g. GNOME Terminal in GNOME, Konsole in KDE). Consult +your Linux distribution's documentation for details on how to load a +terminal. + +MacOS users can use the Terminal application, located in the Utilities +folder within the Applications folder. + +You can use the following command from the terminal window to login into +Cirrus: + + ssh username@login.cirrus.ac.uk + +You will first be prompted for the passphrase associated with your SSH +key pair. Once you have entered your passphrase successfully, you will +then be prompted for your password. You need to enter both correctly to +be able to access Cirrus. + + +!!! Note + + If your SSH key pair is not stored in the default location (usually `~/.ssh/id_rsa`) on your local system, you may need to specify the path to the private part of the key with the `-i` option to `ssh`. For example, if your key is in a file called `keys/id_rsa_cirrus` you would use the command `ssh -i keys/id_rsa_cirrus username@login.cirrus.ac.uk` to log in. + + +To allow remote programs, especially graphical applications to control +your local display, such as being able to open up a new GUI window (such +as for a debugger), use: + + ssh -X username@login.cirrus.ac.uk + +Some sites recommend using the `-Y` flag. While this can fix some +compatibility issues, the `-X` flag is more secure. + +Current MacOS systems do not have an X window system. Users should +install the XQuartz package to allow for SSH with X11 forwarding on +MacOS systems: + +- [XQuartz website](http://www.xquartz.org/) + +### Logging in from Windows using MobaXterm + +A typical Windows installation will not include a terminal client, +though there are various clients available. We recommend all our Windows +users to download and install MobaXterm to access Cirrus. It is very +easy to use and includes an integrated X server with SSH client to run +any graphical applications on Cirrus. + +You can download MobaXterm Home Edition (Installer Edition) from the +following link: + +- [Install + MobaXterm](http://mobaxterm.mobatek.net/download-home-edition.html) + +Double-click the downloaded Microsoft Installer file (.msi), and the +Windows wizard will automatically guides you through the installation +process. Note, you might need to have administrator rights to install on +some Windows OS. Also make sure to check whether Windows Firewall hasn't +blocked any features of this program after installation. 
+ +Start MobaXterm using, for example, the icon added to the Start menu +during the installation process. + +If you would like to run any small remote GUI applications, then make +sure to use -X option along with the ssh command (see above) to enable +X11 forwarding, which allows you to run graphical clients on your local +X server. + +## Making access more convenient using the SSH configuration file + +Typing in the full command to login or transfer data to Cirrus can +become tedious as it often has to be repeated many times. You can use +the SSH configuration file, usually located on your local machine at +`.ssh/config` to make things a bit more convenient. + +Each remote site (or group of sites) can have an entry in this file +which may look something like: + + Host cirrus + HostName login.cirrus.ac.uk + User username + +(remember to replace `username` with your actual username!). + +The `Host cirrus` line defines a short name for the entry. In this case, +instead of typing `ssh username@login.cirrus.ac.uk` to access the Cirrus +login nodes, you could use `ssh cirrus` instead. The remaining lines +define the options for the `cirrus` host. + +- `Hostname login.cirrus.ac.uk` - defines the full address of the host +- `User username` - defines the username to use by default for this host (replace `username` with your own username on the remote host) + +Now you can use SSH to access Cirrus without needing to enter your +username or the full hostname every time: + + -bash-4.1$ ssh cirrus + +You can set up as many of these entries as you need in your local +configuration file. Other options are available. See the ssh_config man +page (or `man ssh_config` on any machine with SSH installed) for a +description of the SSH configuration file. You may find the +`IdentityFile` option useful if you have to manage multiple SSH key +pairs for different systems as this allows you to specify which SSH key +to use for each system. + + + +!!! Note + + There is a known bug with Windows ssh-agent. If you get the error message: + `Warning: agent returned different signature type ssh-rsa (expected rsa-sha2-512)`, + you will need to either specify the path to your ssh key in the command + line (using the `-i` option as described above) or add the path to your + SSH config file by using the `IdentityFile` option. + + + +## Accessing Cirrus from more than 1 machine + +It is common for users to want to access Cirrus from more than one local +machine (e.g. a desktop linux, and a laptop) - this can be achieved +through use of an `~/.ssh/authorized_keys` file on Cirrus to hold the +additional keys you generate. Note that if you want to access Cirrus via +another remote service, see the next section, SSH forwarding. + +You need to consider one of your local machines as your primary +machine - this is the machine you should connect to Cirrus with using +the instructions further up this page, adding your public key to SAFE. + +On your second local machine, generate a new SSH key pair. Copy the +public key to your primary machine (e.g. by email, USB stick, or cloud +storage); the default location for this on a Linux or MacOS machine will +be `~/.ssh/id_rsa.pub`. If you are a Windows user using MobaXTerm, you +should export the public key it generates to OpenSSH format +(`Conversions > Export OpenSSH Key`). You should never move the private +key off the machine on which it was generated. 
+ +Once back on your primary machine, you should copy the public key from +your secondary machine to Cirrus using: + + scp id_rsa.pub @login.cirrus.ac.uk:id_secondary.pub + +You should then log into Cirrus, as normal: +`ssh @login.cirrus.ac.uk`, and then: + +- check to see if the `.ssh` directory exists, using `ls -la ~` +- if it doesn't, create it, and apply appropriate permissions: + + + + mkdir ~/.ssh + chmod 700 ~/.ssh + +- and then create an authorized_keys file, and add the public key from + your secondary machine in one go: + + + + cat ~/id_secondary.pub >> ~/.ssh/authorized_keys + chmod 600 ~/.ssh/authorized_keys + rm ~/id_secondary.pub + +You can then repeat this process for any more local machines you want to +access Cirrus from, omitting the `mkdir` and `chmod` lines as the +relevant files and directories will already exist with the correct +permissions. You don't need to add the public key from your primary +machine in your authorized_keys file, +because Cirrus can find this in SAFE. + +Note that the permissions on the `.ssh` directory must be set to 700 +(Owner can read, can write and can execute but group and world do not +have access) and on the `authorized_keys` file must be 600 (Owner can +read and write but group and world do not have access). Keys will be +ignored if this is not the case. + +## SSH forwarding (to use Cirrus from a second remote machine) + +If you want to access Cirrus from a machine you already access remotely +(e.g. to copy data from Cirrus onto a different cluster), you can +*forward* your local Cirrus SSH keys so that you don't need to create a +new key pair on the intermediate machine. + +If your local machine is MacOS or Linus, add your Cirrus SSH key to the +SSH Agent: + + eval "$(ssh-agent -s)" + ssh-add ~/.ssh/id_rsa + +(If you created your key with a different name, replace `id_rsa` in the +command with the name of your private key file). You will be prompted +for your SSH key's passphrase. + +You can then use the `-A` flag when connecting to your intermediate +cluster: + + ssh -A @ + +Once on the intermediate cluster, you should be able to SSH to Cirrus +directly: + + ssh @login.cirrus.ac.uk + +## SSH debugging tips + +If you find you are unable to connect via SSH there are a number of ways +you can try and diagnose the issue. Some of these are collected below - +if you are having difficulties connecting we suggest trying these before +contacting the Cirrus service desk. + +### Can you connect to the login node? + +Try the command `ping -c 3 login.cirrus.ac.uk`. If you successfully +connect to the login node, the output should include: + + --- login.dyn.cirrus.ac.uk ping statistics --- + 3 packets transmitted, 3 received, 0% packet loss, time 38ms + +(the ping time '38ms' is not important). If not all packets are received +there could be a problem with your internet connection, or the login +node could be unavailable. + +### SSH key + +If you get the error message `Permission denied (publickey)` this can +indicate a problem with your SSH key. Some things to check: + +- Have you uploaded the key to SAFE? Please note that if the same key + is reuploaded SAFE will not map the "new" key to cirrus. If for some + reason this is required, please delete the key first, then reupload. + +- Is ssh using the correct key? You can check which keys are being + found and offered by ssh using `ssh -vvv`. If your private key has a + non-default name you can use the `-i` flag to provide it to ssh, + i.e. `ssh -i path/to/key username@login.cirrus.ac.uk`. 
+ +- Are you entering the passphrase correctly? You will be asked for + your private key's passphrase first. If you enter it incorrectly you + will usually be asked to enter it again, and usually up to three + times in total, after which ssh will fail with + `Permission denied (publickey)`. If you would like to confirm your + passphrase without attempting to connect, you can use + `ssh-keygen -y -f /path/to/private/key`. If successful, this command + will print the corresponding public key. You can also use this to + check it is the one uploaded to SAFE. + +- Are permissions correct on the ssh key? One common issue is that the + permissions are incorrect on the either the key file, or the + directory it's contained in. On Linux/MacOS for example, if your + private keys are held in `~/.ssh/` you can check this with + `ls -al ~/.ssh`. This should give something similar to the following + output: + +``` + $ ls -al ~/.ssh/ + drwx------. 2 user group 48 Jul 15 20:24 . + drwx------. 12 user group 4096 Oct 13 12:11 .. + -rw-------. 1 user group 113 Jul 15 20:23 authorized_keys + -rw-------. 1 user group 12686 Jul 15 20:23 id_rsa + -rw-r--r--. 1 user group 2785 Jul 15 20:23 id_rsa.pub + -rw-r--r--. 1 user group 1967 Oct 13 14:11 known_hosts +``` + + The important section here is the string of letters and dashes at + the start, for the lines ending in `.`, `id_rsa`, and `id_rsa.pub`, + which indicate permissions on the containing directory, private key, + and public key respectively. If your permissions are not correct, + they can be set with `chmod`. Consult the table below for the + relevant `chmod` command. On Windows, permissions are handled + differently but can be set by right-clicking on the file and + selecting Properties \> Security \> Advanced. The user, SYSTEM, and + Administrators should have `Full control`, and no other permissions + should exist for both public and private key files, and the + containing folder. + + + + +| Target | Permissions | chmod Code | +| --- | --- | --- | +| Directory | drwx------ | 700 | +| Private Key | -rw------- | 600 | +|Public Key | -rw-r--r-- | 644 | + +`chmod` can be used to set permissions on the target in the following +way: `chmod `. So for example to set correct permissions +on the private key file `id_rsa_cirrus` one would use the command +`chmod 600 id_rsa_cirrus`. + + + +!!! Note + + Unix file permissions can be understood in the following way. There are three groups that can have file permissions: (owning) *users*, (owning) *groups*, and *others*. The available permissions are *read*, *write*, and *execute*. + + The first character indicates whether the target is a file `-`, or directory `d`. The next three characters indicate the owning user's permissions. The first character is `r` if they have read permission, `-` if they don't, the second character is `w` if they have write permission, `-` if they don't, the third character is `x` if they have execute permission, `-` if they don't. This pattern is then repeated for *group*, and *other* permissions. + + For example the pattern `-rw-r--r--` indicates that the owning user can read and write the file, members of the owning group can read it, and anyone else can also read it. The `chmod` codes are constructed by treating the user, group, and owner permission strings as binary numbers, then converting them to decimal. For example the permission string `-rwx------` becomes `111 000 000` -\> `700`. 
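+
+On Linux you can also print the octal permission codes directly and
+compare them against the table (this uses GNU `stat`; the flags differ
+on MacOS):
+
+``` bash
+stat -c "%a %n" ~/.ssh ~/.ssh/id_rsa ~/.ssh/id_rsa.pub
+```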
+ + + +### Password + +If you are having trouble entering your password consider using a +password manager, from which you can copy and paste it. This will also +help you generate a secure password. If you need to reset your password, +instructions for doing so can be found +[here](https://epcced.github.io/safe-docs/safe-for-users/#reset_machine). + +Windows users please note that `Ctrl+V` does not work to paste in to +PuTTY, MobaXterm, or PowerShell. Instead use `Shift+Ins` to paste. +Alternatively, right-click and select 'Paste' in PuTTY and MobaXterm, or +simply right-click to paste in PowerShell. + +### SSH verbose output + +Verbose debugging output from `ssh` can be very useful for diagnosing +the issue. In particular, it can be used to distinguish between problems +with the SSH key and password - further details are given below. To +enable verbose output add the `-vvv` flag to your SSH command. For +example: + + ssh -vvv username@login.cirrus.ac.uk + +The output is lengthy, but somewhere in there you should see lines +similar to the following: + + debug1: Next authentication method: publickey + debug1: Offering public key: RSA SHA256: + debug3: send_pubkey_test + debug3: send packet: type 50 + debug2: we sent a publickey packet, wait for reply + debug3: receive packet: type 60 + debug1: Server accepts key: pkalg ssh-rsa vlen 2071 + debug2: input_userauth_pk_ok: fp SHA256: + debug3: sign_and_send_pubkey: RSA SHA256: + Enter passphrase for key '': + debug3: send packet: type 50 + debug3: receive packet: type 51 + Authenticated with partial success. + +Most importantly, you can see which files ssh has checked for private +keys, and you can see if any key is accepted. The line +`Authenticated with partial success` indicates that the SSH key has been +accepted, and you will next be asked for your password. By default ssh +will go through a list of standard private key files, as well as any you +have specified with `-i` or a config file. This is fine, as long as one +of the files mentioned is the one that matches the public key uploaded +to SAFE. + +If you do not see `Authenticated with partial success` anywhere in the +verbose output, consider the suggestions under *SSH key* above. If you +do, but are unable to connect, consider the suggestions under *Password* +above. + +The equivalent information can be obtained in PuTTY or MobaXterm by +enabling all logging in settings. + +## Default shell environment + +Usually, when a new login shell is created, the commands on +`$HOME/.bashrc` are executed. This tipically includes setting +user-defined alias, changing environment variables, and, in the case of +an HPC system, loading modules. + +Cirrus does not currently read the `$HOME/.bashrc` file, but it does +read the `$HOME/.bash_profile` file, so, if you wish to read a +`$HOME/.bashrc` file, you can add the following to your +`$HOME/.bash_profile` file (or create one, if it doesn't exist): + + # $HOME/.bash_profile + # load $HOME/.bashrc, if it exists + if [ -f $HOME/.bashrc ]; then + . $HOME/.bashrc + fi diff --git a/docs/user-guide/data.md b/docs/user-guide/data.md new file mode 100644 index 00000000..b8cb7d2e --- /dev/null +++ b/docs/user-guide/data.md @@ -0,0 +1,363 @@ +# Data Management and Transfer + +This section covers the storage and file systems available on the +system; the different ways that you can transfer data to and from +Cirrus; and how to transfer backed up data from prior to the March 2022 +Cirrus upgrade. 
+
+In all cases of data transfer, users should use the Cirrus login nodes.
+
+## Cirrus file systems and storage
+
+The Cirrus service, like many HPC systems, has a complex structure.
+There are a number of different data storage types available to users:
+
+- Home file system
+- Work file system
+- Solid state storage
+
+Each type of storage has different characteristics and policies, and is
+suitable for different types of use.
+
+There are also two different types of node available to users:
+
+- Login nodes
+- Compute nodes
+
+Each type of node sees a different combination of the storage types. The
+following table shows which storage options are available on different
+node types:
+
+| Storage     | Login nodes | Compute nodes | Notes     |
+|-------------|-------------|---------------|-----------|
+| Home        | yes         | no            | No backup |
+| Work        | yes         | yes           | No backup |
+| Solid state | yes         | yes           | No backup |
+
+### Home file system
+
+Every project has an allocation on the home file system and your
+project's space can always be accessed via the path
+`/home/[project-code]`. The home file system is approximately 1.5 PB in
+size and is implemented using the Ceph technology. This means that this
+storage is not particularly high performance but is well suited to
+standard operations like compilation and file editing. This file system
+is visible from the Cirrus login nodes.
+
+There are currently no backups of any data on the home file system.
+
+#### Quotas on home file system
+
+All projects are assigned a quota on the home file system. The project
+PI or manager can split this quota up between groups of users if they
+wish.
+
+You can view any home file system quotas that apply to your account by
+logging into SAFE and navigating to the page for your Cirrus login
+account.
+
+1. [Log into SAFE](https://safe.epcc.ed.ac.uk)
+2. Use the "Login accounts" menu and select your Cirrus login account
+3. The "Login account details" table lists any user or group quotas
+   that are linked with your account. (If there is no quota shown for a
+   row then you have an unlimited quota for that item, but you may
+   still be limited by another quota.)
+
+Quota and usage data on SAFE is updated twice daily so may not be
+exactly up to date with the situation on the system itself.
+
+#### From the command line
+
+Some useful information on the current contents of directories on the
+`/home` file system is available from the command line by using the Ceph
+command `getfattr`. This is to be preferred over standard Unix commands
+such as `du` for reasons of efficiency.
+
+For example, the number of entries (files plus directories) in a home
+directory can be queried via
+
+    $ cd
+    $ getfattr -n ceph.dir.entries .
+    # file: .
+    ceph.dir.entries="33"
+
+The corresponding attribute `rentries` gives the recursive total in all
+subdirectories, that is, the total number of files and directories:
+
+    $ getfattr -n ceph.dir.rentries .
+    # file: .
+    ceph.dir.rentries="1619179"
+
+Other useful attributes (all prefixed with `ceph.dir.`) include `files`
+which is the number of ordinary files, `subdirs` the number of
+subdirectories, and `bytes` the total number of bytes used. All these
+have a corresponding recursive version, respectively: `rfiles`,
+`rsubdirs`, and `rbytes`.
+
+A full path name can be specified if required.
+
+### Work file system
+
+Every project has an allocation on the work file system and your
+project's space can always be accessed via the path
+`/work/[project-code]`. The work file system is approximately 400 TB in
+size and is implemented using the Lustre parallel file system
+technology. It is designed to support data in large files; performance
+for data stored as large numbers of small files will not be as good.
+
+There are currently no backups of any data on the work file system.
+
+Ideally, the work file system should only contain data that is:
+
+- actively in use;
+- recently generated and in the process of being saved elsewhere; or
+- being made ready for up-coming work.
+
+In practice it may be convenient to keep copies of datasets on the work
+file system that you know will be needed at a later date. However, make
+sure that important data is always backed up elsewhere and that your
+work would not be significantly impacted if the data on the work file
+system was lost.
+
+If you have data on the work file system that you are not going to need
+in the future please delete it.
+
+#### Quotas on the work file system
+
+
+
+!!! Tip
+
+    The capacity of the home file system is much larger than the work file system so you should store most data on home and only move data to work that you need for current running work.
+
+
+As for the home file system, all projects are assigned a quota on the
+work file system. The project PI or manager can split this quota up
+between groups of users if they wish.
+
+You can view any work file system quotas that apply to your account by
+logging into SAFE and navigating to the page for your Cirrus login
+account.
+
+1. [Log into SAFE](https://safe.epcc.ed.ac.uk)
+2. Use the "Login accounts" menu and select your Cirrus login account
+3. The "Login account details" table lists any user or group quotas
+   that are linked with your account. (If there is no quota shown for a
+   row then you have an unlimited quota for that item, but you may
+   still be limited by another quota.)
+
+Quota and usage data on SAFE is updated twice daily so may not be
+exactly up to date with the situation on the system itself.
+
+You can also examine up-to-date quotas and usage on the Cirrus system
+itself using the `lfs quota` command. To do this, change directory to
+the work directory where you want to check the quota. For example, if I
+wanted to check the quota for user `auser` in project `t01` then I
+would:
+
+    [auser@cirrus-login1 auser]$ cd /work/t01/t01/auser
+
+    [auser@cirrus-login1 auser]$ lfs quota -hu auser .
+    Disk quotas for usr auser (uid 68826):
+        Filesystem    used   quota   limit   grace   files   quota   limit   grace
+                 .  5.915G      0k      0k       -   51652       0       0       -
+    uid 68826 is using default block quota setting
+    uid 68826 is using default file quota setting
+
+The quota and limit of 0k here indicate that no user quota is set for
+this user.
+
+To check your project (group) quota, you would use the command:
+
+    [auser@cirrus-login1 auser]$ lfs quota -hg t01 .
+    Disk quotas for grp t01 (gid 37733):
+        Filesystem    used   quota   limit   grace   files   quota   limit   grace
+                 .  958.3G      0k  13.57T       - 1427052       0       0       -
+    gid 37733 is using default file quota setting
+
+The limit of `13.57T` indicates the quota for the group.
+
+### Solid state storage
+
+More information on using the solid state storage can be found in the
+Solid state storage section of this user guide.
+
+The solid state storage is not backed up.
+
+## Accessing Cirrus data from before March 2022
+
+Prior to the March 2022 Cirrus upgrade, all user data on the `/lustre/sw`
+filesystem was archived.
Users can access their archived data from the +Cirrus login nodes in the `/home-archive` directory. Assuming you are +user `auser` from project `x01`, your pre-rebuild archived data can be +found in: + + /home-archive/x01/auser + +The data in the `/home-archive` file system is **read only** meaning +that you will not be able to create, edit, or copy new information to +this file system. + +To make archived data visible from the compute nodes, you will need to +copy the data from the `/home-archive` file system to the `/home` file +system. Assuming again that you are user `auser` from project `x01` and +that you were wanting to copy data from +`/home-archive/x01/auser/directory_to_copy` to +`/home/x01/x01/auser/destination_directory`, you would do this by +running: + + cp -r /home-archive/x01/auser/directory_to_copy \ + /home/x01/x01/auser/destination_directory + +Note that the project code appears once in the path for the old home +archive and twice in the path on the new /home file system. + + + +!!! Note + + The capacity of the home file system is much larger than the work file system so you should move data to home rather than work. + + + +## Data transfer + +### Before you start + +Read Harry Mangalam's guide on [How to transfer large amounts of data +via +network](https://hjmangalam.wordpress.com/2009/09/14/how-to-transfer-large-amounts-of-data-via-network/). +This tells you *all* you want to know about transferring data. + +### Data Transfer via SSH + +The easiest way of transferring data to/from Cirrus is to use one of the +standard programs based on the SSH protocol such as `scp`, `sftp` or +`rsync`. These all use the same underlying mechanism (ssh) as you +normally use to login to Cirrus. So, once the command has been executed +via the command line, you will be prompted for your password for the +specified account on the **remote machine**. + +To avoid having to type in your password multiple times you can set up a +*ssh-key* as documented in the User Guide at `connecting` + +### SSH Transfer Performance Considerations + +The ssh protocol encrypts all traffic it sends. This means that +file-transfer using ssh consumes a relatively large amount of CPU time +at both ends of the transfer. The encryption algorithm used is +negotiated between the ssh-client and the ssh-server. There are command +line flags that allow you to specify a preference for which encryption +algorithm should be used. You may be able to improve transfer speeds by +requesting a different algorithm than the default. The *arcfour* +algorithm is usually quite fast assuming both hosts support it. + +A single ssh based transfer will usually not be able to saturate the +available network bandwidth or the available disk bandwidth so you may +see an overall improvement by running several data transfer operations +in parallel. To reduce metadata interactions it is a good idea to +overlap transfers of files from different directories. + +In addition, you should consider the following when transferring data. + +- Only transfer those files that are required. Consider which data you + really need to keep. +- Combine lots of small files into a single *tar* archive, to reduce the + overheads associated in initiating many separate data transfers (over + SSH each file counts as an individual transfer). +- Compress data before sending it, e.g. using gzip. + +### scp command + +The `scp` command creates a copy of a file, or if given the `-r` flag, a +directory, on a remote machine. 
+ +For example, to transfer files to Cirrus: + + scp [options] source user@login.cirrus.ac.uk:[destination] + +(Remember to replace `user` with your Cirrus username in the example +above.) + +In the above example, the `[destination]` is optional, as when left out +`scp` will simply copy the source into the user's home directory. Also +the `source` should be the absolute path of the file/directory being +copied or the command should be executed in the directory containing the +source file/directory. + +If you want to request a different encryption algorithm add the +`-c [algorithm-name]` flag to the `scp` options. For example, to use the +(usually faster) *arcfour* encryption algorithm you would use: + + scp [options] -c arcfour source user@login.cirrus.ac.uk:[destination] + +(Remember to replace `user` with your Cirrus username in the example +above.) + +### rsync command + +The `rsync` command can also transfer data between hosts using a `ssh` +connection. It creates a copy of a file or, if given the `-r` flag, a +directory at the given destination, similar to `scp` above. + +Given the `-a` option rsync can also make exact copies (including +permissions), this is referred to as *mirroring*. In this case the +`rsync` command is executed with ssh to create the copy on a remote +machine. + +To transfer files to Cirrus using `rsync` the command should have the +form: + + rsync [options] -e ssh source user@login.cirrus.ac.uk:[destination] + +(Remember to replace `user` with your Cirrus username in the example +above.) + +In the above example, the `[destination]` is optional, as when left out +`rsync` will simply copy the source into the users home directory. Also +the `source` should be the absolute path of the file/directory being +copied or the command should be executed in the directory containing the +source file/directory. + +Additional flags can be specified for the underlying `ssh` command by +using a quoted string as the argument of the `-e` flag. e.g. + + rsync [options] -e "ssh -c arcfour" source user@login.cirrus.ac.uk:[destination] + +(Remember to replace `user` with your Cirrus username in the example +above.) + + + +### Data transfer using rclone + +Rclone is a command-line program to manage files on cloud storage. You can transfer files directly to/from cloud storage services, such as MS OneDrive and Dropbox. The program preserves timestamps and verifies checksums at all times. + +First of all, you must download and unzip rclone on Cirrus: + + wget https://downloads.rclone.org/v1.62.2/rclone-v1.62.2-linux-amd64.zip + unzip rclone-v1.62.2-linux-amd64.zip + cd rclone-v1.62.2-linux-amd64/ + +The previous code snippet uses rclone v1.62.2, which was the latest version when these instructions were written. + +Configure rclone using `./rclone config`. This will guide you through an interactive setup process where you can make a new remote (called `remote`). See the following for detailed instructions for: + +- [Microsoft OneDrive](https://rclone.org/onedrive/) +- [Dropbox](https://rclone.org/dropbox/) + +Please note that a token is required to connect from Cirrus to the cloud service. You need a web browser to get the token. The recommendation is to run rclone in your laptop using `rclone authorize`, get the token, and then copy the token from your laptop to Cirrus. The rclone website contains further instructions on [configuring rclone on a remote machine without web browser](https://rclone.org/remote_setup/). + +Once all the above is done, you’re ready to go. 
If you want to copy a directory, please use: + + rclone copy remote: + +Please note that “remote” is the name that you have chosen when running rclone config`. To copy files, please use: + + rclone copyto remote: + +!!! Note + + If the session times out while the data transfer takes place, adding the `-vv` flag to an rclone transfer forces rclone to output to the terminal and therefore avoids triggering the timeout process. diff --git a/docs/user-guide/development.md b/docs/user-guide/development.md new file mode 100644 index 00000000..dc2e9831 --- /dev/null +++ b/docs/user-guide/development.md @@ -0,0 +1,441 @@ +# Application Development Environment + +The application development environment on Cirrus is primarily +controlled through the *modules* environment. By loading and switching +modules you control the compilers, libraries and software available. + +This means that for compiling on Cirrus you typically set the compiler +you wish to use using the appropriate modules, then load all the +required library modules (e.g. numerical libraries, IO format +libraries). + +Additionally, if you are compiling parallel applications using MPI (or +SHMEM, etc.) then you will need to load one of the MPI environments and +use the appropriate compiler wrapper scripts. + +By default, all users on Cirrus start with no modules loaded. + +Basic usage of the `module` command on Cirrus is covered below. For full +documentation please see: + +- [Linux manual page on modules](http://linux.die.net/man/1/module) + +## Using the modules environment + +### Information on the available modules + +Finding out which modules (and hence which compilers, libraries and +software) are available on the system is performed using the +`module avail` command: + + [user@cirrus-login0 ~]$ module avail + ... + +This will list all the names and versions of the modules available on +the service. Not all of them may work in your account though due to, for +example, licencing restrictions. You will notice that for many modules +we have more than one version, each of which is identified by a version +number. One of these versions is the default. As the service develops +the default version will change. + +You can list all the modules of a particular type by providing an +argument to the `module avail` command. For example, to list all +available versions of the Intel Compiler type: + + [user@cirrus-login0 ~]$ module avail intel-compilers + + --------------------------------- /mnt/lustre/indy2lfs/sw/modulefiles -------------------------------- + intel-compilers-18/18.05.274 intel-compilers-19/19.0.0.117 + +If you want more info on any of the modules, you can use the +`module help` command: + + [user@cirrus-login0 ~]$ module help mpt + + ------------------------------------------------------------------- + Module Specific Help for /usr/share/Modules/modulefiles/mpt/2.25: + + The HPE Message Passing Toolkit (MPT) is an optimized MPI + implementation for HPE systems and clusters. See the + MPI(1) man page and the MPT User's Guide for more + information. 
+ ------------------------------------------------------------------- + +The simple `module list` command will give the names of the modules and +their versions you have presently loaded in your environment, e.g.: + + [user@cirrus-login0 ~]$ module list + Currently Loaded Modulefiles: + 1) git/2.35.1(default) 6) gcc/8.2.0(default) + 2) singularity/3.7.2(default) 7) intel-cc-18/18.0.5.274 + 3) epcc/utils 8) intel-fc-18/18.0.5.274 + 4) /mnt/lustre/indy2lfs/sw/modulefiles/epcc/setup-env 9) intel-compilers-18/18.05.274 + 5) intel-license 10) mpt/2.25 + +### Loading, unloading and swapping modules + +To load a module to use `module add` or `module load`. For example, to +load the intel-compilers-18 into the development environment: + + module load intel-compilers-18 + +This will load the default version of the intel compilers. If you need a +specific version of the module, you can add more information: + + module load intel-compilers-18/18.0.5.274 + +will load version 18.0.2.274 for you, regardless of the default. + +If a module loading file cannot be accessed within 10 seconds, a warning +message will appear: `Warning: Module system not loaded`. + +If you want to clean up, `module remove` will remove a loaded module: + + module remove intel-compilers-18 + +(or `module rm intel-compilers-18` or +`module unload intel-compilers-18`) will unload what ever version of +intel-compilers-18 (even if it is not the default) you might have +loaded. There are many situations in which you might want to change the +presently loaded version to a different one, such as trying the latest +version which is not yet the default or using a legacy version to keep +compatibility with old data. This can be achieved most easily by using +"module swap oldmodule newmodule". + +Suppose you have loaded version 18 of the Intel compilers; the following +command will change to version 19: + + module swap intel-compilers-18 intel-compilers-19 + +## Available Compiler Suites + + +!!! Note + + + As Cirrus uses dynamic linking by default you will generally also need to load any modules you used to compile your code in your job submission script when you run your code. + + + +### Intel Compiler Suite + +The Intel compiler suite is accessed by loading the `intel-compilers-*` +and `intel-*/compilers` modules, where `*` references the version. For +example, to load the 2019 release, you would run: + + module load intel-compilers-19 + +Once you have loaded the module, the compilers are available as: + +- `ifort` - Fortran +- `icc` - C +- `icpc` - C++ + +See the extended section below for further details of available Intel +compiler versions and tools. + +### GCC Compiler Suite + +The GCC compiler suite is accessed by loading the `gcc/*` modules, where +`*` again is the version. For example, to load version 8.2.0 you would +run: + + module load gcc/8.2.0 + +Once you have loaded the module, the compilers are available as: + +- `gfortran` - Fortran +- `gcc` - C +- `g++` - C++ + +## Compiling MPI codes + +MPI on Cirrus is currently provided by the HPE MPT library. + +You should also consult the chapter on running jobs through the batch +system for examples of how to run jobs compiled against MPI. + + + +!!! Note + + By default, all compilers produce dynamic executables on Cirrus. This means that you must load the same modules at runtime (usually in your job submission script) as you have loaded at compile time. + + + + +### Using HPE MPT + +To compile MPI code with HPE MPT, using any compiler, you must first +load the "mpt" module. 
+ + module load mpt + +This makes the compiler wrapper scripts `mpicc`, `mpicxx` and `mpif90` +available to you. + +What you do next depends on which compiler (Intel or GCC) you wish to +use to compile your code. + + +!!! Note + + We recommend that you use the Intel compiler wherever possible to compile MPI applications as this is the method officially supported and tested by HPE. + + + +!!! Note + + You can always check which compiler the MPI compiler wrapper scripts are using with, for example, `mpicc -v` or `mpif90 -v`. + + + +#### Using Intel Compilers and HPE MPT + +Once you have loaded the MPT module you should next load the Intel +compilers module you intend to use (e.g. `intel-compilers-19`): + + module load intel-compilers-19 + +The compiler wrappers are then available as + +- `mpif90` - Fortran with MPI +- `mpicc` - C with MPI +- `mpicxx` - C++ with MPI + + +!!! Note + + The MPT compiler wrappers use GCC by default rather than the Intel compilers: + + When compiling C applications you must also specify that `mpicc` should use the `icc` compiler with, for example, `mpicc -cc=icc`. Similarly, when compiling C++ applications you must also specify that `mpicxx` should use the `icpc` compiler with, for example, `mpicxx -cxx=icpc`. (This is not required for Fortran as the `mpif90` compiler automatically uses `ifort`.) If in doubt use `mpicc -cc=icc -v` or `mpicxx -cxx=icpc -v` to see which compiler is actually being called. + + Alternatively, you can set the environment variables `MPICC_CC=icc` and/or `MPICXX=icpc` to ensure the correct base compiler is used: + + export MPICC_CC=icc + export MPICXX_CXX=icpc + + + +#### Using GCC Compilers and HPE MPT + +Once you have loaded the MPT module you should next load the `gcc` +module: + + module load gcc + +Compilers are then available as + +- `mpif90` - Fortran with MPI +- `mpicc` - C with MPI +- `mpicxx` - C++ with MPI + + + +!!! Note + + HPE MPT does not support the syntax `use mpi` in Fortran applications with the GCC compiler `gfortran`. You should use the older `include "mpif.h"` syntax when using GCC compilers with `mpif90`. If you cannot change this, then use the Intel compilers with MPT. + + + +### Using Intel MPI + +Although HPE MPT remains the default MPI library and we recommend that +first attempts at building code follow that route, you may also choose +to use Intel MPI if you wish. To use these, load the appropriate +`intel-mpi` module, for example `intel-mpi-19`: + + module load intel-mpi-19 + +Please note that the name of the wrappers to use when compiling with +Intel MPI depends on whether you are using the Intel compilers or GCC. +You should make sure that you or any tools use the correct ones when +building software. + + + +!!! Note + + Although Intel MPI is available on Cirrus, HPE MPT remains the recommended and default MPI library to use when building applications. + + + + + +!!! Note + + + Using Intel MPI 18 can cause warnings in your output similar to + `no hfi units are available` or + `The /dev/hfi1_0 device failed to appear`. These warnings can be safely + ignored, or, if you would prefer to prevent them, you may add the line + + export I_MPI_FABRICS=shm:ofa + + to your job scripts after loading the Intel MPI 18 module. + + + + + +!!! Note + + + + When using Intel MPI 18, you should always launch MPI tasks with `srun`, + the supported method on Cirrus. Launches with `mpirun` or `mpiexec` will + likely fail. 
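+
+As an illustration only (a minimal sketch, assuming the 2018 release is
+provided as an `intel-mpi-18` module and that `my_mpi_executable.x` has
+been built against it), the relevant lines of a job submission script
+would then look something like:
+
+    # Load the same modules used at compile time
+    module load intel-mpi-18
+    module load intel-compilers-18
+
+    # Optional: suppress the hfi warnings mentioned above
+    export I_MPI_FABRICS=shm:ofa
+
+    # Launch with srun rather than mpirun/mpiexec
+    srun ./my_mpi_executable.x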
+ + + +#### Using Intel Compilers and Intel MPI + +After first loading Intel MPI, you should next load the appropriate +`intel-compilers` module (e.g. `intel-compilers-19`): + + module load intel-compilers-19 + +You may then use the following MPI compiler wrappers: + +- `mpiifort` - Fortran with MPI +- `mpiicc` - C with MPI +- `mpiicpc` - C++ with MPI + +#### Using GCC Compilers and Intel MPI + +After loading Intel MPI, you should next load the `gcc` module you wish +to use: + + module load gcc + +You may then use these MPI compiler wrappers: + +- `mpif90` - Fortran with MPI +- `mpicc` - C with MPI +- `mpicxx` - C++ with MPI + +## Compiler Information and Options + +The manual pages for the different compiler suites are available: + +GCC +Fortran `man gfortran` , C/C++ `man gcc` + +Intel +Fortran `man ifort` , C/C++ `man icc` + +### Useful compiler options + +Whilst difference codes will benefit from compiler optimisations in +different ways, for reasonable performance on Cirrus, at least +initially, we suggest the following compiler options: + +Intel +`-O2` + +GNU +`-O2 -ftree-vectorize -funroll-loops -ffast-math` + +When you have a application that you are happy is working correctly and +has reasonable performance you may wish to investigate some more +aggressive compiler optimisations. Below is a list of some further +optimisations that you can try on your application (Note: these +optimisations may result in incorrect output for programs that depend on +an exact implementation of IEEE or ISO rules/specifications for math +functions): + +Intel +`-fast` + +GNU +`-Ofast -funroll-loops` + +Vectorisation, which is one of the important compiler optimisations for +Cirrus, is enabled by default as follows: + +Intel +At `-O2` and above + +GNU +At `-O3` and above or when using `-ftree-vectorize` + +To promote integer and real variables from four to eight byte precision +for Fortran codes the following compiler flags can be used: + +Intel +`-real-size 64 -integer-size 64 -xAVX` (Sometimes the Intel compiler +incorrectly generates AVX2 instructions if the `-real-size 64` or `-r8` +options are set. Using the `-xAVX` option prevents this.) + +GNU +`-freal-4-real-8 -finteger-4-integer-8` + +## Using static linking/libraries + +By default, executables on Cirrus are built using shared/dynamic +libraries (that is, libraries which are loaded at run-time as and when +needed by the application) when using the wrapper scripts. + +An application compiled this way to use shared/dynamic libraries will +use the default version of the library installed on the system (just +like any other Linux executable), even if the system modules were set +differently at compile time. This means that the application may +potentially be using slightly different object code each time the +application runs as the defaults may change. This is usually the desired +behaviour for many applications as any fixes or improvements to the +default linked libraries are used without having to recompile the +application, however some users may feel this is not the desired +behaviour for their applications. + +Alternatively, applications can be compiled to use static libraries +(i.e. all of the object code of referenced libraries are contained in +the executable file). This has the advantage that once an executable is +created, whenever it is run in the future, it will always use the same +object code (within the limit of changing runtime environment). 
However, +executables compiled with static libraries have the potential +disadvantage that when multiple instances are running simultaneously +multiple copies of the libraries used are held in memory. This can lead +to large amounts of memory being used to hold the executable and not +application data. + +To create an application that uses static libraries you must pass an +extra flag during compilation, `-Bstatic`. + +Use the UNIX command `ldd exe_file` to check whether you are using an +executable that depends on shared libraries. This utility will also +report the shared libraries this executable will use if it has been +dynamically linked. + +## Intel modules and tools + +There are a number of different Intel compiler versions available, and +there is also a slight difference in the way different versions appear. + +A full list is available via `module avail intel`. + +The different available compiler versions are: + +- `intel-*/18.0.5.274` Intel 2018 Update 4 +- `intel-*/19.0.0.117` Intel 2019 Initial release +- `intel-19.5/*` Intel 2019 Update 5 +- `intel-20.4/*` Intel 2020 Update 4 + +We recommend the most up-to-date version in the first instance, unless +you have particular reasons for preferring an older version. + +For a note on Intel compiler version numbers, see this [Intel +page](https://software.intel.com/content/www/us/en/develop/articles/intel-compiler-and-composer-update-version-numbers-to-compiler-version-number-mapping.html) + +The different module names (or parts thereof) indicate: + +- `cc` C/C++ compilers only +- `cmkl` MKL libraries (see Software Libraries section) +- `compilers` Both C/C++ and Fortran compilers +- `fc` Fortran compiler only +- `itac` Intel Trace Analyze and Collector +- `mpi` Intel MPI +- `pxse` Intel Parallel Studio (all Intel modules) +- `tbb` Thread Building Blocks +- `vtune` VTune profiler - note that in older versions + (`intel-*/18.0.5.274`, `intel-*/19.0.0.117` VTune is launched as + `amplxe-gui` for GUI or `amplxe-cl` for CLI use) diff --git a/docs/user-guide/example_hybrid_hpempt.bash b/docs/user-guide/example_hybrid_hpempt.bash new file mode 100644 index 00000000..c62b8499 --- /dev/null +++ b/docs/user-guide/example_hybrid_hpempt.bash @@ -0,0 +1,33 @@ +#!/bin/bash --login + +# PBS job options (name, compute nodes, job time) +#PBS -N Example_MixedMode_Job +#PBS -l select=4:ncpus=36 +# Parallel jobs should always specify exclusive node access +#PBS -l place=scatter:excl +#PBS -l walltime=6:0:0 + +# Replace [budget code] below with your project code (e.g. t01) +#PBS -A [budget code] + +# Change to the directory that the job was submitted from +cd $PBS_O_WORKDIR + +# Load any required modules +module load mpt +module load intel-compilers-17 + +# Set the number of threads to 18 +# There are 18 OpenMP threads per MPI process +export OMP_NUM_THREADS=18 + +# Launch the parallel job +# Using 8 MPI processes +# 2 MPI processes per node +# 18 OpenMP threads per MPI process +# +# '-ppn' option is required for all HPE MPT jobs otherwise you will get an error similar to: +# 'mpiexec_mpt error: Need 36 processes but have only 1 left in PBS_NODEFILE.' 
+# +mpiexec_mpt -ppn 2 -n 8 omplace -nt 18 ./my_mixed_executable.x arg1 arg2 > my_stdout.txt 2> my_stderr.txt + diff --git a/docs/user-guide/example_hybrid_impi.bash b/docs/user-guide/example_hybrid_impi.bash new file mode 100644 index 00000000..0b16aa5a --- /dev/null +++ b/docs/user-guide/example_hybrid_impi.bash @@ -0,0 +1,32 @@ +#!/bin/bash --login + +# PBS job options (name, compute nodes, job time) +#PBS -N Example_MixedMode_Job +#PBS -l select=4:ncpus=36 +# Parallel jobs should always specify exclusive node access +#PBS -l place=scatter:excl +#PBS -l walltime=6:0:0 + +# Replace [budget code] below with your project code (e.g. t01) +#PBS -A [budget code] + +# Change to the directory that the job was submitted from +cd $PBS_O_WORKDIR + +# Load any required modules +module load intel-mpi-17 +module load intel-compilers-17 + +# Set the number of threads to 18 +# There are 18 OpenMP threads per MPI process +export OMP_NUM_THREADS=18 + +# Set placement to support hybrid jobs +export I_MPI_PIN_DOMAIN=omp + +# Launch the parallel job +# Using 8 MPI processes +# 2 MPI processes per node +# 18 OpenMP threads per MPI process +mpirun -n 8 -ppn 2 ./my_mixed_executable.x arg1 arg2 > my_stdout.txt 2> my_stderr.txt + diff --git a/docs/user-guide/example_mpi_hpempt.bash b/docs/user-guide/example_mpi_hpempt.bash new file mode 100644 index 00000000..7de140d6 --- /dev/null +++ b/docs/user-guide/example_mpi_hpempt.bash @@ -0,0 +1,32 @@ +#!/bin/bash --login + +# PBS job options (name, compute nodes, job time) +#PBS -N Example_MPI_Job +# Select 4 full nodes +#PBS -l select=4:ncpus=36 +# Parallel jobs should always specify exclusive node access +#PBS -l place=scatter:excl +#PBS -l walltime=00:20:00 + +# Replace [budget code] below with your project code (e.g. t01) +#PBS -A [budget code] + +# Change to the directory that the job was submitted from +cd $PBS_O_WORKDIR + +# Load any required modules +module load mpt +module load intel-compilers-17 + +# Set the number of threads to 1 +# This prevents any threaded system libraries from automatically +# using threading. +export OMP_NUM_THREADS=1 + +# Launch the parallel job +# Using 144 MPI processes and 36 MPI processes per node +# +# '-ppn' option is required for all HPE MPT jobs otherwise you will get an error similar to: +# 'mpiexec_mpt error: Need 36 processes but have only 1 left in PBS_NODEFILE.' +# +mpiexec_mpt -ppn 36 -n 144 ./my_mpi_executable.x arg1 arg2 > my_stdout.txt 2> my_stderr.txt diff --git a/docs/user-guide/example_mpi_impi.bash b/docs/user-guide/example_mpi_impi.bash new file mode 100644 index 00000000..ad41c27f --- /dev/null +++ b/docs/user-guide/example_mpi_impi.bash @@ -0,0 +1,28 @@ +#!/bin/bash --login + +# PBS job options (name, compute nodes, job time) +#PBS -N Example_MPI_Job +# Select 4 full nodes +#PBS -l select=4:ncpus=36 +# Parallel jobs should always specify exclusive node access +#PBS -l place=scatter:excl +#PBS -l walltime=00:20:00 + +# Replace [budget code] below with your project code (e.g. t01) +#PBS -A [budget code] + +# Change to the directory that the job was submitted from +cd $PBS_O_WORKDIR + +# Load any required modules +module load intel-mpi-17 +module load intel-compilers-17 + +# Set the number of threads to 1 +# This prevents any threaded system libraries from automatically +# using threading. 
+export OMP_NUM_THREADS=1 + +# Launch the parallel job +# Using 144 MPI processes and 36 MPI processes per node +mpirun -n 144 -ppn 36 ./my_mpi_executable.x arg1 arg2 > my_stdout.txt 2> my_stderr.txt diff --git a/docs/user-guide/gpu.md b/docs/user-guide/gpu.md new file mode 100644 index 00000000..45932049 --- /dev/null +++ b/docs/user-guide/gpu.md @@ -0,0 +1,579 @@ +# Using the Cirrus GPU Nodes + +Cirrus has 38 GPU compute nodes each equipped with 4 NVIDIA V100 (Volta) +GPU cards. This section of the user guide gives some details of the +hardware; it also covers how to compile and run standard GPU +applications. + +The GPU cards on Cirrus do not support graphics rendering tasks; they +are set to compute cluster mode and so +only support computational tasks. + +## Hardware details + +All of the Cirrus GPU nodes contain four Tesla V100-SXM2-16GB (Volta) +cards. Each card has 16 GB of high-bandwidth memory, `HBM`, often +referred to as device memory. Maximum device memory bandwidth is in the +region of 900 GB per second. Each card has 5,120 CUDA cores and 640 +Tensor cores. + +There is one GPU Slurm partition installed on Cirrus called simply +`gpu`. The 36 nodes in this partition have the Intel Cascade Lake +architecture. Users concerned with host performance should add the +specific compilation options appropriate for the processor. + +The Cascade Lake nodes have two 20-core sockets (2.5 GHz) and a total of +384 GB host memory (192 GB per socket). Each core supports two threads +in hardware. + +For further details of the V100 architecture see, + . + +## Compiling software for the GPU nodes + +### NVIDIA HPC SDK + +NVIDIA now make regular releases of a unified HPC SDK which provides the +relevant compilers and libraries needed to build and run GPU programs. +Versions of the SDK are available via the module system. + + $ module avail nvidia/nvhpc + +NVIDIA encourage the use of the latest available version, unless there +are particular reasons to use earlier versions. The default version is +therefore the latest module version present on the system. + +Each release of the NVIDIA HPC SDK may include several different +versions of the CUDA toolchain. For example, the `nvidia/nvhpc/21.2` +module comes with CUDA 10.2, 11.0 and 11.2. Only one of these CUDA +toolchains can be active at any one time and for `nvhpc/22.11` this is +CUDA 11.8. + +Here is a list of available HPC SDK versions, and the corresponding +version of CUDA: + +| Module | Supported CUDA Version | +|----------------------|------------------------| +| `nvidia/nvhpc/22.11` | CUDA 11.8 | +| `nvidia/nvhpc/22.2` | CUDA 11.6 | + +To load the latest NVIDIA HPC SDK use + + $ module load nvidia/nvhpc + +The following sections provide some details of compilation for different +programming models. + +### CUDA + +[CUDA](https://developer.nvidia.com/cuda-zone) is a parallel computing +platform and programming model developed by NVIDIA for general computing +on graphical processing units (GPUs). + +Programs, typically written in C or C++, are compiled with `nvcc`. As +well as `nvcc`, a host compiler is required. By default, a `gcc` module +is added when `nvidia/nvhpc` is loaded. + +Compile your source code in the usual way. + + nvcc -arch=sm_70 -o cuda_test.x cuda_test.cu + + +!!! Note + + The `-arch=sm_70` compile option ensures that the binary produced is + compatible with the NVIDIA Volta architecture. + + + +#### Using CUDA with Intel compilers + +You can load either the Intel 18 or Intel 19 compilers to use with +`nvcc`. 
+ + module unload gcc + module load intel-compilers-19 + +You can now use `nvcc -ccbin icpc` to compile your source code with the +Intel C++ compiler `icpc`. + + nvcc -arch=sm_70 -ccbin icpc -o cuda_test.x cuda_test.cu + +### Compiling OpenACC code + +OpenACC is a directive-based approach to introducing parallelism into +either C/C++ or Fortran codes. A code with OpenACC directives may be +compiled like so. + + $ module load nvidia/nvhpc + $ nvc program.c + + $ nvc++ program.cpp + +Note that `nvc` and `nvc++` are distinct from the NVIDIA CUDA compiler +`nvcc`. They provide a way to compile standard C or C++ programs without +explicit CUDA content. See `man nvc` or `man nvc++` for further details. + +### CUDA Fortran + +CUDA Fortran provides extensions to standard Fortran which allow GPU +functionality. CUDA Fortran files (with file extension `.cuf`) may be +compiled with the NVIDIA Fortran compiler. + + $ module load nvidia/nvhpc + $ nvfortran program.cuf + +See `man nvfortran` for further details. + +### OpenMP for GPUs + +The OpenMP API supports multi-platform shared-memory parallel +programming in C/C++ and Fortran and can offload computation from the +host (i.e. CPU) to one or more target devices (such as the GPUs on +Cirrus). OpenMP code can be compiled with the NVIDIA compilers in a +similar manner to OpenACC. To enable this functionality, you must add +`-mp=gpu` to your compile command. + + $ module load nvidia/nvhpc + $ nvc++ -mp=gpu program.cpp + +You can specify exactly which GPU to target with the `-gpu` flag. For +example, the Volta cards on Cirrus use the flag `-gpu=cc70`. + +During development it can be useful to have the compiler report +information about how it is processing OpenMP pragmas. This can be +enabled by the use of `-Minfo=mp`, see below. + + nvc -mp=gpu -Minfo=mp testprogram.c + main: + 24, #omp target teams distribute parallel for thread_limit(128) + 24, Generating Tesla and Multicore code + Generating "nvkernel_main_F1L88_2" GPU kernel + 26, Loop parallelized across teams and threads(128), schedule(static) + +## Submitting jobs to the GPU nodes + +To run a GPU job, a SLURM submission must specify a GPU partition and a +quality of service (QoS) as well as the number of GPUs required. You +specify the number of GPU cards you want using the `--gres=gpu:N` +option, where `N` is typically 1, 2 or 4. + + + +!!! Note + + As there are 4 GPUs per node, each GPU is associated with 1/4 of the + resources of the node, i.e., 10/40 physical cores and roughly 91/384 GB + in host memory. + + +Allocations of host resources are made pro-rata. For example, if 2 GPUs +are requested, `sbatch` will allocate 20 cores and around 190 GB of host +memory (in addition to 2 GPUs). Any attempt to use more than the +allocated resources will result in an error. + +This automatic allocation by SLURM for GPU jobs means that the +submission script should not specify options such as `--ntasks` and +`--cpus-per-task`. Such a job submission will be rejected. See below for +some examples of how to use host resources and how to launch MPI +applications. + +If you specify the `--exclusive` option, you will automatically be +allocated all host cores and all memory from the node irrespective of +how many GPUs you request. This may be needed if the application has a +large host memory requirement. + +If more than one node is required, exclusive mode `--exclusive` and +`--gres=gpu:4` options must be included in your submission script. 
It +is, for example, not possible to request 6 GPUs other than via exclusive +use of two nodes. + + + +!!! Warning + + In order to run jobs on the GPU nodes your budget must have positive GPU + hours *and* positive CPU core hours associated with it. However, only + your GPU hours will be consumed when running these jobs. + + + +### Partitions + +Your job script must specify a partition. The following table has a list +of relevant GPU partition(s) on Cirrus. + +| Partition | Description | Maximum Job Size (Nodes) | +|-----------|----------------------------------------|--------------------------| +| gpu | GPU nodes with Cascade Lake processors | 36 | + + + +### Quality of Service (QoS) + +Your job script must specify a QoS relevant for the GPU nodes. Available +QoS specifications are as follows. + +| QoS Name | Jobs Running Per User | Jobs Queued Per User | Max Walltime | Max Size | Partition | +|-------------|-----------------------|----------------------|--------------|----------|-----------| +| gpu | No limit | 128 jobs | 4 days | 64 GPUs | gpu | +| long | 5 jobs | 20 jobs | 14 days | 8 GPUs | gpu | +| short | 1 job | 2 jobs | 20 minutes | 4 GPUs | gpu | +| lowpriority | No limit | 100 jobs | 2 days | 16 GPUs | gpu | +| largescale | 1 job | 4 jobs | 24 hours | 144 GPUs | gpu | + + + +## Examples + +### Job submission script using one GPU on a single node + +A job script that requires 1 GPU accelerator and 10 CPU cores for 20 +minutes would look like the following. + + #!/bin/bash + # + #SBATCH --partition=gpu + #SBATCH --qos=gpu + #SBATCH --gres=gpu:1 + #SBATCH --time=00:20:00 + + # Replace [budget code] below with your project code (e.g. t01) + #SBATCH --account=[budget code] + + # Load the required modules + module load nvidia/nvhpc + + srun ./cuda_test.x + +This will execute one host process with access to one GPU. If we wish to +make use of the 10 host cores in this allocation, we could use host +threads via OpenMP. + + export OMP_NUM_THREADS=10 + export OMP_PLACES=cores + + srun --ntasks=1 --cpus-per-task=10 --hint=nomultithread ./cuda_test.x + +The launch configuration is specified directly to `srun` because, for +the GPU partitions, it is not possible to do this via `sbatch`. + +### Job submission script using multiple GPUs on a single node + +A job script that requires 4 GPU accelerators and 40 CPU cores for 20 +minutes would appear as follows. + + #!/bin/bash + # + #SBATCH --partition=gpu + #SBATCH --qos=gpu + #SBATCH --gres=gpu:4 + #SBATCH --time=00:20:00 + + # Replace [budget code] below with your project code (e.g. t01) + #SBATCH --account=[budget code] + + # Load the required modules + module load nvidia/nvhpc + + srun ./cuda_test.x + +A typical MPI application might assign one device per MPI process, in +which case we would want 4 MPI tasks in this example. This would again +be specified directly to `srun`. + + srun --ntasks=4 ./mpi_cuda_test.x + +### Job submission script using multiple GPUs on multiple nodes + +See below for a job script that requires 8 GPU accelerators for 20 +minutes. + + #!/bin/bash + # + #SBATCH --partition=gpu + #SBATCH --qos=gpu + #SBATCH --gres=gpu:4 + #SBATCH --nodes=2 + #SBATCH --exclusive + #SBATCH --time=00:20:00 + + # Replace [budget code] below with your project code (e.g. t01) + #SBATCH --account=[budget code] + + # Load the required modules + module load nvidia/nvhpc + + srun ./cuda_test.x + +An MPI application with four MPI tasks per node would be launched as +follows. 
+ + srun --ntasks=8 --tasks-per-node=4 ./mpi_cuda_test.x + +Again, these options are specified directly to `srun` rather than being +declared as `sbatch` directives. + +Attempts to oversubscribe an allocation (10 cores per GPU) will fail, +and generate an error message. + + srun: error: Unable to create step for job 234123: More processors requested + than permitted + +## Debugging GPU applications + +Applications may be debugged using `cuda-gdb`. This is an extension of +`gdb` which can be used with CUDA. We assume the reader is familiar with +`gdb`. + +First, compile the application with the `-g -G` flags in order to +generate debugging information for both host and device code. Then, +obtain an interactive session like so. + + $ srun --nodes=1 --partition=gpu --qos=short --gres=gpu:1 \ + --time=0:20:0 --account=[budget code] --pty /bin/bash + +Next, load the NVIDIA HPC SDK module and start `cuda-gdb` for your +application. + + $ module load nvidia/nvhpc + $ cuda-gdb ./my-application.x + NVIDIA (R) CUDA Debugger + ... + (cuda-gdb) + +Debugging then proceeds as usual. One can use the help facility within +`cuda-gdb` to find details on the various debugging commands. Type +`quit` to end your debug session followed by `exit` to close the +interactive session. + +Note, it may be necessary to set the temporary directory to somewhere in +the user space (e.g., `export TMPDIR=$(pwd)/tmp`) to prevent unexpected +internal CUDA driver errors. + +For further information on CUDA-GDB, see +. + +## Profiling GPU applications + +NVIDIA provide two useful tools for profiling performance of +applications: Nsight Systems and Nsight Compute; the former provides an +overview of application performance, while the latter provides detailed +information specifically on GPU kernels. + +### Using Nsight Systems + +Nsight Systems provides an overview of application performance and +should therefore be the starting point for investigation. To run an +application, compile as normal (including the `-g` flag) and then submit +a batch job. + + #!/bin/bash + + #SBATCH --time=00:10:00 + #SBATCH --nodes=1 + #SBATCH --exclusive + #SBATCH --partition=gpu + #SBATCH --qos=short + #SBATCH --gres=gpu:1 + + # Replace [budget code] below with your project code (e.g. t01) + #SBATCH --account=[budget code] + + module load nvidia/nvhpc + + srun -n 1 nsys profile -o prof1 ./my_application.x + +The run should then produce an additional output file called, in this +case, `prof1.qdrep`. The recommended way to view the contents of this +file is to download the NVIDIA Nsight package to your own machine (you +do not need the entire HPC SDK). Then copy the `.qdrep` file produced on +Cirrus so that if can be viewed locally. + +Note, a profiling run should probably be of a short duration so that the +profile information (contained in the `.qdrep` file) does not become +prohibitively large. + +Details of the download of Nsight Systems and a user guide can be found +via the links below. + + + + + +If your code was compiled with the tools provided by `nvidia/nvhpc/21.2` +you should download and install Nsight Systems v2020.5.1.85. + +### Using Nsight Compute + +Nsight Compute may be used in a similar way as Nsight Systems. A job may +be submitted like so. + + #!/bin/bash + + #SBATCH --time=00:10:00 + #SBATCH --nodes=1 + #SBATCH --exclusive + #SBATCH --partition=gpu + #SBATCH --qos=short + #SBATCH --gres=gpu:1 + + # Replace [budget code] below with your project code (e.g. 
t01)
+    #SBATCH --account=[budget code]
+
+    module load nvidia/nvhpc
+
+    srun -n 1 nv-nsight-cu-cli --section SpeedOfLight_RooflineChart \
+         -o prof2 -f ./my_application.x
+
+In this case, a file called `prof2.ncu-rep` should be produced. Again,
+the recommended way to view this file is to download the Nsight Compute
+package to your own machine, along with the `.ncu-rep` file from Cirrus.
+The `--section` option determines which statistics are recorded
+(typically not all hardware counters can be accessed at the same time).
+A common starting point is `--section MemoryWorkloadAnalysis`.
+
+Consult the NVIDIA documentation for further details.
+
+
+
+Nsight Compute v2021.3.1.0 has been found to work for codes compiled
+using `nvhpc` versions 21.2 and 21.9.
+
+
+## Monitoring the GPU Power Usage
+
+NVIDIA also provides a useful command line utility for the management and
+monitoring of NVIDIA GPUs: the NVIDIA System Management Interface
+`nvidia-smi`.
+
+The `nvidia-smi` command queries the available GPUs and reports current
+information, including but not limited to: driver versions, CUDA version,
+name, temperature, current power usage and maximum power capability. In
+this example output, there is one available GPU and it is idle:
+
+```
++-----------------------------------------------------------------------------+
+| NVIDIA-SMI 510.47.03    Driver Version: 510.47.03    CUDA Version: 11.6     |
+|-------------------------------+----------------------+----------------------+
+| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
+|                               |                      |               MIG M. |
+|===============================+======================+======================|
+|   0  Tesla V100-SXM2…    Off  | 00000000:1C:00.0 Off |                  Off |
+| N/A   38C    P0    57W / 300W |      0MiB / 16384MiB |      1%      Default |
+|                               |                      |                  N/A |
++-------------------------------+----------------------+----------------------+
+
++-----------------------------------------------------------------------------+
+| Processes:                                                                  |
+|  GPU   GI   CI        PID   Type   Process name                 GPU Memory |
+|        ID   ID                                                  Usage      |
+|=============================================================================|
+|  No running processes found                                                 |
++-----------------------------------------------------------------------------+
+```
+
+To monitor the power usage throughout the duration of a job, nvidia-smi
+will report data at every interval specified with the `--loop=SEC`
+option, with the tool sleeping in-between queries. The following command
+will print the output of nvidia-smi every 10 seconds to the specified
+output file.
+
+    nvidia-smi --loop=10 --filename=out-nvidia-smi.txt &
+
+Example submission script:
+
+    #!/bin/bash --login
+
+    # Slurm job options (name, compute nodes, job time)
+    #SBATCH --job-name=lammps_Example
+    #SBATCH --time=00:20:00
+    #SBATCH --nodes=1
+    #SBATCH --gres=gpu:4
+
+    # Replace [budget code] below with your project code (e.g. t01)
+    #SBATCH --account=[budget code]
+    #SBATCH --partition=gpu
+    #SBATCH --qos=gpu
+
+    # Load the required modules
+    module load nvidia/nvhpc
+
+    # Save the output of NVIDIA-SMI every 10 seconds
+    nvidia-smi --loop=10 --filename=out-nvidia-smi.txt &
+    srun ./cuda_test.x
+
+This submission script uses 4 GPU accelerators for 20 minutes, printing
+the output of nvidia-smi every 10 seconds to the out-nvidia-smi.txt
+output file. The & means the shell executes the command in the
+background.
+
+Consult the NVIDIA documentation for further details.
+
+[https://developer.nvidia.com/nvidia-system-management-interface](https://developer.nvidia.com/nvidia-system-management-interface)
+
+
+## Compiling and using GPU-aware MPI
+
+For applications using message passing via MPI, considerable
+improvements in performance may be available by allowing device memory
+references in MPI calls. This allows replacement of relevant host device
+transfers by direct communication within a node via NVLink.
Between +nodes, MPI communication will remain limited by network latency and +bandwidth. + +Version of OpenMPI with both CUDA-aware MPI support and SLURM support +are available, you should load the following modules: + + module load openmpi/4.1.4-cuda-11.8 + module load nvidia/nvhpc-nompi/22.11 + +The command you use to compile depends on whether you are compiling +C/C++ or Fortran. + +### Compiling C/C++ + +The location of the MPI include files and libraries must be specified +explicitly, e.g., + + nvcc -I${MPI_HOME}/include -L${MPI_HOME}/lib -lmpi -o my_program.x my_program.cu + +This will produce an executable in the usual way. + +### Compiling Fortran + +Use the `mpif90` compiler wrapper to compile Fortran code for GPU. e.g. + + mpif90 -o my_program.x my_program.f90 + +This will produce an executable in the usual way. + +### Run time + +A batch script to use such an executable might be: + + #!/bin/bash + + #SBATCH --time=00:20:00 + + #SBATCH --nodes=1 + #SBATCH --partition=gpu + #SBATCH --qos=gpu + #SBATCH --gres=gpu:4 + + # Load the appropriate modules, e.g., + module load openmpi/4.1.4-cuda-11.8 + module load nvidia/nvhpc-nompi/22.2 + + export OMP_NUM_THREADS=1 + + # Note the addition + export OMPI_MCA_pml=ob1 + + srun --ntasks=4 --cpus-per-task=10 --hint=nomultithread ./my_program + +Note the addition of the environment variable `OMPI_MCA_pml=ob1` is +required for correct operation. As before, MPI and placement options +should be directly specified to `srun` and not via `SBATCH` directives. diff --git a/docs/user-guide/introduction.md b/docs/user-guide/introduction.md new file mode 100644 index 00000000..83af2657 --- /dev/null +++ b/docs/user-guide/introduction.md @@ -0,0 +1,40 @@ +# Introduction + +This guide is designed to be a reference for users of the +high-performance computing (HPC) facility: Cirrus. It provides all the +information needed to access the system, transfer data, manage your +resources (disk and compute time), submit jobs, compile programs and +manage your environment. + +## Acknowledging Cirrus + +You should use the following phrase to acknowledge Cirrus in all +research outputs that have used the facility: + +*This work used the Cirrus UK National Tier-2 HPC Service at EPCC +(http://www.cirrus.ac.uk) funded by the University of Edinburgh and +EPSRC (EP/P020267/1)* + +You should also tag outputs with the keyword *Cirrus* whenever possible. + +## Hardware + +Details of the Cirrus hardware are available on the Cirrus website: + +- [Cirrus Hardware](http://www.cirrus.ac.uk/about/hardware.html) + +## Useful terminology + +This is a list of terminology used throughout this guide and its +meaning. + +CPUh +Cirrus CPU time is measured in CPUh. Each job you run on the service +consumes CPUhs from your budget. You can find out more about CPUhs and +how to track your usage in the [resource management section](../resource_management/) + +GPUh +Cirrus GPU time is measured in GPUh. Each job you run on the GPU nodes +consumes GPUhs from your budget, and requires positive CPUh, even though +these will not be consumed. You can find out more about GPUhs and how to +track your usage in the [resource management section](../resource_management/) diff --git a/docs/user-guide/network-upgrade-2023.md b/docs/user-guide/network-upgrade-2023.md new file mode 100644 index 00000000..c21bc334 --- /dev/null +++ b/docs/user-guide/network-upgrade-2023.md @@ -0,0 +1,51 @@ +# Cirrus Network Upgrade: 2023 + +During September 2023 Cirrus will be undergoing a Network upgrade. 
+ +On this page we describe the impact this will have and links to further +information. + +If you have any questions or concerns, please contact the Cirrus Service +Desk: + +## When will the upgrade happen and how long will it take? + +The outage dates will be: + + - Start: Monday 18th September 2023 09:00 + - Ended: Friday 22nd September 2023 11:55 + +We will notify users if we are able to complete this work ahead of +schedule. + +## What are the impacts on users from the upgrade? + +During the upgrade process + +- No login access +- No access to any data on the system +- The SAFE will be available during the outage but there will be reduced functionality due to the unavailability of the connection to Cirrus such as resetting of passwords or new account creation. + +Submitting new work, and running work + +- With no login access, it will not be possible to submit new jobs to the queues +- Jobs will continue to run, and queued jobs will be started as usual + +We will therefore be encouraging users to submit jobs to the queues in the period prior to the work, so that Cirrus can continue to run jobs during the outage. + +## Relaxing of queue limits + +In preparation for the Data Centre Network (DCN) upgrade we have relaxed the queue limits on all the QoS’s, so that users can submit a significantly larger number of jobs to Cirrus. These changes are intended to allow users to submit jobs that they wish to run during the upgrade, in advance of the start of the upgrade. The changes will be in place until the end of the Data Centre Network upgrade. + +## Quality of Service (QoS) + +QoS relaxed limits which will be in force during the Network upgrade. + +| QoS Name | Jobs Running Per User | Jobs Queued Per User | Max Walltime | Max Size | Applies to Partitions | Notes | +| --- | --- | --- | --- | --- | --- | --- | +| standard | No limit | 1000 jobs | 4 days | 88 nodes (3168 cores/25%) |standard | | +| largescale | 1 job | 20 jobs | 24 hours |228 nodes (8192+ cores/65%) or 144 GPUs | standard, gpu | | +| long | 5 jobs | 40 jobs | 14 days | 16 nodes or 8 GPUs | standard, gpu | | +|highpriority | 10 jobs | 20 jobs | 4 days | 140 nodes | standard +|gpu |No limit | 256 jobs | 4 days | 64 GPUs (16 nodes/40%) | gpu | +|lowpriority |No limit | 1000 jobs | 2 days | 36 nodes (1296 cores/10%) or 16 GPUs | standard, gpu | | \ No newline at end of file diff --git a/docs/user-guide/python.md b/docs/user-guide/python.md new file mode 100644 index 00000000..57df98dc --- /dev/null +++ b/docs/user-guide/python.md @@ -0,0 +1,556 @@ +# Using Python + +Python on Cirrus is provided by a number of +[Miniconda](https://conda.io/miniconda.html) modules and one +[Anaconda](https://www.continuum.io) module. (Miniconda being a small +bootstrap version of Anaconda). + +The Anaconda module is called `anaconda/python3` and is suitable for +running serial applications - for parallel applications using `mpi4py` +see [mpi4py for CPU](#mpi4py-for-cpu) or [mpi4py for GPU](#mpi4py-for-gpu). + +You can list the Miniconda modules by running `module avail python` on a +login node. Those module versions that have the `gpu` suffix are +suitable for use on the [Cirrus GPU nodes](../gpu). There are also +modules that extend these Python environments, e.g., `pyfr`, `horovod`, +`tensorflow` and `pytorch` - simply run `module help ` for +further info. 
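+
+For example (a short sketch; `pytorch` is just one of the extension
+modules mentioned above), you could list and inspect these modules as
+follows:
+
+``` bash
+module avail python        # list the Miniconda/Anaconda modules
+module load python/3.9.13  # load one of the CPU builds
+module help pytorch        # further information on an extension module
+```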
+ +The Miniconda modules support Python-based parallel codes, i.e., each +such `python` module provides a suite of packages pertinent to parallel +processing and numerical analysis such as `dask`, `ipyparallel`, +`jupyter`, `matplotlib`, `numpy`, `pandas` and `scipy`. + +All the packages provided by a module can be obtained by running +`pip list`. We now give some examples that show how the `python` modules +can be used on the Cirrus CPU/GPU nodes. + +## mpi4py for CPU + +The `python/3.9.13` module provides mpi4py 3.1.3 linked with OpenMPI +4.1.4. + +See `numpy-broadcast.py` below which is a simple MPI Broadcast example, +and the Slurm script `submit-broadcast.slurm` which demonstrates how to +run across it two compute nodes. + +
numpy-broadcast.py + +``` python +#!/usr/bin/env python + +""" +Parallel Numpy Array Broadcast +""" + +from mpi4py import MPI +import numpy as np +import sys + +comm = MPI.COMM_WORLD + +size = comm.Get_size() +rank = comm.Get_rank() +name = MPI.Get_processor_name() + +arraySize = 100 +if rank == 0: + data = np.arange(arraySize, dtype='i') +else: + data = np.empty(arraySize, dtype='i') + +comm.Bcast(data, root=0) + +if rank == 0: + sys.stdout.write( + "Rank %d of %d (%s) has broadcast %d integers.\n" + % (rank, size, name, arraySize)) +else: + sys.stdout.write( + "Rank %d of %d (%s) has received %d integers.\n" + % (rank, size, name, arraySize)) + + arrayBad = False + for i in range(100): + if data[i] != i: + arrayBad = True + break + + if arrayBad: + sys.stdout.write( + "Error, rank %d array is not as expected.\n" + % (rank)) +``` + +

+ +The MPI initialisation is done automatically as a result of calling +`from mpi4py import MPI`. + +
submit-broadcast.slurm + +``` bash +#!/bin/bash + +# Slurm job options (name, compute nodes, job time) +#SBATCH --job-name=broadcast +#SBATCH --time=00:20:00 +#SBATCH --exclusive +#SBATCH --partition=standard +#SBATCH --qos=standard +#SBATCH --account=[budget code] +#SBATCH --nodes=2 +#SBATCH --tasks-per-node=36 +#SBATCH --cpus-per-task=1 + +module load python/3.9.13 + +export OMPI_MCA_mca_base_component_show_load_errors=0 + +srun numpy-broadcast.py +``` + +

+ +The Slurm submission script (`submit-broadcast.slurm`) above sets a +`OMPI_MCA` environment variable before launching the job. That +particular variable suppresses warnings written to the job output file; +it can of course be removed. Please see the [OpenMPI +documentation](https://www.open-mpi.org/faq/?category=tuning#mca-def) +for info on all `OMPI_MCA` variables. + +## mpi4py for GPU + +There's also an mpi4py module (again using OpenMPI 4.1.4) that is +tailored for CUDA 11.6 on the Cirrus GPU nodes, `python/3.9.13-gpu`. We +show below an example that features an MPI reduction performed on a +[CuPy array](https://docs.cupy.dev/en/stable/overview.html) +(`cupy-allreduce.py`). + +
cupy-allreduce.py + +``` python +#!/usr/bin/env python + +""" +Reduce-to-all CuPy Arrays +""" + +from mpi4py import MPI +import cupy as cp +import sys + +comm = MPI.COMM_WORLD + +size = comm.Get_size() +rank = comm.Get_rank() +name = MPI.Get_processor_name() + +sendbuf = cp.arange(10, dtype='i') +recvbuf = cp.empty_like(sendbuf) +assert hasattr(sendbuf, '__cuda_array_interface__') +assert hasattr(recvbuf, '__cuda_array_interface__') +cp.cuda.get_current_stream().synchronize() +comm.Allreduce(sendbuf, recvbuf) + +assert cp.allclose(recvbuf, sendbuf*size) + +sys.stdout.write( + "%d (%s): recvbuf = %s\n" + % (rank, name, str(recvbuf))) +``` + +

+ +By default, the CuPy cache will be located within the user's home +directory. And so, as `/home` is not accessible from the GPU nodes, it +is necessary to set `CUPY_CACHE_DIR` such that the cache is on the +`/work` file system instead. + +
submit-allreduce.slurm + +``` bash +#!/bin/bash + +#SBATCH --job-name=allreduce +#SBATCH --time=00:20:00 +#SBATCH --exclusive +#SBATCH --partition=gpu +#SBATCH --qos=gpu +#SBATCH --account=[budget code] +#SBATCH --nodes=2 +#SBATCH --gres=gpu:4 + +module load python/3.9.13-gpu + +export CUPY_CACHE_DIR=${HOME/home/work}/.cupy/kernel_cache + +export OMPI_MCA_mpi_warn_on_fork=0 +export OMPI_MCA_mca_base_component_show_load_errors=0 + +srun --ntasks=8 --tasks-per-node=4 --cpus-per-task=1 cupy-allreduce.py +``` + +

+ +Again, the submission script (`submit-allreduce.slurm`) is the place to +set `OMPI_MCA` variables - the two shown are optional, see the link +below for further details. + + + +## Machine Learning frameworks + +There are several more Python-based modules that also target the Cirrus +GPU nodes. These include two machine learning frameworks, +`pytorch/1.12.1-gpu` and `tensorflow/2.9.1-gpu`. Both modules are Python +virtual environments that extend `python/3.9.13-gpu`. The MPI comms is +handled by the [Horovod](https://horovod.readthedocs.io/en/stable/) +0.25.0 package along with the [NVIDIA Collective Communications +Library](https://developer.nvidia.com/nccl) v2.11.4. + +A full package list for these environments can be obtained by loading +the module of interest and then running `pip list`. + +Please click on the link indicated to see examples of how to use the +[PyTorch and TensorFlow +modules](https://github.com/hpc-uk/build-instructions/blob/main/pyenvs/horovod/run_horovod_0.25.0_cirrus_gpu.md) +. + +## Installing your own Python packages (with pip) + +This section shows how to setup a local custom Python environment such +that it extends a centrally-installed `python` module. By extend, we +mean being able to install packages locally that are not provided by the +central `python`. This is needed because some packages such as `mpi4py` +must be built specifically for the Cirrus system and so are best +provided centrally. + +You can do this by creating a lightweight **virtual** environment where +the local packages can be installed. Further, this environment is +created on top of an existing Python installation, known as the +environment's **base** Python. + +Select the base Python by loading the `python` module you wish to +extend, e.g., `python/3.9.13-gpu` (you can run `module avail python` to +list all the available `python` modules). + +``` bash +[auser@cirrus-login1 auser]$ module load python/3.9.13 +``` + +Next, create the virtual environment within a designated folder. + +``` bash +python -m venv --system-site-packages /work/x01/x01/auser/myvenv +``` + +In our example, the environment is created within a `myvenv` folder +located on `/work`, which means the environment will be accessible from +the compute nodes. The `--system-site-packages` option ensures that this +environment is based on the currently loaded `python` module. See + for more details. + +``` bash +extend-venv-activate /work/x01/x01/auser/myvenv +``` + +The `extend-venv-activate` command ensures that your virtual +environment's activate script loads and unloads the base `python` module +when appropriate. You're now ready to activate your environment. + +``` bash +source /work/x01/x01/auser/myvenv/bin/activate +``` + +!!! Note + + The path above uses a fictitious project code, `x01`, and username, + `auser`. Please remember to replace those values with your actual + project code and username. Alternatively, you could enter + `${HOME/home/work}` in place of `/work/x01/x01/auser`. That command + fragment expands `${HOME}` and then replaces the `home` part with + `work`. + + + +Installing packages to your local environment can now be done as +follows. + +``` bash +(myvenv) [auser@cirrus-login1 auser]$ python -m pip install +``` + +Running `pip` directly as in `pip install ` will also +work, but we show the `python -m` approach as this is consistent with +the way the virtual environment was created. And when you have finished +installing packages, you can deactivate your environment by issuing the +`deactivate` command. 
+ +``` bash +(myvenv) [auser@cirrus-login1 auser]$ deactivate +[auser@cirrus-login1 auser]$ +``` + +The packages you have just installed locally will only be available once +the local environment has been activated. So, when running code that +requires these packages, you must first activate the environment, by +adding the activation command to the submission script, as shown below. + +
submit-myvenv.slurm + +``` bash +#!/bin/bash + +#SBATCH --job-name=myvenv +#SBATCH --time=00:20:00 +#SBATCH --exclusive +#SBATCH --partition=gpu +#SBATCH --qos=gpu +#SBATCH --account=[budget code] +#SBATCH --nodes=2 +#SBATCH --gres=gpu:4 + +source /work/x01/x01/auser/myvenv/bin/activate + +srun --ntasks=8 --tasks-per-node=4 --cpus-per-task=10 myvenv-script.py +``` + +

+ +Lastly, the environment being extended does not have to come from one of +the centrally-installed `python` modules. You could just as easily +create a local virtual environment based on one of the Machine Learning +(ML) modules, e.g., `horovod`, `tensorflow` or `pytorch`. This means you +would avoid having to install ML packages within your local area. Each +of those ML modules is based on a `python` module. For example, +`tensorflow/2.11.0-gpu` is itself an extension of `python/3.10.8-gpu`. + +## Installing your own Python packages (with conda) + +This section shows you how to setup a local custom Python environment +such that it duplicates a centrally-installed `python` module, ensuring +that your local `conda` environment will contain packages that are +compatible with the Cirrus system. + +Select the base Python by loading the `python` module you wish to +duplicate, e.g., `python/3.9.13-gpu` (you can run `module avail python` +to list all the available `python` modules). + +``` bash +[auser@cirrus-login1 auser]$ module load python/3.9.13 +``` + +Next, create the folder for holding your `conda` environments. This +folder should be on the `/work` file system as `/home` is not accessible +from the compute nodes. + +``` bash +CONDA_ROOT=/work/x01/x01/auser/condaenvs +mkdir -p ${CONDA_ROOT} +``` + +The following commands tell `conda` where to save your custom +environments and packages. + +``` bash +conda config --prepend envs_dirs ${CONDA_ROOT}/envs +conda config --prepend pkgs_dirs ${CONDA_ROOT}/pkgs +``` + +The `conda config` commands are executed just once and the configuration +details are held in a `.condarc` file located in your home directory. +You now need to move this `.condarc` file to a directory visible from +the compute nodes. + +``` bash +mv ~/.condarc ${CONDA_ROOT} +``` + +You can now activate the `conda` configuration. + +``` bash +export CONDARC=${CONDA_ROOT}/.condarc +eval "$(conda shell.bash hook)" +``` + +These two lines need to be called each time you want to use your virtual +`conda` environment. The next command creates that virtual environment. + +``` bash +conda create --clone base --name myvenv +``` + +The above creates an environment called `myvenv` that will hold the same +packages provided by the base `python` module. As this command involves +a significant amount of file copying and downloading, it may take a long +time to complete. When it has completed please activate the local +`myvenv` conda environment. + +``` bash +conda activate myvenv +``` + +You can now install packages using +`conda install -p ${CONDA_ROOT}/envs/myvenv `. And you can +see the packages currently installed in the active environment with the +command `conda list`. After all packages have been installed, simply run +`conda deactivate` twice in order to restore the original comand prompt. + +``` bash +(myvenv) [auser@cirrus-login1 auser]$ conda deactivate +(base) [auser@cirrus-login1 auser]$ conda deactivate +[auser@cirrus-login1 auser]$ +``` + +The submission script below shows how to use the conda environment +within a job running on the compute nodes. + +
submit-myvenv.slurm + +``` bash +#!/bin/bash + +#SBATCH --job-name=myvenv +#SBATCH --time=00:20:00 +#SBATCH --exclusive +#SBATCH --partition=gpu +#SBATCH --qos=gpu +#SBATCH --account=[budget code] +#SBATCH --nodes=2 +#SBATCH --gres=gpu:4 + +module load python/3.9.13 + +CONDA_ROOT=/work/x01/x01/auser/condaenvs +export CONDARC=${CONDA_ROOT}/.condarc +eval "$(conda shell.bash hook)" + +conda activate myvenv + +srun --ntasks=8 --tasks-per-node=4 --cpus-per-task=10 myvenv-script.py +``` + +

+ +You can see that using `conda` is less convenient compared to `pip`. In +particular, the centrally-installed Python packages on copied in to the +local `conda` environment, consuming some of the disk space allocated to +your project. Secondly, activating the `conda` environment within a +submission script is more involved: five commands are required +(including an explicit load for the base `python` module), instead of +the single `source` command that is sufficient for a `pip` environment. + +Further, `conda` cannot be used if the base environment is one of the +Machine Learning (ML) modules, as `conda` is not flexible enough to +gather Python packages from both the ML and base `python` modules (e.g., +the ML module `pytorch/2.0.0-gpu` is itself based on +`python/3.10.8-gpu`, and so `conda` will only duplicate packages +provided by the `python` module and not the ones supplied by `pytorch`). + +## Using JupyterLab on Cirrus + +It is possible to view and run JupyterLab on both the login and compute +nodes of Cirrus. Please note, you can test notebooks on the login nodes, +but please don’t attempt to run any computationally intensive work (such +jobs will be killed should they reach the login node CPU limit). + +If you want to run your JupyterLab on a compute node, you will need to +enter an [interactive session](../batch/#interactive-jobs); otherwise +you can start from a login node prompt. + +1. As described above, load the Anaconda module on Cirrus using + `module load anaconda/python3`. + +2. Run `export JUPYTER_RUNTIME_DIR=$(pwd)`. + +3. Start the JupyterLab server by running + `jupyter lab --ip=0.0.0.0 --no-browser` + + - once it’s started, you will see some lines resembling the + following output. + + + + Or copy and paste one of these URLs: + ... + or http://127.0.0.1:8888/lab?token= + + You will need the URL shown above for step 6. + +4. Please skip this step if you are connecting from Windows. If you are + connecting from Linux or macOS, open a new terminal window, and run + the following command. + + ssh @login.cirrus.ac.uk -L:: + + where \ is your username, \ is as shown in + the URL from the Jupyter output and \ is the name of the + node we’re currently on. On a login node, this will be + `cirrus-login1`, or similar; on a compute node, it will be a mix of + numbers and letters such as `r2i5n5`. + + + + !!! Note + + If, when you connect in the new terminal, you see a message of the + form channel_setup_fwd_listener_tcpip: + cannot listen to port: 8888, it means port 8888 is already in + use. You need to go back to step 3 (kill the existing jupyter lab) + and retry with a new explicit port number by adding the `--port=N` + option. The port number `N` can be in the range 5000-65535. You + should then use the same port number in place of 8888. + + + +5. Please skip this step if you are connecting from Linux or macOS. If + you are connecting from Windows, you should use MobaXterm to + configure an SSH tunnel as follows. + + 5.1. Click on the `Tunnelling` button above the MobaXterm terminal. + Create a new tunnel by clicking on `New SSH tunnel` in the window + that opens. + + 5.2. In the new window that opens, make sure the + `Local port forwarding` radio button is selected. + + 5.3. In the `forwarded port` text box on the left under + `My computer with MobaXterm`, enter the port number indicated in the + Jupyter server output. + + 5.4. In the three text boxes on the bottom right under `SSH server` + enter `login.cirrus.ac.uk`, your Cirrus username, and then `22`. + + 5.5. 
At the top right, under `Remote server`, enter the name of the + Cirrus login or compute node that you noted earlier followed by the + port number (e.g. 8888). + + 5.6. Click on the `Save` button. + + 5.7. In the tunnelling window, you will now see a new row for the + settings you just entered. If you like, you can give a name to the + tunnel in the leftmost column to identify it. Click on the small key + icon close to the right for the new connection to tell MobaXterm + which SSH private key to use when connecting to Cirrus. You should + tell it to use the same `.ppk` private key that you normally use. + + 5.8. The tunnel should now be configured. Click on the small start + button (like a play `>` icon) for the new tunnel to open it. You'll + be asked to enter your Cirrus password -- please do so. + +6. Now, if you open a browser window on your local machine, you should + be able to navigate to the URL from step 3, and this should display + the JupyterLab server. + + - Please note, you will get a connection error if you haven't used + the correct node name in step 4 or 5. + +If you are on a compute node, the JupyterLab server will be available +for the length of the interactive session you have requested. + +You can also run Jupyter sessions using the centrally-installed +Miniconda3 modules available on Cirrus. For example, the following link +provides instructions for how to setup a Jupyter server on a GPU node. + + diff --git a/docs/user-guide/reading.md b/docs/user-guide/reading.md new file mode 100644 index 00000000..d841980a --- /dev/null +++ b/docs/user-guide/reading.md @@ -0,0 +1,60 @@ +# References and further reading + +## Online Documentation and Resources + +- GNU compiler online documentation: +- MPI Home pages: +- Free MPI implementation useful for testing: + +- Various HPC Workshops by NCCS: + +- An HPC tutorial: +- An MPI tutorial: + +- HPC tutorials by NCSA + +## MPI programming + +- MPI: The Complete Reference. Snir, Otto, Huss-Lederman, Walker and + Dongarra. MIT Press. ISBN 0 262 69184 1 +- MPI: The Complete Reference, volume 2. Gropp et al. MIT Press. ISBN + 0262571234 +- Using MPI. Gropp, Lusk, Skjellum. MIT Press. ISBN 0 262 57104 8 + +## OpenMP programming + +- Parallel Programming in OpenMP. Chandra, Kohr, Menon, Dagum, Maydan, + McDonald. Morgan Kaufmann. ISBN: 1558606718 + +## Parallel programming + +- Practical Parallel Programming. Gregory V. Wilson. MIT Press. ISBN 0 + 262 23186 7 +- Designing and Building Parallel Programs. Ian Foster. Addison-Wesley. + ISBN 0 201 57594 9 +- Parallel Computing Works! Roy D. Williams, Paul C. Messina (Editor), + Geoffrey Fox (Editor), Mark Fox Morgan Kaufmann Publishers; ISBN: + 1558602534 +- Parallel programming with MPI. Peter S. Pancheco. The complete set of + C and Fortran example programs for this book are available at: + + +## Programming languages + +- Fortran90/95 Explained. Metcalf and Reid. Oxford Science Publications. + ISBN 0 19 851888 9 +- Fortran 90 Programming. Ellis, Philips, Lahey. Addison-Wesley. ISBN + 0-201-54446-6 +- Programmers Guide to Fortran90. Brainerd, Goldberg, Adams. Unicomp. + ISBN 0-07-000248-7 +- The High Performance Fortran Handbook. Koelbel, Loveman, Schreiber, + Steele, Zosel. ISBN 0-262-11185-3 / 0-262-61094-9 +- Parallel Programming using C++. G.V.Wilson and P Lu. MIT Press. ISBN 0 + 262 73118 5 + +## Programming skills + +- Debugging and Performance Tuning for Parallel Computing Systems, + Simmons et al. +- Foundations of Parallel Programming, A Machine-independent Approach, + Lewis. 
diff --git a/docs/user-guide/resource_management.md b/docs/user-guide/resource_management.md new file mode 100644 index 00000000..4b164e47 --- /dev/null +++ b/docs/user-guide/resource_management.md @@ -0,0 +1,343 @@ +# File and Resource Management
+
+This section covers some of the tools and technical knowledge that will
+be key to maximising the usage of the Cirrus system, such as the online
+administration tool SAFE and calculating the CPU-time available.
+
+The default file permissions are then outlined, along with a description
+of changing these permissions to the desired setting. This leads on to
+the sharing of data between users and systems, often a vital tool for
+project groups and collaboration.
+
+Finally we cover some guidelines for I/O and data archiving on Cirrus.
+
+## The Cirrus Administration Web Site (SAFE)
+
+All users have a login and password on the Cirrus Administration Web
+Site (also known as the 'SAFE'): [SAFE](https://safe.epcc.ed.ac.uk/).
+Once logged into this web site, users can find out much about their
+usage of the Cirrus system, including:
+
+- Account details - password reset, change contact details
+- Project details - project code, start and end dates
+- CPUh balance - how much time is left in each project you are a member
+  of
+- Filesystem details - current usage and quotas
+- Reports - generate reports on your usage over a specified period,
+  including individual job records
+- Helpdesk - raise queries and track progress of open queries
+
+## Checking your CPU/GPU time allocations
+
+You can view these details by logging into the SAFE
+(<https://safe.epcc.ed.ac.uk>).
+
+Use the *Login accounts* menu to select the user account that you wish
+to query. The page for the login account will summarise the resources
+available to the account.
+
+You can also generate reports on your usage over a particular period and
+examine the details of how many CPUh or GPUh individual jobs on the
+system cost. To do this use the *Service information* menu and select
+*Report generator*.
+
+## Disk quotas
+
+Disk quotas on Cirrus are managed via [SAFE](https://safe.epcc.ed.ac.uk).
+
+For live disk usage figures on the Lustre `/work` file system, use
+
+    lfs quota -hu <username> /work
+
+    lfs quota -hg <groupname> /work
+
+## Backup policies
+
+The `/home` file system is not backed up.
+
+The `/work` file system is not backed up.
+
+The solid-state storage `/scratch/space1` file system is not backed up.
+
+We strongly advise that you keep copies of any critical data on an
+alternative system that is fully backed up.
+
+## Sharing data with other Cirrus users
+
+How you share data with other Cirrus users depends on whether or not
+they belong to the same project as you. Each project has two shared
+folders that can be used for sharing data.
+
+### Sharing data with Cirrus users in your project
+
+Each project has an inner shared folder on the `/home` and `/work`
+filesystems:
+
+    /home/[project code]/[project code]/shared
+
+    /work/[project code]/[project code]/shared
+
+This folder has read/write permissions for all project members. You can
+place any data you wish to share with other project members in this
+directory. For example, if your project code is `x01` the inner shared
+folder on the `/work` file system would be located at
+`/work/x01/x01/shared`.
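+
+For example, to make an archive of results available to everyone in your
+project, you could copy it into the inner shared folder. The file name
+and account used below are purely illustrative:
+
+    cp /work/x01/x01/auser/results.tar.gz /work/x01/x01/shared/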
+
+### Sharing data with all Cirrus users
+
+Each project also has an outer shared folder on the `/home` and `/work`
+filesystems:
+
+    /home/[project code]/shared
+
+    /work/[project code]/shared
+
+It is writable by all project members and readable by any user on the
+system. You can place any data you wish to share with other Cirrus users
+who are not members of your project in this directory. For example, if
+your project code is `x01` the outer shared folder on the `/work` file
+system would be located at `/work/x01/shared`.
+
+## File permissions and security
+
+You should check the permissions of any files that you place in the
+shared area, especially if those files were created in your own Cirrus
+account. Such files are likely to be readable only by you.
+
+The chmod command below shows how to make sure that a file placed in the
+outer shared folder is also readable by all Cirrus users.
+
+    chmod a+r /work/x01/shared/your-shared-file.txt
+
+Similarly, for the inner shared folder, chmod can be called such that
+read permission is granted to all users within the x01 project.
+
+    chmod g+r /work/x01/x01/shared/your-shared-file.txt
+
+If you're sharing a set of files stored within a folder hierarchy, the
+chmod is slightly more complicated.
+
+    chmod -R a+Xr /work/x01/shared/my-shared-folder
+    chmod -R g+Xr /work/x01/x01/shared/my-shared-folder
+
+The `-R` option ensures that the read permission is enabled recursively
+and the `+X` guarantees that the user(s) you're sharing the folder with
+can access the subdirectories below `my-shared-folder`.
+
+Default Unix file permissions can be specified by the `umask` command.
+The default umask value on Cirrus is 22, which provides "group" and
+"other" read permissions for all files created, and "group" and "other"
+read and execute permissions for all directories created. This is highly
+undesirable, as it allows everyone else on the system to access (but at
+least not modify or delete) every file you create. Thus it is strongly
+recommended that users change this default umask behaviour, by adding
+the command `umask 077` to their `$HOME/.profile` file. This umask
+setting only allows the user access to any file or directory created.
+The user can then selectively enable "group" and/or "other" access to
+particular files or directories if required.
+
+## File types
+
+### ASCII (or formatted) files
+
+These are the most portable, but can be extremely inefficient to read
+and write. There is also the problem that if the formatting is not done
+correctly, the data may not be output to full precision (or to the
+subsequently required precision), resulting in inaccurate results when
+the data is used. Another common problem with formatted files is FORMAT
+statements that fail to provide an adequate range to accommodate future
+requirements, e.g. if we wish to output the total number of processors,
+NPROC, used by the application, the statement:
+
+    WRITE (*,'(I3)') NPROC
+
+will not work correctly if NPROC is greater than 999.
+
+### Binary (or unformatted) files
+
+These are much faster to read and write, especially if an entire array
+is read or written with a single READ or WRITE statement. However the
+files produced may not be readable on other systems. One common cause is
+a difference in byte ordering (endianness) between systems; this can be
+handled with the GNU compiler `-fconvert=swap` compiler option.
+This compiler option often needs to be used together with a second
+option `-frecord-marker`, which specifies the length of record marker
+(extra bytes inserted before or after the actual data in the binary
+file) for unformatted files generated on a particular system. To read a
+binary file generated by a big-endian system on Cirrus, use
+`-fconvert=swap -frecord-marker=4`. Please note that, for the same
+'length of record marker' reason, the unformatted files generated by GNU
+and other compilers on Cirrus are not compatible. In fact, the same
+WRITE statements would result in slightly larger files with the GNU
+compiler. Therefore it is recommended to use the same compiler for your
+simulations and related pre- and post-processing jobs.
+
+Other options for file formats include:
+
+**Direct access files**
+
+Fortran unformatted files with specified record lengths. These may be
+more portable between different systems than ordinary (i.e. sequential
+IO) unformatted files, with significantly better performance than
+formatted (or ASCII) files. The "endian" issue will, however, still be a
+potential problem.
+
+**Portable data formats**
+
+These machine-independent formats for representing scientific data are
+specifically designed to enable the same data files to be used on a wide
+variety of different hardware and operating systems. The most common
+formats are:
+
+- netCDF:
+- HDF:
+
+It is important to note that these portable data formats are evolving
+standards, so make sure you are aware of which version of the
+standard/software you are using, and keep up-to-date with any
+backward-compatibility implications of each new release.
+
+## File IO Performance Guidelines
+
+Here are some general guidelines:
+
+- Whichever data formats you choose, it is vital that you test that you
+  can access your data correctly on all the different systems where it
+  is required. This testing should be done as early as possible in the
+  software development or porting process (i.e. before you generate lots
+  of data from expensive production runs), and should be repeated with
+  every major software upgrade.
+- Document the file formats and metadata of your important data files
+  very carefully. The best documentation will include a copy of the
+  relevant I/O subroutines from your code. Of course, this documentation
+  must be kept up-to-date with any code modifications.
+- Use binary (or unformatted) format for files that will only be used on
+  the Intel system, e.g. for checkpointing files. This will give the
+  best performance. Binary files may also be suitable for larger output
+  data files, if they can be read correctly on other systems.
+- Most codes will produce some human-readable (i.e. ASCII) files to
+  provide some information on the progress and correctness of the
+  calculation. Plan ahead when choosing format statements to allow for
+  future code usage, e.g. larger problem sizes and processor counts.
+- If the data you generate is widely shared within a large community, or
+  if it must be archived for future reference, invest the time and
+  effort to standardise on a suitable portable data format, such as
+  netCDF or HDF.
+
+## Common I/O patterns
+
+There are a number of I/O patterns that are frequently used in
+applications:
+
+### Single file, single writer (Serial I/O)
+
+A common approach is to funnel all the I/O through a single master
+process.
Although this has the advantage of producing a single file, the
+fact that only a single client is doing all the I/O means that it gains
+little benefit from the parallel file system.
+
+### File-per-process (FPP)
+
+One of the first parallel strategies people use for I/O is for each
+parallel process to write to its own file. This is a simple scheme to
+implement and understand but has the disadvantage that, at the end of
+the calculation, the data is spread across many different files and may
+therefore be difficult to use for further analysis without a data
+reconstruction stage.
+
+### Single file, multiple writers without collective operations
+
+There are a number of ways to achieve this. For example, many processes
+can open the same file but access different parts by skipping some
+initial offset; parallel I/O libraries such as MPI-IO, HDF5 and NetCDF
+also enable this.
+
+Shared-file I/O has the advantage that all the data is organised
+correctly in a single file making analysis or restart more
+straightforward.
+
+The problem is that, with many clients all accessing the same file,
+there can be a lot of contention for file system resources.
+
+### Single Shared File with collective writes (SSF)
+
+The problem with having many clients performing I/O at the same time is
+that, to prevent them clashing with each other, the I/O library may have
+to take a conservative approach. For example, a file may be locked while
+each client is accessing it which means that I/O is effectively
+serialised and performance may be poor.
+
+However, if I/O is done collectively where the library knows that all
+clients are doing I/O at the same time, then reads and writes can be
+explicitly coordinated to avoid clashes. It is only through collective
+I/O that the full bandwidth of the file system can be realised while
+accessing a single file.
+
+## Achieving efficient I/O
+
+This section provides information on getting the best performance out of
+the `/work` parallel file system on Cirrus when writing data,
+particularly using parallel I/O patterns.
+
+You may find that using the solid state storage (see the
+`/user-guide/solidstate` section) gives better performance than `/work`
+for some applications and I/O patterns.
+
+### Lustre
+
+The Cirrus `/work` file system uses Lustre as a parallel file system
+technology. The Lustre file system provides POSIX semantics (changes on
+one node are immediately visible on other nodes) and can support very
+high data rates for appropriate I/O patterns.
+
+### Striping
+
+One of the main factors leading to the high performance of `/work`
+Lustre file systems is the ability to stripe data across multiple Object
+Storage Targets (OSTs) in a round-robin fashion. Files are striped when
+the data is split up into chunks that will then be stored on different
+OSTs across the `/work` file system. Striping might improve the I/O
+performance because it increases the available bandwidth: multiple
+processes can read and write the same files simultaneously. However,
+striping can also increase the overhead. Choosing the right striping
+configuration is key to obtaining high-performance results.
+
+Users have control of a number of striping settings on Lustre file
+systems. Although these parameters can be set on a per-file basis, they
+are usually set on the directory where your output files will be written
+so that all output files inherit the settings.
+ +#### Default configuration + +The file system on Cirrus has the following default stripe settings: + +- A default stripe count of 1 +- A default stripe size of 1 MiB (1048576 bytes) + +These settings have been chosen to provide a good compromise for the +wide variety of I/O patterns that are seen on the system but are +unlikely to be optimal for any one particular scenario. The Lustre +command to query the stripe settings for a directory (or file) is +`lfs getstripe`. For example, to query the stripe settings of an already +created directory `res_dir`: + + $ lfs getstripe res_dir/ + res_dir + stripe_count: 1 stripe_size: 1048576 stripe_offset: -1 + +#### Setting Custom Striping Configurations + +Users can set stripe settings for a directory (or file) using the +`lfs setstripe` command. The options for `lfs setstripe` are: + +- `[--stripe-count|-c]` to set the stripe count; 0 means use the system + default (usually 1) and -1 means stripe over all available OSTs. +- `[--stripe-size|-s]` to set the stripe size; 0 means use the system + default (usually 1 MB) otherwise use k, m or g for KB, MB or GB + respectively +- `[--stripe-index|-i]` to set the OST index (starting at 0) on which to + start striping for this file. An index of -1 allows the MDS to choose + the starting index and it is strongly recommended, as this allows + space and load balancing to be done by the MDS as needed. + +For example, to set a stripe size of 4 MiB for the existing directory +`res_dir`, along with maximum striping count you would use: + + $ lfs setstripe -s 4m -c -1 res_dir/ diff --git a/docs/user-guide/singularity.md b/docs/user-guide/singularity.md new file mode 100644 index 00000000..49d47806 --- /dev/null +++ b/docs/user-guide/singularity.md @@ -0,0 +1,397 @@ +# Singularity Containers + +This page was originally based on the documentation at the [University +of Sheffield HPC +service](http://docs.hpc.shef.ac.uk/en/latest/sharc/software/apps/singularity.html). + +Designed around the notion of mobility of compute and reproducible +science, Singularity enables users to have full control of their +operating system environment. This means that a non-privileged user can +"swap out" the Linux operating system and environment on the host for a +Linux OS and environment that they control. So if the host system is +running CentOS Linux but your application runs in Ubuntu Linux with a +particular software stack, you can create an Ubuntu image, install your +software into that image, copy the image to another host (e.g. Cirrus), +and run your application on that host in its native Ubuntu environment. + +Singularity also allows you to leverage the resources of whatever host +you are on. This includes high-speed interconnects (e.g. Infiniband), +file systems (e.g. Lustre) and potentially other resources (such as the +licensed Intel compilers on Cirrus). + + + +!!! Note + + Singularity only supports Linux containers. You cannot create images + that use Windows or macOS (this is a restriction of the containerisation + model rather than of Singularity). + + +## Useful Links + +- [Singularity website](https://www.sylabs.io/) +- [Singularity documentation archive](https://www.sylabs.io/docs/) + +## About Singularity Containers (Images) + +Similar to Docker, a Singularity container (or, more commonly, *image*) +is a self-contained software stack. As Singularity does not require a +root-level daemon to run its images (as is required by Docker) it is +suitable for use on a multi-user HPC system such as Cirrus. 
Within the
+container/image, you have exactly the same permissions as you do in a
+standard login session on the system.
+
+In principle, this means that an image created on your local machine
+with all your research software installed for local development will
+also run on Cirrus.
+
+Pre-built images (such as those on [DockerHub](http://hub.docker.com) or
+[SingularityHub](https://singularity-hub.org/)) can simply be downloaded
+and used on Cirrus (or anywhere else Singularity is installed); see
+[Using Singularity Images on Cirrus](#using-singularity-images-on-cirrus)
+below.
+
+Creating and modifying images requires root permission and so must be
+done on a system where you have such access (in practice, this is
+usually within a virtual machine on your laptop/workstation); see
+[Creating Your Own Singularity Images](#creating-your-own-singularity-images)
+below.
+
+## Using Singularity Images on Cirrus
+
+Singularity images can be used on Cirrus in a number of ways.
+
+1. Interactively on the login nodes
+2. Interactively on compute nodes
+3. As serial processes within a non-interactive batch script
+4. As parallel processes within a non-interactive batch script
+
+We provide information on each of these scenarios. First, we describe
+briefly how to get existing images onto Cirrus so that you can use them.
+
+### Getting existing images onto Cirrus
+
+Singularity images are simply files, so if you already have an image
+file, you can use `scp` to copy the file to Cirrus as you would with any
+other file.
+
+If you wish to get an image from one of the container image repositories
+then Singularity allows you to do this from Cirrus itself.
+
+For example, to retrieve an image from SingularityHub on Cirrus we can
+simply issue a Singularity command to pull the image.
+
+    [user@cirrus-login1 ~]$ module load singularity
+    [user@cirrus-login1 ~]$ singularity pull hello-world.sif shub://vsoch/hello-world
+
+The image located at the `shub` URI is written to a Singularity Image
+File (SIF) called `hello-world.sif`.
+
+### Interactive use on the login nodes
+
+The container represented by the image file can be run on the login node
+like so.
+
+    [user@cirrus-login1 ~]$ singularity run hello-world.sif
+    RaawwWWWWWRRRR!! Avocado!
+    [user@cirrus-login1 ~]$
+
+We can also `shell` into the container.
+
+    [user@cirrus-login1 ~]$ singularity shell hello-world.sif
+    Singularity> ls /
+    bin boot dev environment etc home lib lib64 lustre media mnt opt proc rawr.sh root run sbin singularity srv sys tmp usr var
+    Singularity> exit
+    exit
+    [user@cirrus-login1 ~]$
+
+For more information see the [Singularity
+documentation](https://www.sylabs.io/guides/3.7/user-guide).
+
+### Interactive use on the compute nodes
+
+The process for using an image interactively on the compute nodes is
+very similar to that for using them on the login nodes. The only
+difference is that you first have to submit an interactive serial job to
+get interactive access to the compute node.
+
+First though, move to a suitable location on `/work` and re-pull the
+`hello-world` image. This step is necessary as the compute nodes do not
+have access to the `/home` file system.
+
+    [user@cirrus-login1 ~]$ cd ${HOME/home/work}
+    [user@cirrus-login1 ~]$ singularity pull hello-world.sif shub://vsoch/hello-world
+
+Now reserve a full node to work on interactively by issuing an `salloc`
+command; see below.
+ + [user@cirrus-login1 ~]$ salloc --exclusive --nodes=1 \ + --tasks-per-node=36 --cpus-per-task=1 --time=00:20:00 \ + --partition=standard --qos=standard --account=[budget code] + salloc: Pending job allocation 14507 + salloc: job 14507 queued and waiting for resources + salloc: job 14507 has been allocated resources + salloc: Granted job allocation 14507 + salloc: Waiting for resource configuration + salloc: Nodes r1i0n8 are ready for job + [user@cirrus-login1 ~]$ ssh r1i0n8 + +Note the prompt has changed to show you are on a compute node. Once you +are logged in to the compute node (you may need to submit your account +password), move to a suitable location on `/work` as before. You can now +use the `hello-world` image in the same way you did on the login node. + + [user@r1i0n8 ~]$ cd ${HOME/home/work} + [user@r1i0n8 ~]$ singularity shell hello-world.sif + Singularity> exit + exit + [user@r1i0n8 ~]$ exit + logout + Connection to r1i0n8 closed. + [user@cirrus-login1 ~]$ exit + exit + salloc: Relinquishing job allocation 14507 + salloc: Job allocation 14507 has been revoked. + [user@cirrus-login1 ~]$ + +Note we used `exit` to leave the interactive container shell and then +called `exit` twice more to close the interactive job on the compute +node. + +### Serial processes within a non-interactive batch script + +You can also use Singularity images within a non-interactive batch +script as you would any other command. If your image contains a +*runscript* then you can use `singularity run` to execute the runscript +in the job. You can also use `singularity exec` to execute arbitrary +commands (or scripts) within the image. + +An example job submission script to run a serial job that executes the +runscript within the `hello-world.sif` we built above on Cirrus would be +as follows. + + #!/bin/bash --login + + # job options (name, compute nodes, job time) + #SBATCH --job-name=hello-world + #SBATCH --ntasks=1 + #SBATCH --exclusive + #SBATCH --time=0:20:0 + #SBATCH --partition=standard + #SBATCH --qos=standard + + # Replace [budget code] below with your project code (e.g. t01) + #SBATCH --account=[budget code] + + # Load any required modules + module load singularity + + # Run the serial executable + srun --cpu-bind=cores singularity run ${HOME/home/work}/hello-world.sif + +Submit this script using the `sbatch` command and once the job has +finished, you should see `RaawwWWWWWRRRR!! Avocado!` in the Slurm output +file. + +### Parallel processes within a non-interactive batch script + +Running a Singularity container on the compute nodes isn't too different +from launching a normal parallel application. The submission script +below shows that the `srun` command now contains an additional +`singularity` clause. + + #!/bin/bash --login + + # job options (name, compute nodes, job time) + #SBATCH --job-name=[name of application] + #SBATCH --nodes=4 + #SBATCH --tasks-per-node=36 + #SBATCH --cpus-per-task=1 + #SBATCH --exclusive + #SBATCH --time=0:20:0 + #SBATCH --partition=standard + #SBATCH --qos=standard + + # Replace [budget code] below with your project code (e.g. t01) + #SBATCH --account=[budget code] + + # Load any required modules + module load mpt + module load singularity + + # The host bind paths for the Singularity container. + BIND_ARGS=/scratch/sw,/opt/hpe,/etc/libibverbs.d,/path/to/input/files + + # The file containing environment variable settings that will allow + # the container to find libraries on the host, e.g., LD_LIBRARY_PATH . 
+    ENV_PATH=/path/to/container/environment/file
+
+    CONTAINER_PATH=/path/to/singularity/image/file
+
+    APP_PATH=/path/to/containerized/application/executable
+    APP_PARAMS=[application parameters]
+
+    srun --distribution=block:block --hint=nomultithread \
+        singularity exec --bind ${BIND_ARGS} --env-file ${ENV_PATH} ${CONTAINER_PATH} \
+            ${APP_PATH} ${APP_PARAMS}
+
+The script above runs a containerized application such that each of the
+four nodes requested is fully populated. In general, the containerized
+application's input and output will be read from and written to a
+location on the host; hence, it is necessary to pass a suitable bind
+path to singularity (`/path/to/input/files`).
+
+
+
+!!! Note
+
+    The paths in the submission script that begin `/path/to` should be
+    provided by the user. All but one of these paths are host specific;
+    the exception is `APP_PATH`, which should be given as a path relative
+    to the container file system.
+
+
+
+If the Singularity image file was built according to the [Bind
+model](https://sylabs.io/guides/3.7/user-guide/mpi.html#bind-model), you
+will need to specify certain paths (`--bind`) and environment variables
+(`--env-file`) that allow the containerized application to find the
+required MPI libraries.
+
+Otherwise, if the image follows the [Hybrid
+model](https://sylabs.io/guides/3.7/user-guide/mpi.html#hybrid-model)
+and so contains its own MPI implementation, you instead need to be sure
+that the containerized MPI is compatible with the host MPI, the one
+loaded in the submission script. In the example above, the host MPI is
+HPE MPT 2.25, but you could also use OpenMPI (with `mpirun`), either by
+loading a suitable `openmpi` module or by referencing the paths to an
+OpenMPI installation that was built locally (i.e., within your Cirrus
+work folder).
+
+## Creating Your Own Singularity Images
+
+You can create Singularity images by importing from DockerHub or
+Singularity Hub directly to Cirrus. If you wish to create your own
+custom image then you must install Singularity on a system where you
+have root (or administrator) privileges - often your own laptop or
+workstation.
+
+We provide links below to instructions on how to install Singularity
+locally and then cover what options you need to include in a Singularity
+definition file in order to create images that can run on Cirrus and
+access the software development modules. This can be useful if you want
+to create a custom environment but still want to compile and link
+against libraries that you only have access to on Cirrus, such as the
+Intel compilers and HPE MPI libraries.
+
+### Installing Singularity on Your Local Machine
+
+You will need Singularity installed on your machine in order to locally
+run, create and modify images. How you install Singularity on your
+laptop/workstation depends on the operating system you are using.
+
+If you are using Windows or macOS, the simplest solution is to use
+[Vagrant](http://www.vagrantup.com) to give you an easy-to-use virtual
+environment with Linux and Singularity installed. The Singularity
+website has instructions on how to use this method to install
+Singularity.
+
+- [Installing Singularity on macOS with
+  Vagrant](https://sylabs.io/guides/3.7/admin-guide/installation.html#mac)
+- [Installing Singularity on Windows with
+  Vagrant](https://sylabs.io/guides/3.7/admin-guide/installation.html#windows)
+
+If you are using Linux then you can usually install Singularity
+directly.
+ +- [Installing Singularity on + Linux](https://sylabs.io/guides/3.7/admin-guide/installation.html#installation-on-linux) + +### Accessing Cirrus Modules from Inside a Container + +You may want your custom image to be able to access the modules +environment on Cirrus so you can make use of custom software that you +cannot access elsewhere. We demonstrate how to do this for a CentOS 7 +image but the steps are easily translated for other flavours of Linux. + +For the Cirrus modules to be available in your Singularity container you +need to ensure that the `environment-modules` package is installed in +your image. + +In addition, when you use the container you must invoke access as a +login shell to have access to the module commands. + +Below, is an example Singularity definition file that builds a CentOS 7 +image with access to TCL modules already installed on Cirrus. + + BootStrap: docker + From: centos:centos7 + + %post + yum update -y + yum install environment-modules -y + echo 'module() { eval `/usr/bin/modulecmd bash $*`; }' >> /etc/bashrc + yum install wget -y + yum install which -y + yum install squashfs-tools -y + +If we save this definition to a file called `centos7.def`, we can use +the following `build` command to build the image (remember this command +must be run on a system where you have root access, not on Cirrus). + + me@my-system:~> sudo singularity build centos7.sif centos7.def + +The resulting image file (`centos7.sif`) can then be copied to Cirrus +using scp; such an image already exists on Cirrus and can be found in +the `/scratch/sw/singularity/images` folder. + +When you use that image interactively on Cirrus you must start with a +login shell and also bind `/scratch/sw` so that the container can see +all the module files, see below. + + [user@cirrus-login1 ~]$ module load singularity + [user@cirrus-login1 ~]$ singularity exec -B /scratch/sw \ + /scratch/sw/singularity/images/centos7.sif \ + /bin/bash --login + Singularity> module avail intel-compilers + + ------------------------- /scratch/sw/modulefiles --------------------- + intel-compilers-18/18.05.274 intel-compilers-19/19.0.0.117 + Singularity> exit + logout + [user@cirrus-login1 ~]$ + +### Altering a Container on Cirrus + +A container image file is immutable but it is possible to alter the +image if you convert the file to a sandbox. The sandbox is essentially a +directory on the host system that contains the full container file +hierarchy. + +You first run the `singularity build` command to perform the conversion +followed by a `shell` command with the `--writable` option. You are now +free to change the files inside the container sandbox. + + user@cirrus-login1 ~]$ singularity build --sandbox image.sif.sandbox image.sif + user@cirrus-login1 ~]$ singularity shell -B /scratch/sw --writable image.sif.sandbox + Singularity> + +In the example above, the `/scratch/sw` bind path is specified, allowing +you to build code that links to the Cirrus module libraries. + +Finally, once you are finished with the sandbox you can exit and convert +back to the original image file. + + Singularity> exit + exit + user@cirrus-login1 ~]$ singularity build --force image.sif image.sif.sandbox + + + +!!! Note + + Altering a container in this way will cause the associated definition + file to be out of step with the current image. Care should be taken to + keep a record of the commands that were run within the sandbox so that + the image can be reproduced. 
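+
+As an illustration of building against the Cirrus module libraries from
+inside the writable sandbox, the sketch below compiles a small test
+program with one of the Intel compiler modules. The module version shown
+and the source/executable names are purely illustrative:
+
+    [user@cirrus-login1 ~]$ module load singularity
+    [user@cirrus-login1 ~]$ singularity exec -B /scratch/sw --writable image.sif.sandbox \
+        /bin/bash --login
+    Singularity> module load intel-compilers-19/19.0.0.117
+    Singularity> icc -o hello hello.c
+    Singularity> exit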
+
+ diff --git a/docs/user-guide/solidstate.md b/docs/user-guide/solidstate.md new file mode 100644 index 00000000..c29d5fea --- /dev/null +++ b/docs/user-guide/solidstate.md @@ -0,0 +1,110 @@ +# Solid state storage
+
+In addition to the Lustre file system, the Cirrus login and compute
+nodes have access to a shared, high-performance, solid state storage
+system (also known as RPOOL). This storage system is network mounted and
+shared across the login nodes and GPU compute nodes in a similar way to
+the normal, spinning-disk Lustre file system but has different
+performance characteristics.
+
+The solid state storage has a maximum usable capacity of 256 TB which is
+shared between all users.
+
+## Backups, quotas and data longevity
+
+There are no backups of any data on the solid state storage so you
+should ensure that you have copies of critical data elsewhere.
+
+In addition, the solid state storage does not currently have any quotas
+(user or group) enabled so all users are potentially able to access the
+full 256 TB capacity of the storage system. We ask all users to be
+considerate in their use of this shared storage system and to delete any
+data on the solid state storage as soon as it no longer needs to be
+there.
+
+We monitor the usage of the storage system by users and groups and will
+potentially remove data that is stopping other users getting fair access
+to the storage and data that has not been actively used for long periods
+of time.
+
+## Accessing the solid-state storage
+
+You access the solid-state storage at `/scratch/space1` on both the
+login nodes and on the compute nodes.
+
+All users are able to create directories and add data, so we suggest
+that you create a directory for your project and/or user to avoid
+clashes with files and data added by other users. For example, if your
+project is `t01` and your username is `auser` then you could create a
+directory with
+
+    mkdir -p /scratch/space1/t01/auser
+
+When these directories are initially created they will be
+*world-readable*. If you do not want users from other projects to be
+able to see your data, you should change the permissions on your new
+directory. For example, to restrict the directory so that only other
+users in your project can read the data you would use:
+
+    chmod -R o-rwx /scratch/space1/t01
+
+## Copying data to/from solid-state storage
+
+You can move data to/from the solid-state storage in a number of
+different ways:
+
+- By copying to/from another Cirrus file system - either interactively
+  on login nodes or as part of a job submission script
+- By transferring directly to/from an external host via the login nodes
+
+### Local data transfer
+
+The most efficient tool for copying data between the Cirrus file systems
+(`/home`, `/work`) and the solid state storage is generally the `cp`
+command, e.g.
+
+    cp -r /path/to/data-dir /scratch/space1/t01/auser/
+
+where `/path/to/data-dir` should be replaced with the path to the data
+directory you want to copy (assuming, of course, that you have set up
+the `t01/auser` subdirectories as described above).
+
+
+
+!!! Note
+
+    If you are transferring data from your `/work` directory, these commands
+    can also be added to job submission scripts running on the compute nodes
+    to move data as part of the job. If you do this, remember to include the
+    data transfer time in the overall walltime for the job.
+
+    Data from your `/home` directory is not available from the compute nodes
+    and must therefore be transferred from a login node.
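+
+For example, a job submission script might stage input data from `/work`
+onto the solid state storage before the run and copy results back
+afterwards. The project, username and directory names below are purely
+illustrative:
+
+    # Stage input data onto the solid state storage
+    SCRATCH_DIR=/scratch/space1/t01/auser/${SLURM_JOB_ID}
+    mkdir -p ${SCRATCH_DIR}
+    cp -r /work/t01/auser/input-data ${SCRATCH_DIR}/
+
+    # ... run the application, reading from and writing to ${SCRATCH_DIR} ...
+
+    # Copy results back to /work and remove the staged copy
+    cp -r ${SCRATCH_DIR}/output /work/t01/auser/
+    rm -rf ${SCRATCH_DIR}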
+
+
+### Remote data transfer
+
+You can transfer data directly to the solid state storage from external
+locations using `scp` or `rsync` in exactly the same way as you would
+usually do to transfer data to Cirrus. Simply substitute the path to the
+location on the solid state storage for the one you would normally use
+on Cirrus. For example, if you are on the external location (e.g. your
+laptop), you could use something like:
+
+    scp -r data_dir user@login.cirrus.ac.uk:/scratch/space1/t01/auser/
+
+You can also use commands such as `wget` and `curl` to pull data from
+external locations directly to the solid state storage.
+
+
+
+!!! Note
+
+    You cannot transfer data from external locations in job scripts as the
+    Cirrus compute nodes do not have external network access.
+
+
+ diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 00000000..92d4d4eb --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,80 @@ +theme:
+  name: material
+  favicon: favicon.ico
+  logo: images/cirrus_logo_white-Transparent-Background.png
+  palette:
+    primary: teal
+    accent: teal
+  features:
+    - tabs
+  icon:
+    repo: fontawesome/brands/github
+
+extra_css: [stylesheets/cirrus.css]
+
+site_name: Cirrus User Documentation
+repo_url: https://github.com/EPCCed/cirrus-new-docs
+repo_name: EPCCed/cirrus-new-docs
+edit_uri: edit/main/docs/
+extra:
+  social:
+    - icon: fontawesome/brands/github
+      link: https://github.com/EPCCed
+  analytics:
+    provider: google
+    property:
+
+markdown_extensions:
+  - admonition
+  - pymdownx.details
+  - pymdownx.superfences
+  - pymdownx.tabbed:
+      alternate_style: true
+
+nav:
+  - "Overview": index.md
+  - "User Guide":
+      - "Introduction": user-guide/introduction.md
+      - "Connecting to Cirrus": user-guide/connecting.md
+      - "Data Management and Transfer": user-guide/data.md
+      - "File and Resource Management": user-guide/resource_management.md
+      - "Application Development Environment": user-guide/development.md
+      - "Running Jobs on Cirrus": user-guide/batch.md
+      - "Singularity Containers": user-guide/singularity.md
+      - "Using Python": user-guide/python.md
+      - "Using the Cirrus GPU Nodes": user-guide/gpu.md
+      - "Solid state storage": user-guide/solidstate.md
+  - "Software Applications":
+      - "Castep": software-packages/castep.md
+      - "CP2K": software-packages/cp2k.md
+      - "ELEMENTS": software-packages/elements.md
+      - "FLACS": software-packages/flacs.md
+      - "Gaussian": software-packages/gaussian.md
+      - "GROMACS": software-packages/gromacs.md
+      - "HELYX®": software-packages/helyx.md
+      - "LAMMPS": software-packages/lammps.md
+      - "MATLAB": software-packages/MATLAB.md
+      - "NAMD": software-packages/namd.md
+      - "OpenFOAM": software-packages/openfoam.md
+      - "ORCA": software-packages/orca.md
+      - "Quantum Espresso (QE)": software-packages/qe.md
+      - "STAR-CCM+": software-packages/starccm+.md
+      - "VASP": software-packages/vasp.md
+      - "SPECFEM3D Cartesian": software-packages/specfem3d.md
+  - "Software Libraries":
+      - "Intel MKL: BLAS, LAPACK, ScaLAPACK": software-libraries/intel_mkl.md
+      - "HDF5": software-libraries/hdf5.md
+  - "Software Tools":
+      - "Debugging using Arm DDT": software-tools/ddt.md
+      - "Profiling using Scalasca": software-tools/scalasca.md
+      - "Intel VTune": software-tools/intel-vtune.md
+  - "References and further reading": user-guide/reading.md
+
+
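+# A quick way to preview this configuration locally is to install the theme
+# and extensions referenced above and run the MkDocs development server. The
+# package set below is an assumption based on this file (mkdocs-material
+# pulls in mkdocs itself; pymdown-extensions provides the pymdownx.*
+# extensions), not part of the original configuration:
+#
+#     pip install mkdocs-material pymdown-extensions
+#     mkdocs serve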