diff --git a/include/Makeoptions.Tianhe-XY b/include/Makeoptions.Tianhe-XY
new file mode 100755
index 00000000..55363215
--- /dev/null
+++ b/include/Makeoptions.Tianhe-XY
@@ -0,0 +1,39 @@
+# =======================================================
+# mpif90 - ifort
+#
+
+ FF = mpif90
+
+ NETCDF_LIB = /APP/u22/x86/netcdf/4.9.2-icc-oneapi2023.2/lib/
+ NETCDF_INC = /APP/u22/x86/netcdf/4.9.2-icc-oneapi2023.2/include/
+
+ LAPACK_LIB = /APP/u22/x86/LAPACK/lapack-3.12.0-icc-oneapi2023.2_noimpi/lib/
+ BLAS_LIB = /APP/u22/x86/BLAS/icc-2023/lib-icc/
+
+ MOD_CMD = -module
+
+ FOPTS = -qopenmp -O2 -traceback -r8 -free -check uninit -check bounds
+
+ LDFLAGS = -L${NETCDF_LIB} -lnetcdff -L${LAPACK_LIB} -llapack -L${BLAS_LIB} -lblas
+
+#============================================================
+# CaMa-Flood Mkinclude (for Linux, Intel fortran)
+
+RM = /bin/rm -f
+CP = /bin/cp
+#----
+# Pre-Processing options
+# DMPI=-DUseMPI: activate when MPI parallelization is used
+# DCDF=-DUseCDF: activate when using netCDF, comment out when not needed
+# DATM=-DNoAtom: activate when OMP ATOMIC calculation should be avoided (bit identical simulation)
+#----
+#DMPI=-DUseMPI
+DCDF=-DUseCDF
+#DATM=-DNoAtom
+CFLAGS=$(DMPI) $(DCDF) $(DATM)
+#----
+# FCMP: main program (src/), FC: pre/post process (map/ etc/)
+FCMP = ifort -qopenmp
+FC = ifort
+LFLAGS =
+FFLAGS = -O3 -warn all -fpp -free -assume byterecl -heap-arrays -nogen-interface -lpthread -static-intel
diff --git a/mkinidata/MOD_Initialize.F90 b/mkinidata/MOD_Initialize.F90
index 921e0eb5..43e6b7d5 100644
--- a/mkinidata/MOD_Initialize.F90
+++ b/mkinidata/MOD_Initialize.F90
@@ -247,7 +247,7 @@ SUBROUTINE initialize (casename, dir_landdata, dir_restart, &
    !soil type 0    1      2      3      4      5      6      7      8      9      10     11     12
    !BVIC =    1.0  0.050, 0.080, 0.090, 0.250, 0.150, 0.180, 0.200, 0.220, 0.230, 0.250, 0.280, 0.300
    !re-arranged BVIC for USDA soil texture class:
-   real(r8), parameter :: BVIC_USGS(0:12) = (/ 1., 0.300, 0.280, 0.250, 0.230, 0.220, 0.200, 0.180, 0.250, 0.090, 0.150, 0.080, 0.050/)
+   real(r8), parameter :: BVIC_USDA(0:12) = (/ 1., 0.300, 0.280, 0.250, 0.230, 0.220, 0.200, 0.180, 0.250, 0.090, 0.150, 0.080, 0.050/)
 
    ! --------------------------------------------------------------------
    ! Allocates memory for CoLM 1d [numpatch] variables
@@ -346,7 +346,7 @@ SUBROUTINE initialize (casename, dir_landdata, dir_restart, &
    IF (p_is_worker) THEN
       IF (numpatch > 0) THEN
         DO ipatch = 1, numpatch
-            BVIC(ipatch)=BVIC_USGS(soiltext(ipatch))
+            BVIC(ipatch)=BVIC_USDA(soiltext(ipatch))
         ENDDO
      ENDIF
   ENDIF
diff --git a/run/forcing/CRUJRA.nml b/run/forcing/CRUJRA.nml
index 874b701f..999c2f4f 100644
--- a/run/forcing/CRUJRA.nml
+++ b/run/forcing/CRUJRA.nml
@@ -1,7 +1,7 @@
 &nl_colm_forcing
 
    ! ----- forcing -----
-   DEF_dir_forcing = '/shr03/CoLM_Forcing/crujra/'
+   DEF_dir_forcing = '/shr03/CoLM_Forcing/crujra_v2.5/'
 
    DEF_forcing%dataset = 'CRUJRA'
    DEF_forcing%solarin_all_band = .true.
@@ -31,14 +31,14 @@
 
    DEF_forcing%groupby = 'year' ! file grouped by year/month
 
-   DEF_forcing%fprefix(1) = 'tmp/crujra.v2.4.5d.tmp.'
-   DEF_forcing%fprefix(2) = 'spfh/crujra.v2.4.5d.spfh.'
-   DEF_forcing%fprefix(3) = 'pres/crujra.v2.4.5d.pres.'
-   DEF_forcing%fprefix(4) = 'pre/crujra.v2.4.5d.pre.'
-   DEF_forcing%fprefix(5) = 'ugrd/crujra.v2.4.5d.ugrd.'
-   DEF_forcing%fprefix(6) = 'vgrd/crujra.v2.4.5d.vgrd.'
-   DEF_forcing%fprefix(7) = 'dswrf/crujra.v2.4.5d.dswrf.'
-   DEF_forcing%fprefix(8) = 'dlwrf/crujra.v2.4.5d.dlwrf.'
+   DEF_forcing%fprefix(1) = 'tmp/crujra.v2.5.5d.tmp.'
+   DEF_forcing%fprefix(2) = 'spfh/crujra.v2.5.5d.spfh.'
+   DEF_forcing%fprefix(3) = 'pres/crujra.v2.5.5d.pres.'
+   DEF_forcing%fprefix(4) = 'pre/crujra.v2.5.5d.pre.'
+   DEF_forcing%fprefix(5) = 'ugrd/crujra.v2.5.5d.ugrd.'
+   DEF_forcing%fprefix(6) = 'vgrd/crujra.v2.5.5d.vgrd.'
+   DEF_forcing%fprefix(7) = 'dswrf/crujra.v2.5.5d.dswrf.'
+   DEF_forcing%fprefix(8) = 'dlwrf/crujra.v2.5.5d.dlwrf.'
 
    DEF_forcing%vname = 'tmp' 'spfh' 'pres' 'pre' 'ugrd' 'vgrd' 'dswrf' 'dlwrf'
 
diff --git a/run/scripts/batch.config b/run/scripts/batch.config
deleted file mode 100644
index 9e17cb99..00000000
--- a/run/scripts/batch.config
+++ /dev/null
@@ -1,54 +0,0 @@
-#------------------------------earthlab--------------------------------------------
-##!/bin/bash
-#
-##SBATCH -J
-##SBATCH -p
-##SBATCH -N
-##SBATCH -n
-##SBATCH --ntasks-per-node=
-##SBATCH --mem=
-##SBATCH -o colm.o%j
-##SBATCH -e colm.e%j
-##SBATCH --exclusive
-##SBATCH -t
-##
-##module purge
-##module load compiler/intel/2021.3.1
-##module load mpi/intelmpi/2018.4.274
-##module load mathlib/netcdf/intel/4.4.1
-##module load mathlib/hdf5/intel/1.8.20
-##
-##export I_MPI_FABRICS=shm:dapl
-##export I_MPI_DAPL_UD=1
-##export I_MPI_DAPL_UD_RDMA_MIXED=1
-##export I_MPI_LARGE_SCALE_THRESHOLD=8192
-##export I_MPI_DAPL_UD_ACK_SEND_POOL_SIZE=8704
-##export I_MPI_DAPL_UD_ACK_RECV_POOL_SIZE=8704
-##export I_MPI_DAPL_UD_RNDV_EP_NUM=2
-##
-##export DAPL_UCM_REP_TIME=8000 # REQUEST timer, waiting for REPLY in millisecs
-##export DAPL_UCM_RTU_TIME=8000 # REPLY timer, waiting for RTU in millisecs
-##export DAPL_UCM_RETRY=10 # REQUEST and REPLY retries
-##export DAPL_UCM_CQ_SIZE=2000
-##export DAPL_UCM_QP_SIZE=2000
-##
-##export DAPL_UCM_DREQ_RETRY=4 #default == 1
-##export DAPL_UCM_DREP_TIME=200 #default == 200ms
-##export DAPL_UCM_WAIT_TIME=10000 #default == 60000ms
-##
-##ulimit -s unlimited
-##scontrol show hostname > nd
-##NP=$SLURM_NPROCS
-
-
-#-------------------------------baiduboat------------------------------------------
-#!/bin/bash
-
-#BSUB -J
-#BSUB -q
-#BSUB -o colm.o%
-#BSUB -e colm.e%
-#BSUB -n
-#BSUB -R rusage[mem=]
-#BSUB -R span[ptile=]
-
diff --git a/run/scripts/batch.config b/run/scripts/batch.config
new file mode 120000
index 00000000..3fa5d09a
--- /dev/null
+++ b/run/scripts/batch.config
@@ -0,0 +1 @@
+batch.github.config
\ No newline at end of file
diff --git a/run/scripts/batch.github.config b/run/scripts/batch.github.config
new file mode 100644
index 00000000..9e17cb99
--- /dev/null
+++ b/run/scripts/batch.github.config
@@ -0,0 +1,54 @@
+#------------------------------earthlab--------------------------------------------
+##!/bin/bash
+#
+##SBATCH -J
+##SBATCH -p
+##SBATCH -N
+##SBATCH -n
+##SBATCH --ntasks-per-node=
+##SBATCH --mem=
+##SBATCH -o colm.o%j
+##SBATCH -e colm.e%j
+##SBATCH --exclusive
+##SBATCH -t
+##
+##module purge
+##module load compiler/intel/2021.3.1
+##module load mpi/intelmpi/2018.4.274
+##module load mathlib/netcdf/intel/4.4.1
+##module load mathlib/hdf5/intel/1.8.20
+##
+##export I_MPI_FABRICS=shm:dapl
+##export I_MPI_DAPL_UD=1
+##export I_MPI_DAPL_UD_RDMA_MIXED=1
+##export I_MPI_LARGE_SCALE_THRESHOLD=8192
+##export I_MPI_DAPL_UD_ACK_SEND_POOL_SIZE=8704
+##export I_MPI_DAPL_UD_ACK_RECV_POOL_SIZE=8704
+##export I_MPI_DAPL_UD_RNDV_EP_NUM=2
+##
+##export DAPL_UCM_REP_TIME=8000 # REQUEST timer, waiting for REPLY in millisecs
+##export DAPL_UCM_RTU_TIME=8000 # REPLY timer, waiting for RTU in millisecs
+##export DAPL_UCM_RETRY=10 # REQUEST and REPLY retries
+##export DAPL_UCM_CQ_SIZE=2000
+##export DAPL_UCM_QP_SIZE=2000
+##
+##export DAPL_UCM_DREQ_RETRY=4 #default == 1
+##export DAPL_UCM_DREP_TIME=200 #default == 200ms
+##export DAPL_UCM_WAIT_TIME=10000 #default == 60000ms
+##
+##ulimit -s unlimited
+##scontrol show hostname > nd
+##NP=$SLURM_NPROCS
+
+
+#-------------------------------baiduboat------------------------------------------
+#!/bin/bash
+
+#BSUB -J
+#BSUB -q
+#BSUB -o colm.o%
+#BSUB -e colm.e%
+#BSUB -n
+#BSUB -R rusage[mem=]
+#BSUB -R span[ptile=]
+
diff --git a/run/scripts/batch.slurm-earthlab.config b/run/scripts/batch.slurm-earthlab.config
new file mode 100644
index 00000000..b077f555
--- /dev/null
+++ b/run/scripts/batch.slurm-earthlab.config
@@ -0,0 +1,44 @@
+#------------------------------earthlab--------------------------------------------
+##!/bin/bash
+#
+##SBATCH -J
+##SBATCH -p
+##SBATCH -N
+##SBATCH -n
+##SBATCH --ntasks-per-node=
+##SBATCH --mem=
+##SBATCH -o colm.o%j
+##SBATCH -e colm.e%j
+##SBATCH --exclusive
+##SBATCH -t
+##
+##module purge
+##module load compiler/intel/2021.3.1
+##module load mpi/intelmpi/2018.4.274
+##module load mathlib/netcdf/intel/4.4.1
+##module load mathlib/hdf5/intel/1.8.20
+##
+##export I_MPI_FABRICS=shm:dapl
+##export I_MPI_DAPL_UD=1
+##export I_MPI_DAPL_UD_RDMA_MIXED=1
+##export I_MPI_LARGE_SCALE_THRESHOLD=8192
+##export I_MPI_DAPL_UD_ACK_SEND_POOL_SIZE=8704
+##export I_MPI_DAPL_UD_ACK_RECV_POOL_SIZE=8704
+##export I_MPI_DAPL_UD_RNDV_EP_NUM=2
+##
+##export DAPL_UCM_REP_TIME=8000 # REQUEST timer, waiting for REPLY in millisecs
+##export DAPL_UCM_RTU_TIME=8000 # REPLY timer, waiting for RTU in millisecs
+##export DAPL_UCM_RETRY=10 # REQUEST and REPLY retries
+##export DAPL_UCM_CQ_SIZE=2000
+##export DAPL_UCM_QP_SIZE=2000
+##
+##export DAPL_UCM_DREQ_RETRY=4 #default == 1
+##export DAPL_UCM_DREP_TIME=200 #default == 200ms
+##export DAPL_UCM_WAIT_TIME=10000 #default == 60000ms
+##
+##ulimit -s unlimited
+##scontrol show hostname > nd
+##NP=$SLURM_NPROCS
+
+
+
diff --git a/run/scripts/batch.slurm-tianhexy.config b/run/scripts/batch.slurm-tianhexy.config
new file mode 100644
index 00000000..f272784f
--- /dev/null
+++ b/run/scripts/batch.slurm-tianhexy.config
@@ -0,0 +1,16 @@
+#------------------------------tianhexy--------------------------------------------
+#!/bin/bash
+
+#SBATCH -J
+#SBATCH -p
+#SBATCH -N
+#SBATCH -n
+#SBATCH --ntasks-per-node=
+#SBATCH --mem=
+#SBATCH -o colm.o%j
+#SBATCH -e colm.e%j
+#SBATCH --exclusive
+#SBATCH -t
+
+
+
diff --git a/run/scripts/batch.slurm.config b/run/scripts/batch.slurm.config
new file mode 100644
index 00000000..b077f555
--- /dev/null
+++ b/run/scripts/batch.slurm.config
@@ -0,0 +1,44 @@
+#------------------------------earthlab--------------------------------------------
+##!/bin/bash
+#
+##SBATCH -J
+##SBATCH -p
+##SBATCH -N
+##SBATCH -n
+##SBATCH --ntasks-per-node=
+##SBATCH --mem=
+##SBATCH -o colm.o%j
+##SBATCH -e colm.e%j
+##SBATCH --exclusive
+##SBATCH -t
+##
+##module purge
+##module load compiler/intel/2021.3.1
+##module load mpi/intelmpi/2018.4.274
+##module load mathlib/netcdf/intel/4.4.1
+##module load mathlib/hdf5/intel/1.8.20
+##
+##export I_MPI_FABRICS=shm:dapl
+##export I_MPI_DAPL_UD=1
+##export I_MPI_DAPL_UD_RDMA_MIXED=1
+##export I_MPI_LARGE_SCALE_THRESHOLD=8192
+##export I_MPI_DAPL_UD_ACK_SEND_POOL_SIZE=8704
+##export I_MPI_DAPL_UD_ACK_RECV_POOL_SIZE=8704
+##export I_MPI_DAPL_UD_RNDV_EP_NUM=2
+##
+##export DAPL_UCM_REP_TIME=8000 # REQUEST timer, waiting for REPLY in millisecs
+##export DAPL_UCM_RTU_TIME=8000 # REPLY timer, waiting for RTU in millisecs
+##export DAPL_UCM_RETRY=10 # REQUEST and REPLY retries
+##export DAPL_UCM_CQ_SIZE=2000
+##export DAPL_UCM_QP_SIZE=2000
+##
+##export DAPL_UCM_DREQ_RETRY=4 #default == 1
+##export DAPL_UCM_DREP_TIME=200 #default == 200ms
+##export DAPL_UCM_WAIT_TIME=10000 #default == 60000ms
+##
+##ulimit -s unlimited
+##scontrol show hostname > nd
+##NP=$SLURM_NPROCS
+
+
+
diff --git a/run/scripts/create_scripts b/run/scripts/create_scripts
index ccb41de2..c19eda35 100755
--- a/run/scripts/create_scripts
+++ b/run/scripts/create_scripts
@@ -71,6 +71,8 @@ CreateHeader()
    if [ -f $CONFIG ];then
       case ${SCRIPTNAME} in
          mksrf.submit)
+            EXEC=`awk '/Exe_command/ {print $2}' $CONFIG`
+            EXEO=`awk '/Exe_opt/ {print $2}' $CONFIG`
             NP=`awk '/NProcesses_mksrf/ {print $2}' $CONFIG`
             NN=`awk '/NNodes_mksrf/ {print $2}' $CONFIG`
             NTPN=`awk '/NTasksPerNode_mksrf/ {print $2}' $CONFIG`
@@ -80,6 +82,8 @@ CreateHeader()
             OUTPUT='mksrf'
             ;;
          init.submit)
+            EXEC=`awk '/Exe_command/ {print $2 $3}' $CONFIG`
+            EXEO=`awk '/Exe_opt/ {print $2}' $CONFIG`
             NP=`awk '/NProcesses_mkini/ {print $2}' $CONFIG`
             NN=`awk '/NNodes_mkini/ {print $2}' $CONFIG`
             NTPN=`awk '/NTasksPerNode_mkini/ {print $2}' $CONFIG`
@@ -89,6 +93,8 @@ CreateHeader()
             OUTPUT='init'
             ;;
          case.submit)
+            EXEC=`awk '/Exe_command/ {print $2 $3}' $CONFIG`
+            EXEO=`awk '/Exe_opt/ {print $2}' $CONFIG`
             NP=`awk '/NProcesses_case/ {print $2}' $CONFIG`
             NN=`awk '/NNodes_case/ {print $2}' $CONFIG`
             NTPN=`awk '/NTasksPerNode_case/ {print $2}' $CONFIG`
@@ -106,6 +112,8 @@ CreateHeader()
       echo SCRIPTNAME $SCRIPTNAME
       echo HEADER $HEADER
       echo CONFIG $CONFIG
+      echo EXEC $EXEC
+      echo EXEO $EXEO
       if [ $NP -gt 0 ] 2>/dev/null;then
          echo Number of processes is assigned from $CONFIG
       else
@@ -150,6 +158,8 @@ CreateHeader()
             WT=24:00:00
             QUEUE=normal
             OUTPUT='mksrf'
+            EXEC='mpirun'
+            EXEO='-np'
             ;;
          init.submit)
             NP=24
@@ -159,6 +169,8 @@ CreateHeader()
             WT=24:00:00
             QUEUE=normal
             OUTPUT='init'
+            EXEC='mpirun'
+            EXEO='-np'
             ;;
          case.submit)
             NP=120
@@ -168,6 +180,8 @@ CreateHeader()
             WT=24:00:00
             QUEUE=normal
             OUTPUT='case'
+            EXEC='mpirun'
+            EXEO='-np'
             ;;
          *)
            echo "Error: Unknown Unknown script name: $SCRIPTNAME in CreateScripts"
@@ -231,7 +245,7 @@ CreateScripts()
    fi
 
    cat>>mksrf.submit<<EOF
-mpirun -np $NP ./mksrfdata.x ../../input_${CASENAME}.nml > ../../logmksrfdata
+${EXEC} ${EXEO} $NP ./mksrfdata.x ../../input_${CASENAME}.nml > ../../logmksrfdata
 
 EOF
 
@@ -257,7 +271,7 @@ EOF
    fi
 
    cat>>init.submit<<EOF
-mpirun -np $NP ./mkinidata.x ../../input_${CASENAME}.nml > ../../logini
+${EXEC} ${EXEO} $NP ./mkinidata.x ../../input_${CASENAME}.nml > ../../logini
 
 EOF
 
@@ -285,7 +299,7 @@ EOF
 
      none)
         cat>>'case.submit'<<EOF
-mpirun -np $NP ./colm.x ../../input_${CASENAME}.nml > ../../log
+${EXEC} ${EXEO} $NP ./colm.x ../../input_${CASENAME}.nml > ../../log
 EOF
        ;;
     nd)
@@ -294,7 +308,7 @@ iloop=1
 while [ ${ILOOP} -le 100 ]
 do
 cd ${CASEPATH}/${CASENAME}/bld/run/
-mpirun -np $NP ./colm.x ../../input_${CASENAME}.nml > ../../log-${ILOOP}
+${EXEC} ${EXEO} $NP ./colm.x ../../input_${CASENAME}.nml > ../../log-${ILOOP}
 mkdir -p ${CASEPATH}/${CASENAME}/restart/loop-${ILOOP}
 mkdir -p ${CASEPATH}/${CASENAME}/history/loop-${ILOOP}
 cd ${CASEPATH}/${CASENAME}/restart
@@ -322,9 +336,9 @@ while [ ${ILOOP} -le 130 ]
 do
 cd ${CASEPATH}/${CASENAME}/bld/run/
 if [ ${ILOOP} -le 100 ];then
-mpirun -np 240 ./colm.x ../../input_${CASENAME}-SASU.nml > ../../log-${ILOOP}
+${EXEC} ${EXEO} 240 ./colm.x ../../input_${CASENAME}-SASU.nml > ../../log-${ILOOP}
 else
-mpirun -np 240 ./colm.x ../../input_${CASENAME}.nml > ../../log-${ILOOP}
+${EXEC} ${EXEO} 240 ./colm.x ../../input_${CASENAME}.nml > ../../log-${ILOOP}
 fi
 mkdir -p ${CASEPATH}/${CASENAME}/restart/loop-${ILOOP}
 mkdir -p ${CASEPATH}/${CASENAME}/history/loop-${ILOOP}
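
The new Exe_command / Exe_opt keys are read with awk from the same $CONFIG file that already supplies the NProcesses_* / NNodes_* / NTasksPerNode_* entries; when no config file is found, create_scripts now falls back to EXEC='mpirun' and EXEO='-np'. A minimal hypothetical fragment of such a config file is sketched below (the key names mirror the awk patterns above; the values are placeholders for a SLURM machine, not part of this patch):

# hypothetical $CONFIG fragment -- values are illustrative only
Exe_command srun
Exe_opt -n
NProcesses_case 120
NNodes_case 5
NTasksPerNode_case 24

With these entries, case.submit would launch the model as srun -n 120 ./colm.x ../../input_${CASENAME}.nml > ../../log, while the built-in defaults keep the previous mpirun -np behaviour.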