Merge branch 'master' of github.com:yuanhuas/CoLM202X
yuanhuas committed Jan 11, 2025
2 parents 7b9f5fa + 98103a1 commit e8d01c6
Showing 9 changed files with 229 additions and 71 deletions.
39 changes: 39 additions & 0 deletions include/Makeoptions.Tianhe-XY
@@ -0,0 +1,39 @@
# =======================================================
# mpif90 - ifort
#

FF = mpif90

NETCDF_LIB = /APP/u22/x86/netcdf/4.9.2-icc-oneapi2023.2/lib/
NETCDF_INC = /APP/u22/x86/netcdf/4.9.2-icc-oneapi2023.2/include/

LAPACK_LIB = /APP/u22/x86/LAPACK/lapack-3.12.0-icc-oneapi2023.2_noimpi/lib/
BLAS_LIB = /APP/u22/x86/BLAS/icc-2023/lib-icc/

MOD_CMD = -module

FOPTS = -qopenmp -O2 -traceback -r8 -free -check uninit -check bounds

LDFLAGS = -L${NETCDF_LIB} -lnetcdff -L${LAPACK_LIB} -llapack -L${BLAS_LIB} -lblas

#============================================================
# CaMa-Flood Mkinclude (for Linux, Intel fortran)

RM = /bin/rm -f
CP = /bin/cp
#----
# Pre-Processing options
# DMPI=-DUseMPI: activate when MPI parallelization is used
# DCDF=-DUseCDF: activate when using netCDF, comment out when not needed
# DATM=-DNoAtom: activate to avoid OMP ATOMIC operations (for bit-identical simulations)
#----
#DMPI=-DUseMPI
DCDF=-DUseCDF
#DATM=-DNoAtom
CFLAGS=$(DMPI) $(DCDF) $(DATM)
#----
# FCMP: main program (src/), FC: pre/post process (map/ etc/)
FCMP = ifort -qopenmp
FC = ifort
LFLAGS =
FFLAGS = -O3 -warn all -fpp -free -assume byterecl -heap-arrays -nogen-interface -lpthread -static-intel
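The machine file above is only picked up once the build points at it. A minimal usage sketch in shell, assuming the top-level Makefile reads its options from include/Makeoptions (the copy step and make invocation are assumptions for illustration, not part of this commit):

# Hypothetical: select the Tianhe-XY machine file before compiling.
# Assumes the CoLM202X Makefile includes include/Makeoptions (verify in your checkout).
cd CoLM202X
cp include/Makeoptions.Tianhe-XY include/Makeoptions
make clean && make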
4 changes: 2 additions & 2 deletions mkinidata/MOD_Initialize.F90
@@ -247,7 +247,7 @@ SUBROUTINE initialize (casename, dir_landdata, dir_restart, &
!soil type 0 1 2 3 4 5 6 7 8 9 10 11 12
!BVIC = 1.0 0.050, 0.080, 0.090, 0.250, 0.150, 0.180, 0.200, 0.220, 0.230, 0.250, 0.280, 0.300
!re-arranged BVIC for USDA soil texture class:
real(r8), parameter :: BVIC_USGS(0:12) = (/ 1., 0.300, 0.280, 0.250, 0.230, 0.220, 0.200, 0.180, 0.250, 0.090, 0.150, 0.080, 0.050/)
real(r8), parameter :: BVIC_USDA(0:12) = (/ 1., 0.300, 0.280, 0.250, 0.230, 0.220, 0.200, 0.180, 0.250, 0.090, 0.150, 0.080, 0.050/)

! --------------------------------------------------------------------
! Allocates memory for CoLM 1d [numpatch] variables
@@ -346,7 +346,7 @@ SUBROUTINE initialize (casename, dir_landdata, dir_restart, &
IF (p_is_worker) THEN
IF (numpatch > 0) THEN
DO ipatch = 1, numpatch
BVIC(ipatch)=BVIC_USGS(soiltext(ipatch))
BVIC(ipatch)=BVIC_USDA(soiltext(ipatch))
ENDDO
ENDIF
ENDIF
18 changes: 9 additions & 9 deletions run/forcing/CRUJRA.nml
@@ -1,7 +1,7 @@
&nl_colm_forcing

! ----- forcing -----
DEF_dir_forcing = '/shr03/CoLM_Forcing/crujra/'
DEF_dir_forcing = '/shr03/CoLM_Forcing/crujra_v2.5/'

DEF_forcing%dataset = 'CRUJRA'
DEF_forcing%solarin_all_band = .true.
@@ -31,14 +31,14 @@

DEF_forcing%groupby = 'year' ! file grouped by year/month

DEF_forcing%fprefix(1) = 'tmp/crujra.v2.4.5d.tmp.'
DEF_forcing%fprefix(2) = 'spfh/crujra.v2.4.5d.spfh.'
DEF_forcing%fprefix(3) = 'pres/crujra.v2.4.5d.pres.'
DEF_forcing%fprefix(4) = 'pre/crujra.v2.4.5d.pre.'
DEF_forcing%fprefix(5) = 'ugrd/crujra.v2.4.5d.ugrd.'
DEF_forcing%fprefix(6) = 'vgrd/crujra.v2.4.5d.vgrd.'
DEF_forcing%fprefix(7) = 'dswrf/crujra.v2.4.5d.dswrf.'
DEF_forcing%fprefix(8) = 'dlwrf/crujra.v2.4.5d.dlwrf.'
DEF_forcing%fprefix(1) = 'tmp/crujra.v2.5.5d.tmp.'
DEF_forcing%fprefix(2) = 'spfh/crujra.v2.5.5d.spfh.'
DEF_forcing%fprefix(3) = 'pres/crujra.v2.5.5d.pres.'
DEF_forcing%fprefix(4) = 'pre/crujra.v2.5.5d.pre.'
DEF_forcing%fprefix(5) = 'ugrd/crujra.v2.5.5d.ugrd.'
DEF_forcing%fprefix(6) = 'vgrd/crujra.v2.5.5d.vgrd.'
DEF_forcing%fprefix(7) = 'dswrf/crujra.v2.5.5d.dswrf.'
DEF_forcing%fprefix(8) = 'dlwrf/crujra.v2.5.5d.dlwrf.'


DEF_forcing%vname = 'tmp' 'spfh' 'pres' 'pre' 'ugrd' 'vgrd' 'dswrf' 'dlwrf'
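With DEF_dir_forcing and the eight fprefix entries moved to v2.5, each variable is expected in its own subdirectory of crujra_v2.5, one file per year (groupby = 'year'). A hedged shell check of that layout; the four-digit-year suffix and the sample year 2019 are assumptions, not taken from the namelist:

# Hypothetical sanity check of the CRUJRA v2.5 forcing layout.
# Assumption: yearly files are named <prefix><YYYY>*, as implied by groupby = 'year'.
FORCING=/shr03/CoLM_Forcing/crujra_v2.5
for v in tmp spfh pres pre ugrd vgrd dswrf dlwrf; do
    ls ${FORCING}/${v}/crujra.v2.5.5d.${v}.2019* >/dev/null 2>&1 || echo "missing ${v} for 2019"
done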
54 changes: 0 additions & 54 deletions run/scripts/batch.config

This file was deleted.

1 change: 1 addition & 0 deletions run/scripts/batch.config
54 changes: 54 additions & 0 deletions run/scripts/batch.github.config
@@ -0,0 +1,54 @@
#------------------------------earthlab--------------------------------------------
##!/bin/bash
#
##SBATCH -J <CASENAME>
##SBATCH -p <QUEUE>
##SBATCH -N <NNODES>
##SBATCH -n <NPROCESSES>
##SBATCH --ntasks-per-node=<NTASKSPERNODE>
##SBATCH --mem=<MEMORY>
##SBATCH -o colm.o%j
##SBATCH -e colm.e%j
##SBATCH --exclusive
##SBATCH -t <WALLTIME>
##
##module purge
##module load compiler/intel/2021.3.1
##module load mpi/intelmpi/2018.4.274
##module load mathlib/netcdf/intel/4.4.1
##module load mathlib/hdf5/intel/1.8.20
##
##export I_MPI_FABRICS=shm:dapl
##export I_MPI_DAPL_UD=1
##export I_MPI_DAPL_UD_RDMA_MIXED=1
##export I_MPI_LARGE_SCALE_THRESHOLD=8192
##export I_MPI_DAPL_UD_ACK_SEND_POOL_SIZE=8704
##export I_MPI_DAPL_UD_ACK_RECV_POOL_SIZE=8704
##export I_MPI_DAPL_UD_RNDV_EP_NUM=2
##
##export DAPL_UCM_REP_TIME=8000 # REQUEST timer, waiting for REPLY in millisecs
##export DAPL_UCM_RTU_TIME=8000 # REPLY timer, waiting for RTU in millisecs
##export DAPL_UCM_RETRY=10 # REQUEST and REPLY retries
##export DAPL_UCM_CQ_SIZE=2000
##export DAPL_UCM_QP_SIZE=2000
##
##export DAPL_UCM_DREQ_RETRY=4 #default == 1
##export DAPL_UCM_DREP_TIME=200 #default == 200ms
##export DAPL_UCM_WAIT_TIME=10000 #default == 60000ms
##
##ulimit -s unlimited
##scontrol show hostname > nd
##NP=$SLURM_NPROCS


#-------------------------------baiduboat------------------------------------------
#!/bin/bash

#BSUB -J <CASENAME>
#BSUB -q <QUEUE>
#BSUB -o colm.o%J
#BSUB -e colm.e%J
#BSUB -n <NPROCESSES>
#BSUB -R rusage[mem=<MEMORY>]
#BSUB -R span[ptile=<NTASKSPERNODE>]
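The baiduboat section is an LSF template; the <...> placeholders are substituted by the run scripts before submission. A minimal, hedged submission sketch (the job-script name colm.lsf is an assumption):

# Hypothetical LSF submission once the <...> placeholders have been filled in.
bsub < colm.lsf
bjobs            # confirm the job was accepted by the queue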

44 changes: 44 additions & 0 deletions run/scripts/batch.slurm-earthlab.config
@@ -0,0 +1,44 @@
#------------------------------earthlab--------------------------------------------
##!/bin/bash
#
##SBATCH -J <CASENAME>
##SBATCH -p <QUEUE>
##SBATCH -N <NNODES>
##SBATCH -n <NPROCESSES>
##SBATCH --ntasks-per-node=<NTASKSPERNODE>
##SBATCH --mem=<MEMORY>
##SBATCH -o colm.o%j
##SBATCH -e colm.e%j
##SBATCH --exclusive
##SBATCH -t <WALLTIME>
##
##module purge
##module load compiler/intel/2021.3.1
##module load mpi/intelmpi/2018.4.274
##module load mathlib/netcdf/intel/4.4.1
##module load mathlib/hdf5/intel/1.8.20
##
##export I_MPI_FABRICS=shm:dapl
##export I_MPI_DAPL_UD=1
##export I_MPI_DAPL_UD_RDMA_MIXED=1
##export I_MPI_LARGE_SCALE_THRESHOLD=8192
##export I_MPI_DAPL_UD_ACK_SEND_POOL_SIZE=8704
##export I_MPI_DAPL_UD_ACK_RECV_POOL_SIZE=8704
##export I_MPI_DAPL_UD_RNDV_EP_NUM=2
##
##export DAPL_UCM_REP_TIME=8000 # REQUEST timer, waiting for REPLY in millisecs
##export DAPL_UCM_RTU_TIME=8000 # REPLY timer, waiting for RTU in millisecs
##export DAPL_UCM_RETRY=10 # REQUEST and REPLY retries
##export DAPL_UCM_CQ_SIZE=2000
##export DAPL_UCM_QP_SIZE=2000
##
##export DAPL_UCM_DREQ_RETRY=4 #default == 1
##export DAPL_UCM_DREP_TIME=200 #default == 200ms
##export DAPL_UCM_WAIT_TIME=10000 #default == 60000ms
##
##ulimit -s unlimited
##scontrol show hostname > nd
##NP=$SLURM_NPROCS



16 changes: 16 additions & 0 deletions run/scripts/batch.slurm-tianhexy.config
@@ -0,0 +1,16 @@
#------------------------------tianhexy--------------------------------------------
#!/bin/bash

#SBATCH -J <CASENAME>
#SBATCH -p <QUEUE>
#SBATCH -N <NNODES>
#SBATCH -n <NPROCESSES>
#SBATCH --ntasks-per-node=<NTASKSPERNODE>
#SBATCH --mem=<MEMORY>
#SBATCH -o colm.o%j
#SBATCH -e colm.e%j
#SBATCH --exclusive
#SBATCH -t <WALLTIME>
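This tianhexy header is a plain SLURM template matching the new Makeoptions.Tianhe-XY; the <...> placeholders are filled in by the run scripts. A minimal, hedged submission sketch (the job-script name colm.slurm is an assumption):

# Hypothetical SLURM submission on Tianhe-XY after placeholder substitution.
sbatch colm.slurm
squeue -u "$USER"   # monitor the queued/running job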



44 changes: 44 additions & 0 deletions run/scripts/batch.slurm.config
@@ -0,0 +1,44 @@
#------------------------------earthlab--------------------------------------------
##!/bin/bash
#
##SBATCH -J <CASENAME>
##SBATCH -p <QUEUE>
##SBATCH -N <NNODES>
##SBATCH -n <NPROCESSES>
##SBATCH --ntasks-per-node=<NTASKSPERNODE>
##SBATCH --mem=<MEMORY>
##SBATCH -o colm.o%j
##SBATCH -e colm.e%j
##SBATCH --exclusive
##SBATCH -t <WALLTIME>
##
##module purge
##module load compiler/intel/2021.3.1
##module load mpi/intelmpi/2018.4.274
##module load mathlib/netcdf/intel/4.4.1
##module load mathlib/hdf5/intel/1.8.20
##
##export I_MPI_FABRICS=shm:dapl
##export I_MPI_DAPL_UD=1
##export I_MPI_DAPL_UD_RDMA_MIXED=1
##export I_MPI_LARGE_SCALE_THRESHOLD=8192
##export I_MPI_DAPL_UD_ACK_SEND_POOL_SIZE=8704
##export I_MPI_DAPL_UD_ACK_RECV_POOL_SIZE=8704
##export I_MPI_DAPL_UD_RNDV_EP_NUM=2
##
##export DAPL_UCM_REP_TIME=8000 # REQUEST timer, waiting for REPLY in millisecs
##export DAPL_UCM_RTU_TIME=8000 # REPLY timer, waiting for RTU in millisecs
##export DAPL_UCM_RETRY=10 # REQUEST and REPLY retries
##export DAPL_UCM_CQ_SIZE=2000
##export DAPL_UCM_QP_SIZE=2000
##
##export DAPL_UCM_DREQ_RETRY=4 #default == 1
##export DAPL_UCM_DREP_TIME=200 #default == 200ms
##export DAPL_UCM_WAIT_TIME=10000 #default == 60000ms
##
##ulimit -s unlimited
##scontrol show hostname > nd
##NP=$SLURM_NPROCS


