From 212d163512da415a0d7eca7bb87906a6d960f51a Mon Sep 17 00:00:00 2001 From: "guoqing.ge" Date: Thu, 6 Sep 2018 16:07:59 -0600 Subject: [PATCH] VLab Issue #54207, #54200, #54296: Bug fixes from MPMC (multiple platforms and multiple compilers) tests of ProdGSI for next community release. Fixes #54207, #54200, #54296. Change-Id: I52c8853df6bb10131109f25756f2a19b94472475 --- .gitignore | 1 + CMakeLists.txt | 23 +- cmake/Modules/FindNetCDF.cmake | 25 + cmake/Modules/setCompilerFlags.cmake | 54 +- doc/EnKF_user_guide/.gitignore | 10 + doc/EnKF_user_guide/README | 24 + doc/EnKF_user_guide/ack.tex | 2 +- doc/EnKF_user_guide/enkf_appendix_A.tex | 75 +- doc/EnKF_user_guide/enkf_ch1.tex | 28 +- doc/EnKF_user_guide/enkf_ch2.tex | 36 +- doc/EnKF_user_guide/enkf_ch3.tex | 2 +- doc/EnKF_user_guide/enkf_ch4.tex | 2 +- doc/EnKF_user_guide/enkf_ch5.tex | 2 +- doc/EnKF_user_guide/enkf_ch6.tex | 2 +- doc/EnKF_user_guide/forward.tex | 20 +- doc/EnKF_user_guide/references.bib | 137 ++ doc/EnKF_user_guide/title.tex | 20 +- doc/GSI_user_guide/.gitignore | 10 + doc/GSI_user_guide/README | 24 + doc/GSI_user_guide/ack.tex | 2 +- doc/GSI_user_guide/forward.tex | 30 +- doc/GSI_user_guide/gsi_appendixA.tex | 78 +- doc/GSI_user_guide/gsi_appendixB.tex | 2 +- doc/GSI_user_guide/gsi_appendixC.tex | 2 +- doc/GSI_user_guide/gsi_ch1.tex | 105 +- doc/GSI_user_guide/gsi_ch2.tex | 324 ++-- doc/GSI_user_guide/gsi_ch3.tex | 462 ++--- doc/GSI_user_guide/gsi_ch4.tex | 310 ++-- doc/GSI_user_guide/gsi_ch5.tex | 318 ++-- doc/GSI_user_guide/gsi_ch6.tex | 78 +- doc/GSI_user_guide/references.bib | 2 +- doc/GSI_user_guide/title.tex | 12 +- fix | 2 +- libsrc | 2 +- src/CMakeLists.txt | 2 +- src/control2state.f90 | 19 +- src/control2state_ad.f90 | 17 +- src/crtm_interface.f90 | 7 +- src/enkf/CMakeLists.txt | 5 +- src/enkf/gridinfo_gfs.f90 | 4 +- src/enkf/gridinfo_nmmb.f90 | 4 +- src/enkf/gridinfo_wrf.f90 | 4 +- src/enkf/gridio_nmmb.f90 | 4 +- src/enkf/gridio_wrf.f90 | 5 +- src/enkf/observer_nmmb.f90 | 131 +- 
src/enkf/observer_reg.f90 | 172 +- src/enkf/observer_wrf.f90 | 131 +- src/enkf/smooth_gfs.f90 | 1 + src/get_gefs_for_regional.f90 | 2 + src/m_obsLList.F90 | 4 +- src/mod_fv3_lola.f90 | 70 +- src/radiance_mod.f90 | 4 +- src/read_files.f90 | 2 +- src/setupaod.f90 | 2 +- src/setupoz.f90 | 3 +- ush/comenkf_namelist.sh | 131 ++ ush/comenkf_namelist_gfs.sh | 127 ++ ush/comenkf_run_gfs.ksh | 223 +++ ush/comenkf_run_regional.ksh | 183 ++ ush/comgsi_namelist.sh | 156 ++ ush/comgsi_namelist_chem.sh | 79 + ush/comgsi_namelist_gfs.sh | 136 ++ ush/comgsi_run_chem.ksh | 448 +++++ ush/comgsi_run_gfs.ksh | 734 ++++++++ ush/comgsi_run_regional.ksh | 666 ++++++++ .../read_diag/CMakeLists.txt | 11 + util/EnKF/arw/run/anavinfo | 77 + util/EnKF/arw/run/namelist.input | 17 + util/EnKF/arw/run/run_init.ksh | 26 + util/EnKF/arw/run/run_pro.ksh | 26 + util/EnKF/arw/src/CMakeLists.txt | 8 + .../src/enspreproc_regional.fd/CMakeLists.txt | 13 + .../get_gefs_for_regional_enspro.f90 | 1492 +++++++++++++++++ .../src/enspreproc_regional.fd/gfsp2wrfg.f90 | 167 ++ .../guess_grids_enspro.f90 | 184 ++ .../read_wrf_mass_guess4ens.F90 | 233 +++ .../wrf_netcdf_interface4ens.F90 | 1267 ++++++++++++++ .../src/initialens_regional.fd/CMakeLists.txt | 12 + .../initial_arw_ens.f90 | 322 ++++ .../src/initialens_regional.fd/namelist.input | 3 + .../read_netcdf_mass.f90 | 95 ++ .../update_netcdf_mass.f90 | 95 ++ util/gsienvreport.sh | 55 + util/ndate/CMakeLists.txt | 2 +- util/radar_process/radialwind/CMakeLists.txt | 20 + util/radar_process/radialwind/makefile | 34 - .../radar_process/reflectivity/CMakeLists.txt | 9 + util/radar_process/reflectivity/makefile | 29 - 88 files changed, 8672 insertions(+), 1227 deletions(-) create mode 100644 .gitignore create mode 100644 doc/EnKF_user_guide/.gitignore create mode 100644 doc/EnKF_user_guide/README create mode 100644 doc/EnKF_user_guide/references.bib create mode 100644 doc/GSI_user_guide/.gitignore create mode 100644 doc/GSI_user_guide/README mode change 100644 => 
120000 src/enkf/observer_nmmb.f90 mode change 100644 => 120000 src/enkf/observer_wrf.f90 create mode 100755 ush/comenkf_namelist.sh create mode 100755 ush/comenkf_namelist_gfs.sh create mode 100755 ush/comenkf_run_gfs.ksh create mode 100755 ush/comenkf_run_regional.ksh create mode 100755 ush/comgsi_namelist.sh create mode 100755 ush/comgsi_namelist_chem.sh create mode 100755 ush/comgsi_namelist_gfs.sh create mode 100755 ush/comgsi_run_chem.ksh create mode 100755 ush/comgsi_run_gfs.ksh create mode 100755 ush/comgsi_run_regional.ksh create mode 100644 util/Analysis_Utilities/read_diag/CMakeLists.txt create mode 100644 util/EnKF/arw/run/anavinfo create mode 100644 util/EnKF/arw/run/namelist.input create mode 100755 util/EnKF/arw/run/run_init.ksh create mode 100755 util/EnKF/arw/run/run_pro.ksh create mode 100644 util/EnKF/arw/src/CMakeLists.txt create mode 100644 util/EnKF/arw/src/enspreproc_regional.fd/CMakeLists.txt create mode 100644 util/EnKF/arw/src/enspreproc_regional.fd/get_gefs_for_regional_enspro.f90 create mode 100644 util/EnKF/arw/src/enspreproc_regional.fd/gfsp2wrfg.f90 create mode 100644 util/EnKF/arw/src/enspreproc_regional.fd/guess_grids_enspro.f90 create mode 100644 util/EnKF/arw/src/enspreproc_regional.fd/read_wrf_mass_guess4ens.F90 create mode 100755 util/EnKF/arw/src/enspreproc_regional.fd/wrf_netcdf_interface4ens.F90 create mode 100644 util/EnKF/arw/src/initialens_regional.fd/CMakeLists.txt create mode 100644 util/EnKF/arw/src/initialens_regional.fd/initial_arw_ens.f90 create mode 100644 util/EnKF/arw/src/initialens_regional.fd/namelist.input create mode 100755 util/EnKF/arw/src/initialens_regional.fd/read_netcdf_mass.f90 create mode 100755 util/EnKF/arw/src/initialens_regional.fd/update_netcdf_mass.f90 create mode 100755 util/gsienvreport.sh create mode 100644 util/radar_process/radialwind/CMakeLists.txt delete mode 100644 util/radar_process/radialwind/makefile create mode 100644 util/radar_process/reflectivity/CMakeLists.txt delete mode 100644 
util/radar_process/reflectivity/makefile diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..378eac25d --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +build diff --git a/CMakeLists.txt b/CMakeLists.txt index 25bbeb4a8..58f01d6f7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,9 +40,11 @@ project(GSI) option(BUILD_NMMB "Build the Enkf with NMMB module " OFF) option(BUILD_WRF "Build the Enkf with WRF module " OFF) option(BUILD_UTIL "Build the Enkf utilities " OFF) + option(BUILD_UTIL_COM "Build community utilities " OFF) option(BUILD_NCDIAG "Build the NCDIAG libraries" ON) option(BUILD_REG_TESTING "Build the Regression Testing Suite" ON) option(BUILD_UNIT_TESTING "Build the Unit Testing Suite" OFF) + option(MPI3 "Build EnKF with MPI3" OFF) cmake_minimum_required(VERSION 2.8) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/Modules/") set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/lib") @@ -60,12 +62,18 @@ project(GSI) find_package(GSICONTROL) include( CTest ) endif(BUILD_REG_TESTING) + if(MPI3) + set( MPI3FLAG "-DMPI3" CACHE INTERNAL "" ) + else() + set( MPI3FLAG "" CACHE INTERNAL "" ) + endif() cmake_policy(SET CMP0009 NEW) site_name(HOSTNAME) message("The hostname is ${HOSTNAME}" ) string(REGEX MATCH "s4-" HOST-S4 ${HOSTNAME} ) string(REGEX MATCH "tfe[0-9]" HOST-Theia ${HOSTNAME} ) + string(REGEX MATCH "fe[0-9]" HOST-Jet ${HOSTNAME} ) string(REGEX MATCH "g[0-9][0-9]a" HOST-Gyre ${HOSTNAME} ) string(REGEX MATCH "t[0-9][0-9]a" HOST-Tide ${HOSTNAME} ) string(REGEX MATCH "v[0-9][0-9]a" HOST-Venus ${HOSTNAME} ) @@ -74,11 +82,14 @@ project(GSI) string(REGEX MATCH "slogin" HOST-Surge ${HOSTNAME} ) string(REGEX MATCH "discover" HOST-Discover ${HOSTNAME}) string(REGEX MATCH "cheyenne" HOST-Cheyenne ${HOSTNAME} ) - string(REGEX MATCH "cheyenne" HOST-Cheyenne ${HOSTNAME} ) - if(EXISTS /jetmon) + if ( BUILD_CORELIBS ) + MESSAGE(STATUS "BUILD_CORELIBS manually-specified as ON") + set( host "GENERIC" ) + setGeneric() + 
elseif(EXISTS /jetmon) set( host "Jet" ) setJet() - elseif( HOST-S4 ) + elseif( HOST-S4 ) option(BUILD_CORELIBS "Build the Core libraries " ON) set( host "S4" ) setS4() @@ -237,6 +248,12 @@ project(GSI) add_subdirectory(util/Radiance_Monitor/nwprod/radmon_shared.v3.0.0/sorc/verf_radbcor.fd) add_subdirectory(util/Radiance_Monitor/nwprod/radmon_shared.v3.0.0/sorc/verf_radtime.fd) endif(BUILD_UTIL) + if(BUILD_UTIL_COM) + add_subdirectory(util/Analysis_Utilities/read_diag) + add_subdirectory(util/radar_process/radialwind) + add_subdirectory(util/radar_process/reflectivity) + add_subdirectory(util/EnKF/arw/src) + endif(BUILD_UTIL_COM) add_subdirectory(util/ndate) if(BUILD_UNIT_TESTING) set(CTEST_TEST_TIMEOUT 400 ) diff --git a/cmake/Modules/FindNetCDF.cmake b/cmake/Modules/FindNetCDF.cmake index 86fdf80c0..7121ce26a 100644 --- a/cmake/Modules/FindNetCDF.cmake +++ b/cmake/Modules/FindNetCDF.cmake @@ -39,6 +39,8 @@ elseif(DEFINED ENV{NETCDF_HOME}) set(NETCDF_DIR $ENV{NETCDF_HOME}) elseif( DEFINED ENV{NETCDF} ) set(NETCDF_DIR $ENV{NETCDF}) +elseif( DEFINED ENV{NETCDF4} ) + set(NETCDF_DIR $ENV{NETCDF4}) elseif(DEFINED ENV{SSEC_NETCDF4_DIR}) set(NETCDF_DIR $ENV{SSEC_NETCDF4_DIR}) elseif(DEFINED ENV{SSEC_NETCDF_DIR}) @@ -74,6 +76,29 @@ find_library (NETCDF_LIBRARIES_C HINTS ${NETCDF_DIR}/lib ) mark_as_advanced(NETCDF_LIBRARIES_C) +if("${NETCDF_DIR}" STREQUAL "") + message(FATAL_ERROR " + Cannot find NETCDF!!!! + + ") +endif() +find_file (NETCDF_NCDUMP + NAMES ncdump + HINTS ${NETCDF_DIR}/bin ) +mark_as_advanced(NETCDF_NCDUMP) +execute_process(COMMAND ${NETCDF_NCDUMP} + ERROR_VARIABLE NCDUMP_INFO) +string(FIND "${NCDUMP_INFO}" "version" VERSION_LOC REVERSE) +math(EXPR VERSION_LOC "${VERSION_LOC} + 9") +string(SUBSTRING "${NCDUMP_INFO}" ${VERSION_LOC} 1 NETCDF_MAJOR_VERSION) +if (${NETCDF_MAJOR_VERSION} LESS 4) + message(FATAL_ERROR " + Current NETCDF is ${NETCDF_DIR} + !!!! NETCDF version 4.0 and above is required !!!! 
+ + ") +endif() + set (NetCDF_has_interfaces "YES") # will be set to NO if we're missing any interfaces set (NetCDF_libs "${NETCDF_LIBRARIES_C}") diff --git a/cmake/Modules/setCompilerFlags.cmake b/cmake/Modules/setCompilerFlags.cmake index 705fa7ac3..9eff7c61b 100644 --- a/cmake/Modules/setCompilerFlags.cmake +++ b/cmake/Modules/setCompilerFlags.cmake @@ -11,13 +11,13 @@ function (setIntel) else() set( OMPFLAG "-openmp" CACHE INTERNAL "OpenMP flag") endif() - if(( HOST-Tide ) OR ( HOST-Gyre )) - set( MPI3FLAG "" CACHE INTERNAL "" ) - else() - set( MPI3FLAG "-DMPI3" CACHE INTERNAL "" ) - endif() STRING(COMPARE EQUAL ${CMAKE_BUILD_TYPE} "RELEASE" BUILD_RELEASE) STRING(COMPARE EQUAL ${CMAKE_BUILD_TYPE} "PRODUCTION" BUILD_PRODUCTION) + if(HOST-Jet) + set(HOST_FLAG "") + else() + set(HOST_FLAG "-xHOST") + endif() set( MKL_FLAG "-mkl" ) set(EXTRA_LINKER_FLAGS ${MKL_FLAG} PARENT_SCOPE) if( (BUILD_RELEASE) OR (BUILD_PRODUCTION) ) @@ -25,6 +25,7 @@ function (setIntel) set(GSI_Fortran_FLAGS "-DPOUND_FOR_STRINGIFY -traceback -O3 -fp-model source -convert big_endian -assume byterecl -implicitnone -D_REAL8_ ${OMPFLAG} ${MPI_Fortran_COMPILE_FLAGS}" PARENT_SCOPE) set(ENKF_Fortran_FLAGS "-O3 -fp-model source -convert big_endian -assume byterecl -implicitnone -DGFS -D_REAL8_ ${OMPFLAG}" PARENT_SCOPE) set(UTIL_Fortran_FLAGS "-O3 -fp-model source -convert big_endian -assume byterecl -implicitnone -DWRF -D_REAL8_ ${OMPFLAG}" PARENT_SCOPE) + set(UTIL_COM_Fortran_FLAGS "-O3 -fp-model source -convert big_endian -assume byterecl -implicitnone" PARENT_SCOPE) set (BACIO_Fortran_FLAGS "-O3 -free -assume nocc_omp " PARENT_SCOPE ) set (BUFR_Fortran_FLAGS "-O2 -r8 -fp-model strict -traceback -xSSE2 -O3 -axCORE-AVX2 ${OMPFLAG} " PARENT_SCOPE ) set (BUFR_C_FLAGS "-g -traceback -DUNDERSCORE -O3 -axCORE-AVX2 -DDYNAMIC_ALLOCATION -DNFILES=32 -DMAXCD=250 -DMAXNC=600 -DMXNAF=3" PARENT_SCOPE ) @@ -35,6 +36,7 @@ function (setIntel) set(ENKF_Fortran_FLAGS "-O3 -fp-model strict -convert big_endian 
-assume byterecl -implicitnone -DGFS -D_REAL8_ ${MPI3FLAG} ${OMPFLAG} " PARENT_SCOPE) set(UTIL_Fortran_FLAGS "-O3 -fp-model source -convert big_endian -assume byterecl -implicitnone -DWRF -D_REAL8_ ${OMPFLAG} " PARENT_SCOPE) + set(UTIL_COM_Fortran_FLAGS "-O3 -fp-model source -convert big_endian -assume byterecl -implicitnone" PARENT_SCOPE) set (BACIO_Fortran_FLAGS "-O3 -free -assume nocc_omp " PARENT_SCOPE ) set (BUFR_Fortran_FLAGS " -c -g -traceback -O3 -axCORE-AVX2 -r8 " PARENT_SCOPE ) set (BUFR_C_FLAGS "-DSTATIC_ALLOCATION -DUNDERSCORE -DNFILES=32 -DMAXCD=250 -DMAXNC=600 -DMXNAF=3" PARENT_SCOPE ) @@ -49,8 +51,9 @@ function (setIntel) set (BUFR_C_FLAGS "-DSTATIC_ALLOCATION -DUNDERSCORE -DNFILES=32 -DMAXCD=250 -DMAXNC=600 -DMXNAF=3" PARENT_SCOPE ) else() set(GSI_Fortran_FLAGS "-DPOUND_FOR_STRINGIFY -O3 -fp-model source -assume byterecl -convert big_endian -g -traceback -D_REAL8_ ${OMPFLAG} ${MPI_Fortran_COMPILE_FLAGS}" PARENT_SCOPE) - set(ENKF_Fortran_FLAGS "-O3 -xHOST -warn all -implicitnone -traceback -fp-model strict -convert big_endian -DGFS -D_REAL8_ ${MPI3FLAG} ${OMPFLAG}" PARENT_SCOPE) - set(UTIL_Fortran_FLAGS "-O3 -xHOST -warn all -implicitnone -traceback -fp-model strict -convert big_endian -DWRF -D_REAL8_ ${OMPFLAG}" PARENT_SCOPE) + set(ENKF_Fortran_FLAGS "-O3 ${HOST_FLAG} -warn all -implicitnone -traceback -fp-model strict -convert big_endian -DGFS -D_REAL8_ ${MPI3FLAG} ${OMPFLAG}" PARENT_SCOPE) + set(UTIL_Fortran_FLAGS "-O3 ${HOST_FLAG} -warn all -implicitnone -traceback -fp-model strict -convert big_endian -DWRF -D_REAL8_ ${OMPFLAG}" PARENT_SCOPE) + set(UTIL_COM_Fortran_FLAGS "-O3 -fp-model source -convert big_endian -assume byterecl -implicitnone" PARENT_SCOPE) set (BACIO_Fortran_FLAGS "-O3 -free -assume nocc_omp " PARENT_SCOPE ) set (BUFR_Fortran_FLAGS "-O2 -r8 -fp-model strict -traceback -xSSE2 -O3 -axCORE-AVX2 ${OMPFLAG} " PARENT_SCOPE ) set (BUFR_C_FLAGS "-g -traceback -DUNDERSCORE -O3 -axCORE-AVX2 -DDYNAMIC_ALLOCATION -DNFILES=32 -DMAXCD=250 
-DMAXNC=600 -DMXNAF=3" PARENT_SCOPE ) @@ -79,11 +82,13 @@ function (setIntel) set(GSI_Fortran_FLAGS "-DPOUND_FOR_STRINGIFY -O0 -fp-model source -convert big_endian -assume byterecl -implicitnone -mcmodel medium -shared-intel -g -traceback -debug -ftrapuv -check all,noarg_temp_created -fp-stack-check -fstack-protector -warn all,nointerfaces -convert big_endian -implicitnone -D_REAL8_ ${OMPFLAG} ${MPI_Fortran_COMPILE_FLAGS}" PARENT_SCOPE) set(ENKF_Fortran_FLAGS "-g -O0 -fp-model source -convert big_endian -assume byterecl -implicitnone -warn all -traceback -debug all -check all,noarg_temp_created -implicitnone -DGFS -D_REAL8_ ${MPI3FLAG} ${OMPFLAG}" PARENT_SCOPE) set(UTIL_Fortran_FLAGS "-g -O0 -fp-model source -convert big_endian -assume byterecl -implicitnone -warn all -traceback -debug all -check all,noarg_temp_created -implicitnone -DWRF -D_REAL8_ ${OMPFLAG}" PARENT_SCOPE) + set(UTIL_COM_Fortran_FLAGS "-g -O0 -fp-model source -convert big_endian -assume byterecl -implicitnone -warn all -traceback -debug all" PARENT_SCOPE) set (BACIO_Fortran_FLAGS "-g -free -assume nocc_omp " PARENT_SCOPE ) else() set(GSI_Fortran_FLAGS "-DPOUND_FOR_STRINGIFY -O0 -fp-model strict -convert big_endian -assume byterecl -implicitnone -g -traceback -debug -ftrapuv -check all,noarg_temp_created -fp-stack-check -fstack-protector -warn all,nointerfaces -D_REAL8_ ${OMPFLAG} ${MPI_Fortran_COMPILE_FLAGS}" PARENT_SCOPE) - set(ENKF_Fortran_FLAGS "-O0 -xHOST -warn all -implicitnone -traceback -g -debug full -fp-model strict -convert big_endian -D_REAL8_ ${MPI3FLAG} ${OMPFLAG}" PARENT_SCOPE) - set(UTIL_Fortran_FLAGS "-O0 -xHOST -warn all -implicitnone -traceback -g -debug full -fp-model strict -convert big_endian -D_REAL8_ ${OMPFLAG}" PARENT_SCOPE) + set(ENKF_Fortran_FLAGS "-O0 ${HOST_FLAG} -warn all -implicitnone -traceback -g -debug full -fp-model strict -convert big_endian -D_REAL8_ ${MPI3FLAG} ${OMPFLAG}" PARENT_SCOPE) + set(UTIL_Fortran_FLAGS "-O0 ${HOST_FLAG} -warn all -implicitnone 
-traceback -g -debug full -fp-model strict -convert big_endian -D_REAL8_ ${OMPFLAG}" PARENT_SCOPE) + set(UTIL_COM_Fortran_FLAGS "-O0 -warn all -implicitnone -traceback -g -debug full -fp-model strict -convert big_endian" PARENT_SCOPE) set (BACIO_Fortran_FLAGS "-g -free -assume nocc_omp " PARENT_SCOPE ) endif() #Common debug flags @@ -111,11 +116,12 @@ endfunction() function (setGNU) message("Setting GNU Compiler Flags") set(GSI_Fortran_FLAGS " -O3 -fconvert=big-endian -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -D_REAL8_ -fopenmp -ffree-line-length-0" PARENT_SCOPE) - set(EXTRA_LINKER_FLAGS "-lgomp" PARENT_SCOPE) + set(EXTRA_LINKER_FLAGS "-lgomp -lnetcdf -lnetcdff" PARENT_SCOPE) set(GSI_CFLAGS "-I. -DFortranByte=char -DFortranInt=int -DFortranLlong='long long' -g -Dfunder" PARENT_SCOPE ) set(ENKF_Fortran_FLAGS " -O3 -fconvert=big-endian -ffree-line-length-0 -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -DGFS -D_REAL8_ -fopenmp" PARENT_SCOPE) set(UTIL_Fortran_FLAGS " -O3 -fconvert=big-endian -ffree-line-length-0 -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -DWRF -D_REAL8_ -fopenmp" PARENT_SCOPE) - set(BUFR_Fortran_FLAGS " -O3 -fconvert=big-endian -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -D_REAL8_ -fopenmp" PARENT_SCOPE) + set(UTIL_COM_Fortran_FLAGS " -O3 -fconvert=big-endian -ffree-line-length-0 -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check" PARENT_SCOPE) + set(BUFR_Fortran_FLAGS " -O3 -fconvert=big-endian -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -fdefault-real-8 -D_REAL8_ -fopenmp" PARENT_SCOPE) set(BUFR_Fortran_PP_FLAGS " -P " PARENT_SCOPE) set(BUFR_C_FLAGS " -O3 -g -DUNDERSCORE -DDYNAMIC_ALLOCATION 
-DNFILES=32 -DMAXCD=250 -DMAXNC=600 -DMXNAF=3" PARENT_SCOPE ) set(BACIO_Fortran_FLAGS " -O3 -fconvert=big-endian -ffree-form -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -D_REAL8_ -fopenmp" PARENT_SCOPE) @@ -123,10 +129,10 @@ function (setGNU) set(NEMSIO_Fortran_FLAGS " -O3 -fconvert=big-endian -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -D_REAL8_ -fopenmp" PARENT_SCOPE) set(SIGIO_Fortran_FLAGS " -O3 -fconvert=big-endian -ffree-form -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -D_REAL8_ -fopenmp" PARENT_SCOPE) set(SFCIO_Fortran_FLAGS " -O3 -ffree-form -fconvert=big-endian -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -D_REAL8_ -fopenmp" PARENT_SCOPE) - set(SP_Fortran_FLAGS " -O3 -fconvert=big-endian -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -D_REAL8_ -fopenmp -DLINUX" PARENT_SCOPE) - set(SP_F77_FLAGS " -O3 -fconvert=big-endian -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -D_REAL8_ -fopenmp -DLINUX" PARENT_SCOPE) - set(W3EMC_Fortran_FLAGS " -O3 -fconvert=big-endian -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -D_REAL8_ -fopenmp" PARENT_SCOPE) - set(W3NCO_Fortran_FLAGS " -O3 -fconvert=big-endian -ffixed-form -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -D_REAL8_ " PARENT_SCOPE) + set(SP_Fortran_FLAGS " -O3 -fconvert=big-endian -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -fdefault-real-8 -D_REAL8_ -fopenmp -DLINUX" PARENT_SCOPE) + set(SP_F77_FLAGS " -O3 -fconvert=big-endian -ffast-math -fno-second-underscore -frecord-marker=4 
-funroll-loops -ggdb -static -Wall -fno-range-check -fdefault-real-8 -D_REAL8_ -fopenmp -DLINUX" PARENT_SCOPE) + set(W3EMC_Fortran_FLAGS " -O3 -fconvert=big-endian -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -fdefault-real-8 -D_REAL8_ -fopenmp" PARENT_SCOPE) + set(W3NCO_Fortran_FLAGS " -O3 -fconvert=big-endian -ffixed-form -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -fdefault-real-8 -D_REAL8_ " PARENT_SCOPE) set(W3NCO_C_FLAGS " -O3 -fconvert=big-endian -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -D_REAL8_ -fopenmp" PARENT_SCOPE) set(WRFLIB_Fortran_FLAGS " -O3 -fconvert=big-endian -ffast-math -fno-second-underscore -frecord-marker=4 -funroll-loops -ggdb -static -Wall -fno-range-check -D_REAL8_ -fopenmp -ffree-line-length-0" PARENT_SCOPE) set( NCDIAG_Fortran_FLAGS "-ffree-line-length-none" PARENT_SCOPE ) @@ -141,8 +147,9 @@ function (setPGI) set(GSI_CFLAGS "-I. 
-DFortranByte=char -DFortranInt=int -DFortranLlong='long long' -g -Dfunder" PARENT_SCOPE ) set(ENKF_Fortran_FLAGS " -O3 -byteswapio -fast -DGFS -D_REAL8_ -mp" PARENT_SCOPE) set(UTIL_Fortran_FLAGS " -O3 -byteswapio -fast -DWRF -D_REAL8_ -mp" PARENT_SCOPE) + set(UTIL_COM_Fortran_FLAGS " -O3 -byteswapio -fast" PARENT_SCOPE) - set(BUFR_Fortran_FLAGS "-O1 -byteswapio -D_REAL8_ -mp" PARENT_SCOPE) + set(BUFR_Fortran_FLAGS "-O1 -byteswapio -D_REAL8_ -mp -r8" PARENT_SCOPE) set(BUFR_Fortran_PP_FLAGS " -P " PARENT_SCOPE) set(BUFR_C_FLAGS " -g -DUNDERSCORE -DDYNAMIC_ALLOCATION -DNFILES=32 -DMAXCD=250 -DMAXNC=600 -DMXNAF=3" PARENT_SCOPE ) @@ -152,10 +159,17 @@ function (setPGI) set(NEMSIO_Fortran_FLAGS " -O1 -byteswapio -D_REAL8_ -mp" PARENT_SCOPE) set(SIGIO_Fortran_FLAGS " -O3 -Mfree -byteswapio -fast -D_REAL8_ -mp" PARENT_SCOPE) set(SFCIO_Fortran_FLAGS " -O3 -byteswapio -Mfree -fast -D_REAL8_ -mp" PARENT_SCOPE) - set(SP_Fortran_FLAGS " -O1 -byteswapio -D_REAL8_ -mp" PARENT_SCOPE) - set(SP_F77_FLAGS "-DLINUX -O1 -byteswapio -D_REAL8_ -mp" PARENT_SCOPE) - set(W3EMC_Fortran_FLAGS " -O1 -byteswapio -D_REAL8_ " PARENT_SCOPE) - set(W3NCO_Fortran_FLAGS " -O1 -byteswapio -D_REAL8_ " PARENT_SCOPE) + set(SP_Fortran_FLAGS " -O1 -byteswapio -D_REAL8_ -mp -r8 " PARENT_SCOPE) + set(SP_F77_FLAGS "-DLINUX -O1 -byteswapio -D_REAL8_ -mp -r8 " PARENT_SCOPE) + set(W3EMC_Fortran_FLAGS " -O1 -byteswapio -D_REAL8_ -r8 " PARENT_SCOPE) + set(W3NCO_Fortran_FLAGS " -O1 -byteswapio -D_REAL8_ -r8 " PARENT_SCOPE) set(W3NCO_C_FLAGS " -O1 -D_REAL8_ -mp" PARENT_SCOPE) set(WRFLIB_Fortran_FLAGS "-Minform=inform -O1 -byteswapio -D_REAL8_ -mp -Mfree" PARENT_SCOPE) + set( NDATE_Fortran_FLAGS "-byteswapio -DCOMMCODE -DLINUX -DUPPLITTLEENDIAN -O3 -Wl,-noinhibit-exec" PARENT_SCOPE) + + if ( ${CMAKE_C_COMPILER_VERSION} VERSION_LESS 18.5 ) + set( NCDIAG_Fortran_FLAGS "-Mfree -DOLDPGI" PARENT_SCOPE ) + else() + set( NCDIAG_Fortran_FLAGS "-Mfree" PARENT_SCOPE ) + endif() endfunction() diff --git 
a/doc/EnKF_user_guide/.gitignore b/doc/EnKF_user_guide/.gitignore new file mode 100644 index 000000000..3b4dab087 --- /dev/null +++ b/doc/EnKF_user_guide/.gitignore @@ -0,0 +1,10 @@ +main.aux +main.bbl +main.blg +main.idx +main.log +main.out +main.pdf +main.synctex.gz +main.toc +*.pdf diff --git a/doc/EnKF_user_guide/README b/doc/EnKF_user_guide/README new file mode 100644 index 000000000..549499b80 --- /dev/null +++ b/doc/EnKF_user_guide/README @@ -0,0 +1,24 @@ +This directory contains the latex source for creating the enkf user's guide. This document requires +an instillation of latex that supports the CTAN extensions. + +1. Required flavor of LaTex. +Options for the three main platforms. + +Windows: +The most popular choice here is the MiKTEX (http://www.miktex.org) distribution, which lets you easily +manage TEX packages. Many people advise beginners to get the proTEXt bundling of MiKTEX, which lets you +install by using a .pdf file with links so you can read about your options and then click on the right +one. And it includes other components that help you work with your TEX system. + +Unix-type systems, including GNU/Linux: +The best choice here is TEX Live (http://tug.org/texlive/), which contains many packages and programs. +It is freely available over the Internet or on disc; see the web page for details. Note that most Unix +systems have TEX as an installation option so you might already have it or be able to easily get it using +your system administration package management tool: RPM, or DEB, or whatever. + +Mac­in­tosh: +Get the MacTEX (http://tug.org/mactex/) distribution, which is TEX Live with some Mac specific goodies. + +2. Building the user's guide document. +Run LaTex on the file main.tex. It imports organizes each of the individual chapter files. This creates +the document main.pdf. 
diff --git a/doc/EnKF_user_guide/ack.tex b/doc/EnKF_user_guide/ack.tex index 1f999b27d..7fea57260 100644 --- a/doc/EnKF_user_guide/ack.tex +++ b/doc/EnKF_user_guide/ack.tex @@ -8,7 +8,7 @@ \textcolor{darkgray}{\LARGE Acknowledgement} \vspace*{1cm}\par -We thank the National Oceanic and Atmospheric Administration (NOAA) Hurricane Forecast Improvement Program (HFIP) and Office of Oceanic and Atmospheric Research (OAR) for their support of this work. This work is also facilitated by the National Center for Atmospheric Research (NCAR). NCAR is supported by the National Science Foundation (NSF). +We thank the National Oceanic and Atmospheric Administration (NOAA) Office of Oceanic and Atmospheric Research (OAR) for their support of this work. This work is also facilitated by the National Center for Atmospheric Research (NCAR). NCAR is supported by the National Science Foundation (NSF). \end{flushleft} \end{titlepage} diff --git a/doc/EnKF_user_guide/enkf_appendix_A.tex b/doc/EnKF_user_guide/enkf_appendix_A.tex index a9b73c7c2..cf645f166 100644 --- a/doc/EnKF_user_guide/enkf_appendix_A.tex +++ b/doc/EnKF_user_guide/enkf_appendix_A.tex @@ -1,4 +1,4 @@ -\chapter{Content of Namelist} +\chapter{Content of Namelist}\label{nameless} The following are lists and explanations of the EnKF namelist variables. Users can also check file \textit{params.f90} for the details. 
\\ @@ -11,13 +11,18 @@ \chapter{Content of Namelist} \hline datein&Analysis date in YYYYMMDDHH&integer&0\\ datapath&path to data directory (include trailing slash)&Character (len=500) &""\\ -iassim\_order&= 0 for the order they are read in,\newline =1 for random order\newline = 2 for order of predicted posterior variance reduction (based on prior) &integer&0\\ +iassim\_order&= 0 for the order they are read in,\newline +=1 for random order\newline += 2 for order of predicted posterior variance reduction (based on prior) &integer&0\\ covinflatemax&maximum inflation&real(r\_single)&1.e30\\ covinflatemin&minimum inflation&real(r\_single)&1.0\\ deterministic&if true, use EnSRF w/o perturbed obs.\newline - if false, use perturbed obs EnKF. & logical & true\\ sortinc&if false, re-order obs to minimize regression - errors as described in Anderson (2003). &logical &true\\ corrlengthnh&length for horizontal localization (in km) in north hemisphere&real(r\_single)&2800\\ corrlengthtr&length for horizontal localization (in km) in tropic&real(r\_single)&2800\\ -corrlengthsh&length for horizontal localization (in km) in south hemisphere&real(r\_single)&2800\\ + if false, use perturbed obs EnKF. & logical & true\\ +sortinc&if false, re-order obs to minimize regression + errors as described in Anderson (2003). 
&logical &true\\ +corrlengthnh&length for horizontal localization (in km) in north hemisphere&real(r\_single)&2800\\ +corrlengthtr&length for horizontal localization (in km) in tropic&real(r\_single)&2800\\ +corrlengthsh&length for horizontal localization (in km) in south hemisphere&real(r\_single)&2800\\ \hline \end{tabular} \end{table} @@ -29,17 +34,22 @@ \chapter{Content of Namelist} \hline Variable Name&Description&Data Type&Default\\ \hline -varqc&Turn on varqc & logical & false \\ huber&use huber norm instead of "flat-tail" &logical & fales\\ -nlons&number of lons &integer&0\\ nlats&Number of lats & integer & 0\\ +varqc&Turn on varqc & logical & false \\ +huber&use huber norm instead of "flat-tail" &logical & fales\\ +nlons&number of lons &integer&0\\ +nlats&Number of lats & integer & 0\\ smoothparm&smoothing parameter for inflation (-1 for no smoothing) & real(r\_single) & -1\\ -readin\_localization&If true, read in localization length scales from an external file & logical & false\\ zhuberleft&Parameter for "huber norm" QC & real(r\_single) & 1.e30\\ +readin\_localization&If true, read in localization length scales from an external file & logical & false\\ +zhuberleft&Parameter for "huber norm" QC & real(r\_single) & 1.e30\\ zhuberright&Parameter for "huber norm" QC & real(r\_single) &1.e30\\ - obtimelnh&observation time localization in hours over north hemisphere & real(r\_single) &25.925\\ obtimeltr&observation time localization in hours over tropic & real(r\_single) & 25.925\\ + obtimelnh&observation time localization in hours over north hemisphere & real(r\_single) &25.925\\ +obtimeltr&observation time localization in hours over tropic & real(r\_single) & 25.925\\ obtimelsh& observation time localization in hours over south hemisphere & real(r\_single) &25.925\\ reducedgrid & Do smooth in a reduced grid with a variable number on longitudes per latitude. 
The number of longitudes is chosen so that the zonal grid -spacing is approximately the same as at the equator & logical & false \\ lnsigcutoffnh & length for vertical localization in ln(p) over north hemisphere for conventional observation&real(r\_single)& 2.0\\ +spacing is approximately the same as at the equator & logical & false \\ +lnsigcutoffnh & length for vertical localization in ln(p) over north hemisphere for conventional observation&real(r\_single)& 2.0\\ lnsigcutofftr &length for vertical localization in ln(p) over tropic conventional observation&real(r\_single)&2.0\\ lnsigcutoffsh &length for vertical localization in ln(p) over south hemisphere for conventional observation&real(r\_single) &2.0\\ lnsigcutoffsatnh &length for vertical localization in ln(p) over north hemisphere for satellite radiance observation&real(r\_single)& -999.0\\ @@ -54,7 +64,9 @@ \chapter{Content of Namelist} - + + + \begin{table}[htbp] \centering \begin{tabular}{p{3cm}p{7cm}p{2.5cm}p{1.5cm}} @@ -63,19 +75,28 @@ \chapter{Content of Namelist} \hline analpertwtnh&adaptive posterior inflation parameter over north hemisphere:\newline 1 means inflate all the way back to prior spread&real(r\_single)& 0.0\\ - analpertwtsh & adaptive posterior inflation parameter over tropic:\newline 1 means inflate all the way back to prior spread&real(r\_single) &0.0\\ - analpertwttr&adaptive posterior inflation parameter over south hemisphere:\newline 1 means inflate all the way back to prior spread&real(r\_single) &0.0\\ sprd\_tol&tolerance for background check: observations are not used if they are more than sqrt(S+R) from mean, - where S is ensemble variance and R is observation error variance. 
&real(r\_single) &9.9e31\\ nlevs &total number of levels&integer &0\\ + analpertwtsh & adaptive posterior inflation parameter over tropic:\newline +1 means inflate all the way back to prior spread&real(r\_single) &0.0\\ + analpertwttr&adaptive posterior inflation parameter over south hemisphere:\newline +1 means inflate all the way back to prior spread&real(r\_single) &0.0\\ +sprd\_tol&tolerance for background check: +observations are not used if they are more than sqrt(S+R) from mean, + where S is ensemble variance and R is observation error variance. &real(r\_single) &9.9e31\\ +nlevs &total number of levels&integer &0\\ nanals&number of ensemble members&integer & 0\\ - nvars&number of 3d variables to update. For hydrostatic models, typically 5 (u,v,T,q,ozone).&integer &5\\ saterrfact &factor to multiply sat radiance errors&real(r\_single) &1.0\\ + nvars&number of 3d variables to update. For hydrostatic models, typically 5 (u,v,T,q,ozone).&integer &5\\ +saterrfact &factor to multiply sat radiance errors&real(r\_single) &1.0\\ univaroz &If true, ozone observations only affect ozone &logical &true\\ regional &If true, analysis is for regional&logical &false\\ use\_gfs\_nemsio&If true, GFS background is in NEMS format&logical& false\\ paoverpb\_thresh &if observation space posterior variance divided by prior variance less than this value, -observation is skipped during serial processing. \newline 1.0 = don't skip any obs &(r\_single) &1.0\\ latbound &definition of tropics and mid-latitudes (for inflation). &real(r\_single) &25.0\\ +observation is skipped during serial processing. \newline +1.0 = don't skip any obs &(r\_single) &1.0\\ +latbound &definition of tropics and mid-latitudes (for inflation). &real(r\_single) &25.0\\ delat &width of transition zone &real(r\_single)& 10.0\\ pseudo\_rh &use 'pseudo-rh' analysis variable, as in GSI &logical & false\\ -numiter&number of times to iterate state/bias correction update. 
(only relevant when satellite radiances assimilated, i.e. nobs\_sat>0)&integer &1.0\\ \hline +numiter&number of times to iterate state/bias correction update. (only relevant when satellite radiances assimilated, i.e. nobs\_sat>0)&integer &1.0\\ +\hline \end{tabular} \end{table} @@ -87,12 +108,19 @@ \chapter{Content of Namelist} \hline Variable Name&Description&Data Type&Default\\ \hline -biasvar&background error variance for rad bias coeffs (used in radbias.f90). Default is (old) GSI value.\newline if negative, bias coeff error variace is set to - biasvar/N, where N is number of obs per instrument/channel.\newline if newpc4pred is .true., biasvar is not used - the estimated analysis error variance from the previous cycle is +biasvar&background error variance for rad bias coeffs (used in radbias.f90). Default is (old) GSI value.\newline +if negative, bias coeff error variance is set to - biasvar/N, where N is number of obs per instrument/channel.\newline +if newpc4pred is .true., biasvar is not used - the estimated analysis error variance from the previous cycle is
&real(r\_single) &0.1\\ lupd\_satbiasc&if performing satellite bias correction update&logical &true\\ - cliptracers&if true, tracers are clipped to zero when read in, and just before they are written out.&logical& true\\ simple\_partition &partition obs for enkf using Graham's rule&logical &true\\ - adp\_anglebc&turn off or on the variational radiance angle bias correction&logical& false\\ angord &order of polynomial for angle bias correction&Integer& 0\\ - newpc4pred&controls preconditioning due to sat-bias correction term&logical&\\ nmmb&If true, ensemble forecast is NMMB&logical &false\\ iau&&logical &false\\ nhr\_anal&background forecast time for analysis&integer &6\\ + cliptracers&if true, tracers are clipped to zero when read in, and just before they are written out.&logical& true\\ +simple\_partition &partition obs for enkf using Graham's rule&logical &true\\ + adp\_anglebc&turn off or on the variational radiance angle bias correction&logical& false\\ +angord &order of polynomial for angle bias correction&Integer& 0\\ + newpc4pred&controls preconditioning due to sat-bias correction term&logical&\\ +nmmb&If true, ensemble forecast is NMMB&logical &false\\ +iau&&logical &false\\ +nhr\_anal&background forecast time for analysis&integer &6\\ letkf\_flag&If true, do LETKF&logical& false\\ boxsize &Observation box size for LETKF (deg)&real(r\_single) &90.0\\ massbal\_adjust&mass balance adjustment for GFS&logical &false\\ @@ -113,7 +141,8 @@ \chapter{Content of Namelist} \hline arw ®ional dynamical core ARW&logical &false\\ nmm®ional dynamical core NMM&logical& true\\ -doubly\_periodic&&logical&true\\ \hline +doubly\_periodic&&logical&true\\ +\hline \end{tabular} \end{table} @@ -127,7 +156,7 @@ \chapter{Content of Namelist} sattypes\_rad&strings describing the satellite data type (which form part of the diag* filename).& character(len=20) array (nsatmax\_rad) & '"" \\ dsis&strings corresponding to sattypes\_rad which correspond to the names in the NCEP 
global\_satinfo file.& -character(len=20) array (nsatmax\_rad)&""\\ +character(len=20) array (nsatmax\_rad)&""\\ \hline \end{tabular} \end{table} diff --git a/doc/EnKF_user_guide/enkf_ch1.tex b/doc/EnKF_user_guide/enkf_ch1.tex index fc7eac87d..d6ddc017a 100644 --- a/doc/EnKF_user_guide/enkf_ch1.tex +++ b/doc/EnKF_user_guide/enkf_ch1.tex @@ -1,4 +1,4 @@ -\chapter{Overview} +\chapter{Overview}\label{overview} %---------------------------------------------- \section{EnKF History and Background} @@ -55,7 +55,7 @@ \subsection{Community Code Contributions} \section{About This EnKF Release} %---------------------------------------------- -This user\textquotesingle s guide was composed for the EnKF community release version(v) 1.1. This version of EnKF is compatible with the GSI community release v3.5. Please note the major focuses of the DTC are currently on testing and evaluation of EnKF for regional numerical weather prediction (NWP) applications though the instructions and cases for EnKF global applications are available with this release. +This user\textquotesingle s guide was composed for the EnKF community release version(v) 1.2. This version of EnKF is compatible with the GSI community release v3.6. Please note the major focuses of the DTC are currently on testing and evaluation of EnKF for regional numerical weather prediction (NWP) applications though the instructions and cases for EnKF global applications are available with this release. Running this EnKF system requires running GSI a prior for its observation operators. Therefore, the GSI @@ -63,10 +63,10 @@ \section{About This EnKF Release} \subsection{What Is New in This Release Version} -Major updates to this version of EnKF are code optimization, including bug fixes and code cleanup. Added features include new capabilities to update multiple-time background ensembles and use of the NCEP I/O library \textit {nemio}. The observation types assimilated by EnKF were also updated as part of the GSI v3.5 updates. 
+Major updates to this version of EnKF are code optimization, including bug fixes and code cleanup. Added features include a new namelist to speed up the reading of GSI diagnostic files and added ensemble spread calculation utility for GFS sigma files. The observation types assimilated by EnKF were also updated as part of the GSI v3.6 updates. \subsection{Observations Used by This Version } -EnKF is using the GSI system as the observation operator to generate observation innovations. Therefore, the observation types assimilated by EnKF are the same as GSI. This version of EnKF has been tested to work with the community GSI release v3.5. It can assimilate, but is not limited to, the following types of observations: +EnKF is using the GSI system as the observation operator to generate observation innovations. Therefore, the observation types assimilated by EnKF are the same as GSI. This version of EnKF has been tested to work with the community GSI release v3.6. It can assimilate, but is not limited to, the following types of observations: \textbf{Conventional observations (including satellite retrievals):} \begin{itemize} @@ -88,13 +88,11 @@ \subsection{Observations Used by This Version } \item Quick Scatterometer (QuikSCAT), the Advanced Scatterometer (ASCAT) and Oceansat-2 Scatterometer (OSCAT) wind speed and direction \item RapidScat observations \item SSM/I and Tropical Rainfall Measuring Mission (TRMM) Microwave Imager (TMI) precipitation estimates -\item Doppler radial velocities \item Velocity-Azimuth Display (VAD) Next Generation Weather Radar ((NEXRAD) winds \item Global Positioning System (GPS) precipitable water estimates -\item Solar Backscatter Ultraviolet (SBUV) ozone profiles, Microwave Limb Sounder (MLS) (including NRT) ozone, and Ozone Monitoring Instrument (OMI) total ozone \item Sea surface temperature (SST) -\item Tropical Cyclone Vitals Database (TCVital) \item Doppler wind Lidar +\item Aviation routine weather report (METAR) cloud coverage
\item Flight level and Stepped Frequency Microwave Radiometer (SFMR) High Density Observation (HDOB) from reconnaissance aircraft \item Tall tower wind @@ -126,4 +124,18 @@ \subsection{Observations Used by This Version } \item Himawari AHI \end{itemize} -\setlength{\parskip}{12pt} +\textbf{Others:} +\begin{itemize} +\item GPS Radio occultation (RO) refractivity and bending angle profiles +\item Solar Backscatter Ultraviolet (SBUV) ozone profiles, Microwave Limb Sounder (MLS) (including NRT) ozone, and Ozone Monitoring Instrument (OMI) total ozone +\item Doppler radar radial velocities +\item Radar reflectivity Mosaic +\item Tail Doppler Radar (TDR) radial velocity and super-observation +\item Tropical Cyclone Vitals Database (TCVital) +\item Particulate matter (PM) of 10-um diameter, 2.5-um diameter or less +\item MODIS AOD (when using GSI-chem package) +\end{itemize}\setlength{\parskip}{12pt} + + +Please note some of these above mentioned data are not yet fully tested and/or implemented for operations. Therefore, the current GSI code might not have the optimal setup for these data. + diff --git a/doc/EnKF_user_guide/enkf_ch2.tex b/doc/EnKF_user_guide/enkf_ch2.tex index 5f8f2c57c..7f6ade215 100644 --- a/doc/EnKF_user_guide/enkf_ch2.tex +++ b/doc/EnKF_user_guide/enkf_ch2.tex @@ -1,4 +1,4 @@ -\chapter{Software Installation} +\chapter{Software Installation}\label{enkf_install} \setlength{\parskip}{12pt} %---------------------------------------------- @@ -24,29 +24,29 @@ \section{Obtaining the Source Code} \label{ch2_obtain_code} %---------------------------------------------- The community EnKF code and the GSI code are released as a combined source code package. The current -EnKF release is v1.1 and is paired with the community GSI release version 3.5. +EnKF release is v1.2 and is paired with the community GSI release version 3.6. 
The community EnKF release is available from the DTC community EnKF users website; \url{http://www.dtcenter.org/EnKF/users/index.php} -The community GSI/EnKF release includes the source code for both the EnKF v1.1 and the GSI v3.5 models, as +The community GSI/EnKF release includes the source code for both the EnKF v1.2 and the GSI v3.6 models, as well as an integrated build system, utilities, and documentation necessary to build and run the EnKF. To download the source code from the either the GSI or the EnKF website, select the \underline{Download} tab along with the \underline{GSI/EnKF} System subtab on the vertical menu located on the left side of the main page. New users must first register before downloading the source code. Returning users only need to enter their registration email address to log in. After accessing the download page, select the link to the -\verb|comGSIv3.5_EnKFv1.1| tarball. Please only use the source code provided with the -\verb|comGSIv3.5_EnKFv1.1| tarball. Do not mix and match this tarball with other versions of the community +\verb|comGSIv3.6_EnKFv1.2| tarball. Please only use the source code provided with the +\verb|comGSIv3.6_EnKFv1.2| tarball. Do not mix and match this tarball with other versions of the community GSI code or supplemental libraries, as this will lead to unpredictable results. -The community EnKF version 1.1 comes in a tar file named \verb|comGSIv3.5_EnKFv1.1.tar|. The tar file may +The community EnKF version 1.2 comes in a tar file named \verb|comGSIv3.6_EnKFv1.2.tar|. The tar file may be unpacked by using the standard UNIX commands: \begin{verbatim} - gunzip comGSIv3.5_EnKFv1.1.tar.gz - tar -xvf comGSIv3.5-EnKFv1.1.tar + gunzip comGSIv3.6_EnKFv1.2.tar.gz + tar -xvf comGSIv3.6-EnKFv1.2.tar \end{verbatim} -This creates the top level GSI directory \verb|comGSIv3.5_EnKFv1.1/|. +This creates the top level GSI directory \verb|comGSIv3.6_EnKFv1.2/|. 
After downloading the source code, and prior to building, the user should check the known issues link on the download page of DTC website to determine if any bug fixes or platform specific customizations are needed. @@ -65,8 +65,8 @@ \section{Compiling EnKF} \label{ch2_compiling} \item If not already done, build and install a recent version of the WRF model. The WRF build is currently needed for the WRF I/O libraries and should use the same compiler as used for the EnKF and GSI builds. \item Build GSI (see chapter 2 of the GSI users guide for more details) \begin{description} -\item[ ]a. Set the environment variables (see chapter 2.4.2 of the GSI users guide) -\item[ ]b. Run the configure script located at the main GSI system directory. +\item[ ]a. Set the environment variables (see chapter 2 of the GSI users guide) +\item[ ]b. Run the configure script located at in the \verb|dtc/| directory. \item[ ]c. Select the EnKF configuration (the default is regional, see section \ref{ch2_versions_enkf}) \item[ ]d. Run the compile script \item[ ]e. Confirm that GSI has successfully built. @@ -103,12 +103,12 @@ \section{System Requirements and External Libraries} \label{ch2_system_requireme The basic requirements for building are: \begin{itemize} -\item FORTRAN 95+ compiler +\item FORTRAN 2003+ compiler \item MPI v1.2+ \item OpenMP \item NetCDF V3.6.3 or V4.2+ \item LAPACK and BLAS mathematics libraries, or equivalent -\item WRF V3.5+ +\item WRF V3.6+ \end{itemize} Because all but the last of these tools and libraries are typically the purview of system administrators to install and maintain, they are lumped together here as part of the basic system requirements. 
@@ -127,15 +127,11 @@ \section{Compilers Tested for Release} \label{ch2_compilers_tested} & Fortran compiler version & C compiler version \\ \hline \hline -Intel only & ifort 16.0.1, 15.0.1, 13.0.1, 12.1.5, 12.1.4 & icc \\ +Intel only & ifort 17.0.1, 16.0.3, 15.0.3, 14.0.2 & icc \\ \hline -Intel \& gcc & ifort 16.0.1, 15.0.1, 13.0.1, 12.1.5, 12.1.4 & gcc 4.8.2, 4.4.7 \\ +PGI only & pgf90 17.5, 16.5, 15.7 & pgcc \\ \hline -PGI only & pgf90 16.1, 15.10, 15.7, 15.1, 14.10, 14.9, 14.7, 13.9, 13.3 & pgcc \\ -\hline -PGI \& gcc & pgf90 16.1, 15.10, 15.7, 15.1, 14.10, 14.9, 14.7, 13.9, 13.3 & gcc 4.8.2 \\ -\hline -GNU only & gfortran 6.3.0, 5.3.0 & gcc 6.3.0, 5.3.0 \\ +GNU only & gfortran 5.4.0 with netcdf 4.4.0 & gcc 5.4.0 \\ \hline \end{tabular} \label{ch2_tble1} diff --git a/doc/EnKF_user_guide/enkf_ch3.tex b/doc/EnKF_user_guide/enkf_ch3.tex index dd7fcf67e..6c42c331a 100644 --- a/doc/EnKF_user_guide/enkf_ch3.tex +++ b/doc/EnKF_user_guide/enkf_ch3.tex @@ -1,4 +1,4 @@ -\chapter{Running EnKF} +\chapter{Running EnKF}\label{enkf_run} \setlength{\parskip}{12pt} %---------------------------------------------- diff --git a/doc/EnKF_user_guide/enkf_ch4.tex b/doc/EnKF_user_guide/enkf_ch4.tex index 8c8f449dd..2d0820222 100644 --- a/doc/EnKF_user_guide/enkf_ch4.tex +++ b/doc/EnKF_user_guide/enkf_ch4.tex @@ -1,4 +1,4 @@ -\chapter{EnKF Diagnostics and Tuning} +\chapter{EnKF Diagnostics and Tuning}\label{enkf_diag} \setlength{\parskip}{12pt} This chapter will discuss how to assess whether an EnKF was successful based on the contents of the standard output (stdout). Properly checking the EnKF output will also provide useful information to diagnose potential errors in the system. The chapter begins with an introduction to the content and structure of the EnKF stdout, followed by detailed discussion of tuning options in the namelist. This chapter follows the online exercise for a case at 00z on February 13th, 2014 (case 2014021300). 
This case uses WRF-ARW NetCDF ensemble files as the background and analyzes several observations typical for operations, including most conventional observation data and select radiance data (AMSU-A , HIRS4). The case was run on a Linux cluster supercomputer, using 32 cores. Users can follow this test to reproduce the following results by visiting: diff --git a/doc/EnKF_user_guide/enkf_ch5.tex b/doc/EnKF_user_guide/enkf_ch5.tex index 0c55952d7..b4ade8fc0 100644 --- a/doc/EnKF_user_guide/enkf_ch5.tex +++ b/doc/EnKF_user_guide/enkf_ch5.tex @@ -1,4 +1,4 @@ -\chapter{Applications for Regional and Global EnKF} +\chapter{Applications for Regional and Global EnKF}\label{application} \setlength{\parskip}{12pt} In this chaper, the elements from the previous chapters will be applied to demonstrate how to run a regional and global case using the GSI observer and EnKF. These examples are intended to give users a clear idea of how to set up the GSI observer and EnKF for a particular application and properly check the run status and analysis results in order to determine if the run was successful. Note that the regional example focuses on WRF ARW, however WRF NMM and NMMB runs are similar, but require different background ensemble and namelist options. Similarly, the global example features a single global configuration (T254), however users may wish to use a different configuration, again requiring different background ensemble and namelist options. diff --git a/doc/EnKF_user_guide/enkf_ch6.tex b/doc/EnKF_user_guide/enkf_ch6.tex index 09ba0fa80..c854bf6da 100644 --- a/doc/EnKF_user_guide/enkf_ch6.tex +++ b/doc/EnKF_user_guide/enkf_ch6.tex @@ -1,4 +1,4 @@ -\chapter{EnKF Basic Concepts and Code Structure} +\chapter{EnKF Basic Concepts and Code Structure}\label{enkf_structure} \setlength{\parskip}{12pt} This chapter briefly describes basic concepts and the main code structure used in the current implementation of the NOAA EnKF in the form of EnSRF. 
Please note there are also other EnKF algorithms provided in this EnKF system. We are working on documenting the other algorithms and will complete the User's Guide in the future. diff --git a/doc/EnKF_user_guide/forward.tex b/doc/EnKF_user_guide/forward.tex index 7bb163f61..7e88640c5 100644 --- a/doc/EnKF_user_guide/forward.tex +++ b/doc/EnKF_user_guide/forward.tex @@ -5,22 +5,22 @@ \noindent \begin{flushleft} -\textcolor{darkgray}{\LARGE Forward} +\textcolor{darkgray}{\LARGE Foreword} \vspace*{1cm}\par This User\textquotesingle s Guide for the community ensemble Kalman filter (EnKF) data analysis system is particularly geared for beginners. -It describes the fundamentals of using EnKF, including basic skills of installing, running, diagnosing, and tuning EnKF. EnKF version (v) 1.1 was released in July 2016. This version of code is compatible with the Gridpoint Statistical Interpolation (GSI) analysis system community release v3.5. +It describes the fundamentals of using EnKF, including basic skills of installing, running, diagnosing, and tuning EnKF. EnKF version (v) 1.2 was released in September 2017. This version of code is compatible with the Gridpoint Statistical Interpolation (GSI) analysis system community release v3.6. This User\textquotesingle s Guide includes six chapters and one appendix: \begin{description} -\item[Chapter 1] provides a background introduction of the EnKF operational and community system, EnKF review committee, and data types that can be used in this version. -\item[Chapter 2] contains basic information about how to get started with EnKF, including system requirements; required software (and how to obtain it); how to download EnKF; and information about compilers, libraries, and how to build the code. -\item[Chapter 3] focuses on the input files needed to run EnKF and how to configure and run GSI observer and EnKF through a sample run script. This chapter also provides an example of a successful EnKF run. 
-\item[Chapter 4] includes information about diagnostics and tuning of the EnKF system through EnKF standard output and namelist variables. -\item[Chapter 5] illustrates how to setup and run the GSI observer and EnKF for a regional configuration and a global configuration, as well as how to diagnose the results. -\item[Chapter 6] introduces EnKF theory and the main structure of the code. -\item[Appendix A] describes the contents of the EnKF namelist. +\item[Chapter \ref{overview}] provides a background introduction of the EnKF operational and community system, EnKF review committee, and data types that can be used in this version. +\item[Chapter \ref{enkf_install}] contains basic information about how to get started with EnKF, including system requirements; required software (and how to obtain it); how to download EnKF; and information about compilers, libraries, and how to build the code. +\item[Chapter \ref{enkf_run}] focuses on the input files needed to run EnKF and how to configure and run GSI observer and EnKF through a sample run script. This chapter also provides an example of a successful EnKF run. +\item[Chapter \ref{enkf_diag}] includes information about diagnostics and tuning of the EnKF system through EnKF standard output and namelist variables. +\item[Chapter \ref{application}] illustrates how to setup and run the GSI observer and EnKF for a regional configuration and a global configuration, as well as how to diagnose the results. +\item[Chapter \ref{enkf_structure}] introduces EnKF theory and the main structure of the code. +\item[Appendix \ref{nameless}] describes the contents of the EnKF namelist. \end{description} This document is updated annually. For the latest version of this document and annual released code, please visit the EnKF User\textquotesingle s Website: @@ -36,7 +36,7 @@ %need update page number \textbf{Citation:}\\ -\texttt{Liu, H., M. Hu, D. Stark, H. Shao, K. Newman, and J. 
Whitaker, 2016: Ensemble Kalman Filter (EnKF) User\textquotesingle s Guide Version 1.1. Developmental Testbed Center. Available at \url{http://www.dtcenter.org/EnKF/users/docs/index.php}, 80 pp.} +\texttt{Liu, H., M. Hu, D. Stark, H. Shao, G. Ge, K. Newman, and J. Whitaker, 2017: Ensemble Kalman Filter (EnKF) User\textquotesingle s Guide Version 1.2. Developmental Testbed Center. Available at \url{http://www.dtcenter.org/EnKF/users/docs/index.php}, 80 pp.} \end{flushleft} \end{titlepage} diff --git a/doc/EnKF_user_guide/references.bib b/doc/EnKF_user_guide/references.bib new file mode 100644 index 000000000..17de7ed32 --- /dev/null +++ b/doc/EnKF_user_guide/references.bib @@ -0,0 +1,137 @@ +@article {Houtekamer2005, +author = {Houtekamer, P. L. and Mitchell, Herschel L.}, +title = {Ensemble Kalman filtering}, +journal = {Quarterly Journal of the Royal Meteorological Society}, +volume = {131}, +number = {613}, +publisher = {John Wiley \& Sons, Ltd.}, +issn = {1477-870X}, +url = {http://dx.doi.org/10.1256/qj.05.135}, +doi = {10.1256/qj.05.135}, +pages = {3269--3289}, +keywords = {Balance, Data assimilation, Model error}, +year = {2005}, +} + + +@article{Gaspari1999, + title={Construction of correlation functions in two and three dimensions}, + author={G. Gaspari and S. E. Cohn}, + journal={Quarterly Journal of the Royal Meteorological Society}, + volume={125}, + Issue={554}, + pages={723-757}, + year={1999}, + } + +@article{Anderson2007, + title={Scalable Implementations of Ensemble Filter Algorithms for Data Assimilation}, + author={J. Anderson and N. Collins}, + journal={Journal of Atmospheric and Oceanic Technology}, + volume={24}, + pages={1452-1463}, + year={2007}, + } + +@article{Hunt2007, + title={Efficient data assimilation for spatiotemporal chaos: a local ensemble transform Kalman filter}, + author={B. R. Hunt and E. J. Kostelich and I.
Szunyogh}, + journal={Physica D}, + volume={230}, + pages={112-126}, + year={2007}, + } + +@article{Whitaker2002, + title={Ensemble data assimilation without perturbed observations}, + author={J.S. Whitaker and T. Hamill}, + journal={Mon. Wea. Rev.}, + volume={130}, + pages={1913-1924}, + year={2002}, + } + +@article{Whitaker2008, + title={Ensemble data assimilation with the NCEP Global Forecast System}, + author={J.S. Whitaker and T. Hamill and X. Wei and Y. Song and Z. Toth}, + journal={Mon. Wea. Rev.}, + volume={136}, + pages={463-482}, + year={2008}, + } + +@article{Whitaker2012, + title={Evaluating methods to account for system errors in ensemble data assimilation}, + author={J.S. Whitaker and T. Hamill}, + journal={Mon. Wea. Rev.}, + volume={140}, + pages={3078-3089}, + year={2012}, + } + +@article{Wu2002, + title={Three-dimensional variational analysis with spatially inhomogeneous covariances}, + author={W.-S. Wu and J. Purser and D. F. Parrish}, + journal={Mon. Wea. Rev.}, + volume={130}, + pages={2905-2916}, + year={2002}, + doi={10.1175/1520-0493(2002)130<2905:TDVAWS>2.0.CO;2.} + } + +@article{Purser2003a, + title={Numerical aspects of the application of recursive filters to variational statistical analysis. Part I: Spatially homogeneous and isotropic Gaussian covariances}, + author={J. Purser and W.-S. Wu and D. F. Parrish and N. M. Roberts}, + journal={Mon. Wea. Rev.}, + volume={131}, + pages={1524-1535}, + year={2003}, + doi={10.1175//1520-0493(2003)131<1524:NAOTAO>2.0.CO;2.} + } + +@article{Purser2003b, + title={Numerical aspects of the application of recursive filters to variational statistical analysis. Part II: Spatially inhomogeneous and anisotropic general covariances}, + author={J. Purser and W.-S. Wu and D. F. Parrish and N. M. Roberts}, + journal={Mon. Wea.
Rev.}, + volume={131}, + pages={1536-1548}, + year={2003}, + doi={10.1175//2543.1.} + } + +@article{Shao2016, + title={Bridging Research to Operations Transitions: Status and Plans of Community GSI}, + author={H. Shao and J. Derber and X.-Y. Huang and M. Hu and K. Newman and D. Stark and M. Lueken and C. Zhou and L. Nance and Y.-H. Kuo and B. Brown}, + journal={Bulletin of the American Meteorological Society}, + year={2016}, + doi={10.1175/BAMS-D-13-00245.1} + } + +@article{Houtekamer1996, + title={A system simulation approach to ensemble prediction}, + author={P. L. Houtekamer and L. Lefaivre and J. Derome and H. Ritchie and H. L. Mitchell}, + journal={Mon. Wea. Rev.}, + volume={124}, + number={6}, + pages={1225--1242}, + year={1996} + } + +@article{Ziehmann2000, + title={Comparison of a single-model {EPS} with a multi-model ensemble consisting of a few operational models}, + author={Ziehmann, Christine}, + journal={Tellus}, + volume={52}, + number={3}, + pages={280--299}, + year={2000} + } + +@article{LeDuc2013, + title={Spatial-temporal fractions verification for high-resolution ensemble forecasts}, + author={Duc, Le and Saito, Kazuo and Seko, Hiromu}, + journal={Tellus}, + volume={65A}, + pages={18171}, + year={2013} + } diff --git a/doc/EnKF_user_guide/title.tex b/doc/EnKF_user_guide/title.tex index a07325594..d2e49c76a 100644 --- a/doc/EnKF_user_guide/title.tex +++ b/doc/EnKF_user_guide/title.tex @@ -7,19 +7,23 @@ \begin{center} \includegraphics[width=0.6\textwidth]{images/enkfbanner.pdf}~\\[2em] {\color{darkcerulean} - \Huge{User's Guide Version 1.1}\\[1em] - \large{- Compatible with GSI community release v3.5}\\[2em] - \normalsize{August 2016}\\[5em] + \Huge{User's Guide Version 1.2}\\[0.5em] + \large{- Compatible with GSI community release v3.6}\\[1em] + \normalsize{September 2017}\\[3em] } \normalsize{Hui Liu}\\ - \textit{\small{National Center for Atmospheric Research (NCAR)}}\\[2em] + \textit{\small{National Center for Atmospheric Research 
(NCAR)}}\\[1em] \normalsize{Ming Hu}\\ \textit{\small{National Oceanic and Atmospheric Administration (NOAA)/Earth System Research Laboratory}}\\ - \textit{\small{Cooperative Institute for Research in Environmental Sciences (CIRES)}}\\[2em] - \normalsize{Hui Shao, Don Stark, Kathryn Newman}\\ - \textit{\small{NCAR}}\\[2em] + \textit{\small{Cooperative Institute for Research in Environmental Sciences (CIRES)}}\\[1em] + \normalsize{Hui Shao, Don Stark}\\ + \textit{\small{NCAR}}\\[1em] + \normalsize{Guoqing Ge}\\ + \textit{\small{NOAA/CIRES}}\\[1em] + \normalsize{Kathryn Newman}\\ + \textit{\small{NCAR}}\\[1em] \normalsize{Jeff Whitaker}\\ - \textit{\small{NOAA/Earth System Research Laboratory}}\\[4em] + \textit{\small{NOAA/Earth System Research Laboratory}}\\[2em] \includegraphics[width=0.5\textwidth]{images/DTClogo.png}\\ diff --git a/doc/GSI_user_guide/.gitignore b/doc/GSI_user_guide/.gitignore new file mode 100644 index 000000000..3b4dab087 --- /dev/null +++ b/doc/GSI_user_guide/.gitignore @@ -0,0 +1,10 @@ +main.aux +main.bbl +main.blg +main.idx +main.log +main.out +main.pdf +main.synctex.gz +main.toc +*.pdf diff --git a/doc/GSI_user_guide/README b/doc/GSI_user_guide/README new file mode 100644 index 000000000..a0eb96dff --- /dev/null +++ b/doc/GSI_user_guide/README @@ -0,0 +1,24 @@ +This directory contains the latex source for creating the GSI user's guide. This document requires +an installation of latex that supports the CTAN extensions. + +1. Required flavor of LaTex. +Options for the three main platforms. + +Windows: +The most popular choice here is the MiKTEX (http://www.miktex.org) distribution, which lets you easily +manage TEX packages. Many people advise beginners to get the proTEXt bundling of MiKTEX, which lets you +install by using a .pdf file with links so you can read about your options and then click on the right +one. And it includes other components that help you work with your TEX system.
+ +Unix-type systems, including GNU/Linux: +The best choice here is TEX Live (http://tug.org/texlive/), which contains many packages and programs. +It is freely available over the Internet or on disc; see the web page for details. Note that most Unix +systems have TEX as an installation option so you might already have it or be able to easily get it using +your system administration package management tool: RPM, or DEB, or whatever. + +Mac­in­tosh: +Get the MacTEX (http://tug.org/mactex/) distribution, which is TEX Live with some Mac specific goodies. + +2. Building the user's guide document. +Run LaTex on the file main.tex. It imports organizes each of the individual chapter files. This creates +the document main.pdf. diff --git a/doc/GSI_user_guide/ack.tex b/doc/GSI_user_guide/ack.tex index b737c3252..22853b80d 100644 --- a/doc/GSI_user_guide/ack.tex +++ b/doc/GSI_user_guide/ack.tex @@ -17,7 +17,7 @@ Xiang-Yu Huang, Syed Rizvi, Zhiquan Liu, and Arthur Mizzi National Oceanic and Atmospheric Administration (NOAA) Earth System Research Laboratory (ESRL): \\ -Steve Weygandt, Dezso Devenyi, Joseph Olson, and Jeff Beck +Steve Weygandt, Dezso Devenyi, and Joseph Olson The GSI community support and code management effort is sponsored by NOAA's Office of Oceanic and Atmospheric Research (OAR). This work is also facilitated by NCAR. NCAR is supported by the National Science Foundation (NSF). diff --git a/doc/GSI_user_guide/forward.tex b/doc/GSI_user_guide/forward.tex index 17540267e..f3dd07854 100644 --- a/doc/GSI_user_guide/forward.tex +++ b/doc/GSI_user_guide/forward.tex @@ -5,21 +5,21 @@ \noindent \begin{flushleft} -\textcolor{darkgray}{\LARGE Forward} +\textcolor{darkgray}{\LARGE Foreword} \vspace*{1cm}\par -This document is the 2016 Gridpoint Statistical Interpolation (GSI) User\textquotesingle s Guide geared particularly for beginners. It describes the fundamentals of using GSI version (v) 3.5 released in July 2016. 
Advanced features of GSI as well as details of assimilation of specific data types can be found in the Advance GSI User\textquotesingle s Guide, released together with this document and the v3.5 code release. +This document is the 2017 Gridpoint Statistical Interpolation (GSI) User\textquotesingle s Guide, geared particularly for beginners. It describes the fundamentals of using GSI version (v) 3.6 released in September 2017. Advanced features of GSI as well as details of assimilation of specific data types can be found in the Advanced GSI User\textquotesingle s Guide, released together with this document and the v3.6 code release. This User\textquotesingle s Guide includes six chapters and three appendices: \begin{description} \item[Chapter 1] provides a background introduction of GSI. -\item[Chapter 2] contains basic information about how to install and compile GSI - including system requirements; required software (and how to obtain it); how to download GSI; and information about compilers, libraries, and how to build the code. -\item[Chapter 3] focuses on the input files needed to run GSI and how to configure and run GSI through a sample run script. Also provides example of a successful GSI run and explanations of often used namelist variables. +\item[Chapter 2] contains basic information about how to install and compile GSI - including system requirements, required software (and how to obtain it), how to download GSI, and information about compilers, libraries, and how to build the code. +\item[Chapter 3] focuses on the input files needed to run GSI and how to configure and run GSI through a sample run script. It also provides an example of a successful GSI run and explanations of often-used namelist variables. \item[Chapter 4] includes information about diagnostics and tuning of the GSI system through GSI standard output, statistic fit files, and some diagnostic tools. 
-\item[Chapter 5] illustrates the GSI applications for regional ARW cases, including the setup of different data types such as conventional, radiance, and GPSRO data and different analysis functions available in the GSI such as hybrid analysis. -\item[Chapter 6] illustrates the GSI applications for global case and chemical cases. -\item[Appendix A] introduces the community tools available for GSI users. -\item[Appendix B] is content of the GSI namelist section OBS\_INPUT. +\item[Chapter 5] illustrates the GSI applications for regional WRF-ARW cases, including the setup of different data types such as conventional, radiance, and GPSRO data, and different analysis functions available in the GSI, such running a hybrid analysis. +\item[Chapter 6] illustrates the GSI applications for global and chemical cases. +\item[Appendix A] introduces community tools available for GSI users. +\item[Appendix B] describes the content of the GSI namelist section OBS\_INPUT. \item[Appendix C] contains a complete list of the GSI namelist with explanations and default values. \end{description} @@ -34,7 +34,7 @@ %\item[Appendix C:] GSI Namelist: Name, Default value, Explanation %\end{description} -For the latest version of GSI User's Guide and released code, please visit the GSI User\textquotesingle s Website: +For the latest version of the GSI User\textquotesingle s Guide and released code, please visit the GSI User\textquotesingle s Website: \begin{center} \url{http://www.dtcenter.org/com-GSI/users/index.php} \end{center} @@ -44,17 +44,17 @@ gsi-help@ucar.edu \end{center} -This document and the annual GSI releases are made available through a community GSI effort jointly led by the Developmental Testbed Center (DTC) and the National Centers for Environmental Prediction (NCEP) Environmental Modeling Center (EMC), in collaboration with other GSI developers. 
To help sustain this effort, we recommend for those who use the community released GSI, the GSI helpdesk, the GSI User's Guide, and other DTC GSI services, please refer to this community GSI effort in their work and publications. +This document and the annual GSI releases are made available through a community GSI effort jointly led by the Developmental Testbed Center (DTC) and the National Centers for Environmental Prediction (NCEP) Environmental Modeling Center (EMC), in collaboration with other GSI developers. To help sustain this effort, we request that those who use the community-released GSI, the GSI helpdesk, the GSI User\textquotesingle s Guide, or other DTC GSI services, please refer to this community GSI effort in their work and publications. -For referencing this user's guide, please use: +To reference this user's guide, please use: -\texttt{Hu, M., H. Shao, D. Stark, K. Newman, C. Zhou, and X. Zhang, 2016: Grid-point Statistical -Interpolation (GSI) User's Guide Version 3.5. Developmental Testbed Center. Available at -http://www.dtcenter.org/com-GSI/users/docs/index.php, 141 pp.} +\texttt{Hu, M., G. Ge, H. Shao, D. Stark, K. Newman, C. Zhou, J. Beck, and X. Zhang, 2017: Grid-point Statistical +Interpolation (GSI) User's Guide Version 3.6. Developmental Testbed Center. Available at +http://www.dtcenter.org/com-GSI/users/docs/index.php, 149 pp.} For referencing the general aspect of the GSI community effort, please use: -\texttt{Shao, H., J. Derber, X.-Y. Huang, M. Hu, K. Newman, D. Stark, M. Lueken, C. Zhou, L. Nance, Y.-H. Kuo, B. Brown, 2016: Bridging Research to Operations Transitions: Status and Plans of Community GSI. Bulletin of the American Meteorological Society, doi:10.1175/BAMS-D-13-00245.1, in press} +\texttt{Shao, H., J. Derber, X.-Y. Huang, M. Hu, K. Newman, D. Stark, M. Lueken, C. Zhou, L. Nance, Y.-H. Kuo, B. Brown, 2016: Bridging Research to Operations Transitions: Status and Plans of Community GSI. Bull. Amer. Meteor. 
Soc., 97, 1427-1440, doi: 10.1175/BAMS-D-13-00245.1.} \end{flushleft} diff --git a/doc/GSI_user_guide/gsi_appendixA.tex b/doc/GSI_user_guide/gsi_appendixA.tex index 9075dfeee..c44492b32 100644 --- a/doc/GSI_user_guide/gsi_appendixA.tex +++ b/doc/GSI_user_guide/gsi_appendixA.tex @@ -1,4 +1,4 @@ -\chapter{GSI Community Tools} +\chapter{GSI Community Tools}\label{gsi_tool} %------------------------------------------------------------------------------- \section{BUFR Format and BUFR Tools} @@ -301,6 +301,82 @@ \section{Plot Single Observation Test Result and Analysis Increment} \end{scriptsize} +%------------------------------------------------------------------------------- +\section{Generate initial regional ensembles } +%------------------------------------------------------------------------------- + +Under the \textit{./util/EnKF} directory, there are two sub-directories:\textit{ enspreproc\_regional.fd/} and \textit{initialens\_regional.fd/}. The first one is to extract ensemble perturbations from GDAS 80 member ensembles and the second one is to add the extracted ensembles to a regional WRF background field (considered as the mean field) to generate initial regional ensembles. + +Before using these two utilities, you should have already successfully compiled the GSI and gotten the "gsi.exe" file. After that, enter each of the two directories, type "make" to compile the utilities. A successful compilation should yield "enspreproc.exe" and "initialens.exe" respectively. + +Now, the next step is to get GDAS spectrally smoothed atmospheric ensemble forecasts. These files should be in the sigma format, which is currently the only format supported by "enspreproc.exe". You need to contact NCEP or other appropriate contacts to download these kinds of ensembles. These ensemble files follow the naming convention of "sfg\_\$CDATE\_fhr\$FEs\_mem\$MEM". \$CDATE is the cycle date, such as 2017011518 which means 18z of Jan. 15th, 2017.
\$FE is the forecast hour, for example, 06 means a 6-hour forecast. \$MEM is the member number. Here is an example of GDAS ensembles: \textit{sfg\_2017011518\_fhr06s\_mem001}. + +After you download the required GDAS ensembles, follow these steps: + +1. Running "enspreproc.exe", enter the \textit{enspreproc\_regional.fd/} directory: + +(1). Generate the file "filelist01". This file lists the ensemble files to be used in the calculation of ensemble perturbations. For example, if it is determined to use 20 members to generate ensemble perturbations, the file "filelist01" will be as follows: +\begin{scriptsize} +\begin{verbatim} + sfg_2017011518_fhr06s_mem001 + sfg_2017011518_fhr06s_mem002 + sfg_2017011518_fhr06s_mem003 + ... + sfg_2017011518_fhr06s_mem018 + sfg_2017011518_fhr06s_mem019 + sfg_2017011518_fhr06s_mem020 +\end{verbatim} +\end{scriptsize} + +(2). Modify the file "namelist.input", change "n\_ens" to the total number of ensembles to be used. + +(3). Copy the "anavinfo" file used by GSI into the current directory. + +(4). Copy the background WRF file, name it as "wrf\_inout". + +(5). Create a job description file, submit the job to get it run in parallel. + +After the successful running of "enspreproc.exe", you will get ensemble perturbations as follows: +\begin{scriptsize} +\begin{verbatim} + en_perts4ars.mem0001 + en_perts4ars.mem0002 + en_perts4ars.mem0003 + ... + en_perts4ars.mem0018 + en_perts4ars.mem0019 + en_perts4ars.mem0020 +\end{verbatim} +\end{scriptsize} + +2. Running "initialens.exe", enter the \textit{initialens\_regional.fd/} directory: + +(1). Modify the file "namelist.input", change "n\_ens" to the total number of ensembles to be used. + +(2). Copy wrf\_inout to the current directory. + +(3). Copy wrf\_inout to wrfinput\_d01.mem\$MEM files as follows: +\begin{scriptsize} +\begin{verbatim} + cp wrf_inout wrfinput_d01.mem0001 + cp wrf_inout wrfinput_d01.mem0002 + cp wrf_inout wrfinput_d01.mem0003 + ...
+ cp wrf_inout wrfinput_d01.mem0018 + cp wrf_inout wrfinput_d01.mem0019 + cp wrf_inout wrfinput_d01.mem0020 +\end{verbatim} +\end{scriptsize} + + Be sure that each member has a corresponding wrfinput\_d01 file. These files will be updated by "initialens.exe" later. + +(4). Link the ensemble perturbations generated by "enspreproc.exe" to the current directory. Something like this: \textit{ln -s ../enspreproc\_regional.fd/en\_perts4arw.mem*}. + +(5). Create a job description file, submit the job to get it run in parallel. Please note that only 1 processor is required to run "initialens.exe" but submitting it to run on a computing node is a must. + +After the successful running of "initialens.exe", all the \textit{wrfinput\_d01.mem\$MEM} files are updated with ensemble perturbations added to the background or "mean" state of the original wrf\_inout. + +Now the initial regional ensembles have been successfully generated. diff --git a/doc/GSI_user_guide/gsi_appendixB.tex b/doc/GSI_user_guide/gsi_appendixB.tex index 2e86f9f45..7deb1d759 100644 --- a/doc/GSI_user_guide/gsi_appendixB.tex +++ b/doc/GSI_user_guide/gsi_appendixB.tex @@ -1,4 +1,4 @@ -\chapter{Contents of Namelist Section OBS\_INPUT} +\chapter{Contents of Namelist Section OBS\_INPUT}\label{gsi_obslist} \begin{scriptsize} \begin{verbatim} diff --git a/doc/GSI_user_guide/gsi_appendixC.tex b/doc/GSI_user_guide/gsi_appendixC.tex index 92e037410..23b0a06c4 100644 --- a/doc/GSI_user_guide/gsi_appendixC.tex +++ b/doc/GSI_user_guide/gsi_appendixC.tex @@ -1,4 +1,4 @@ -\chapter{GSI Namelist: Name, Default Value, Explanation} +\chapter{GSI Namelist: Name, Default Value, Explanation}\label{gsi_namelist} The following are lists and explanations of the GSI namelist variables. You can also find them in the source code \textbf{gsimod.F90}.
\begin{table}[h] diff --git a/doc/GSI_user_guide/gsi_ch1.tex b/doc/GSI_user_guide/gsi_ch1.tex index b90dbb7bc..985d36f96 100644 --- a/doc/GSI_user_guide/gsi_ch1.tex +++ b/doc/GSI_user_guide/gsi_ch1.tex @@ -1,25 +1,27 @@ -\chapter{Overview} +\chapter{Overview}\label{overview} %------------------------------------------------------------------------------- \section{GSI History and Background} %------------------------------------------------------------------------------- -The Gridpoint Statistical Interpolation (GSI) system is a unified data assimilation (DA) system for both global and regional applications. It was initially developed by the National Centers for Environmental Prediction (NCEP) Environmental Modeling Center (EMC) as a next generation analysis system based on the then operational Spectral Statistical Interpolation (SSI) analysis system (\cite{Wu2002}; \cite{Purser2003a}; \cite{Purser2003b}). Instead of being constructed in spectral space like the SSI, the GSI is constructed in physical space and is designed to be a flexible, state-of-art system that is efficient on available parallel computing platforms. Starting with a three-dimensional variational (3DVar) data assimilation technique, current GSI can be run as a data assimilation system of 2DVar (for surface data analysis), 3DVar, 3D ensemble-variational (3D EnVar), 4D EnVar, 3D/4D hybrid EnVar, or 4DVar (if coupled with an adjoint model of GSI supported forecast systems). +The Gridpoint Statistical Interpolation (GSI) system is a unified data assimilation (DA) system for both global and regional applications. It was initially developed by the National Centers for Environmental Prediction (NCEP) Environmental Modeling Center (EMC) as a next generation analysis system based on the then operational Spectral Statistical Interpolation (SSI) analysis system (\cite{Wu2002}; \cite{Purser2003a}; \cite{Purser2003b}). 
Instead of being constructed in spectral space like the SSI, the GSI is constructed in physical space and is designed to be a flexible, state-of-art system that is efficient on available parallel computing platforms. Starting with a three-dimensional variational (3DVar) data assimilation technique, the current GSI can be run as a data assimilation system of 2DVar (for surface data analysis), 3DVar, 3D ensemble-variational (3D EnVar), 4D EnVar, 3D/4D hybrid EnVar, or 4DVar (if coupled with an adjoint model from a GSI supported forecast system). -After initial development, the GSI analysis system became operational as the core of the North American Data Assimilation System (NDAS) for the North American Mesoscale (NAM) system in June 2006 and the Global Data Assimilation System (GDAS) for the Global Forecast System (GFS) in May 2007 at National Oceanic and Atmospheric Administration (NOAA). Since then, the GSI system has been adopted in various operational systems, including the National Aeronautics and Space Administration (NASA) Goddard Earth Observing System Model (GEOS), the Unite States Air Force (USAF) mesoscale data assimilation system, the NOAA Real-Time Mesoscale Analysis (RTMA) system, the Hurricane Weather Research and Forecasting (WRF) model (HWRF), and the Rapid Refresh (RAP) and High Resolution Rapid Refresh (HRRR) system, etc. The number of groups and institutes involved in operational GSI development has also increased throughout these years. +After initial development, the GSI analysis system became operational as the core of the North American Data Assimilation System (NDAS) for the North American Mesoscale (NAM) system in June 2006 and the Global Data Assimilation System (GDAS) for the Global Forecast System (GFS) in May 2007 at National Oceanic and Atmospheric Administration (NOAA). 
Since then, the GSI system has been adopted in various operational systems, including the National Aeronautics and Space Administration (NASA) Goddard Earth Observing System Model (GEOS), the United States Air Force (USAF) mesoscale data assimilation system, the NOAA Real-Time Mesoscale Analysis (RTMA) system, the Hurricane Weather Research and Forecasting (WRF) model (HWRF), and the Rapid Refresh (RAP) and High Resolution Rapid Refresh (HRRR) systems. The number of groups and institutes involved in operational GSI development has also increased throughout these years. %------------------------------------------------------------------------------- \section{GSI Becomes Community Code} %------------------------------------------------------------------------------- -In 2007, the Developmental Testbed Center (DTC) began collaborating with major GSI development groups to transform the operational GSI system into a community system and support distributed development (\cite{Shao2016}). The DTC complements the development groups in providing GSI documentation, porting GSI to multiple platforms, and testing GSI in an independent and objective environment, while still maintaining functionally equivalent to operational centers. Working with EMC, the DTC is maintaining a community GSI repository, which is equivalent to the operational developmental repository, and facilitates community users to develop GSI. Based on the repository, the DTC releases GSI code annually with updated documentation. The first community version of the GSI system was released in 2009. This user\textquotesingle s guide describes the 2016 release of GSI (v3.5) in July 2016. The DTC provides user support through the GSI Helpdesk (gsi-help@ucar.edu), and tutorials and workshops. More information about the GSI community services can be found at the DTC GSI webpage (\url{http://www.dtcenter.org/com-GSI/users/index.php}). 
+In 2007, the Developmental Testbed Center (DTC) began collaborating with major GSI development groups to transform the operational GSI system into a community system and support distributed development (\cite{Shao2016}). The DTC complements the development groups in providing GSI documentation, porting GSI to multiple platforms, and testing GSI in an independent and objective environment, while maintaining equivalent functionality to what is used in the operational centers. Since 2009, due to the NOAA security constraints, the DTC has been maintaining a community GSI code repository, which mirrors the EMC operational GSI code repository and facilitates community users to develop GSI. Based on this community repository, the DTC releases GSI code annually with updated documentation. Currently, the DTC and EMC are working closely to build a unified GSI code repository for both operational and community developers and users. This unified repository will facilitate direct communication among developers and help accelerate transitions between research and operations. Transition to this unified code repository is ongoing and will be completed by the end of 2017. + +The first community version of the GSI system was released in 2009. This user\textquotesingle s guide describes the release of GSI (v3.6) in September 2017. The DTC provides user support through the GSI Helpdesk (gsi-help@ucar.edu), tutorials and workshops. More information about the GSI community services can be found at the DTC GSI webpage (\url{http://www.dtcenter.org/com-GSI/users/index.php}). %------------------------------------------------------------------------------- \subsection{GSI Code Management and Review Committee} %--------------------------------------------------------------------of----------- -The GSI code development and maintenance are managed by the Data Assimilation Review Committee (DARC).
It was originally formed as the GSI Review Committee in 2010, with a goal of incorporating all major GSI development teams in the United States within a unified community framework. In 2014, EMC and DTC decided to merge their GSI code repository with the code repository of the NOAA ensemble Kalman filter (EnKF) data assimilation system. Such a merging enabled coordinated development of both systems and joint community support. Following the repository merging, the GSI Review Committee was transitioned to DARC, incorporating new members representing the EnKF development and applications. Currently, DARC contains members from NCEP/EMC, NASA's Goddard Global Modeling and Assimilation Office (GMAO), NOAA's Earth System Research Laboratory (ESRL), the National Center for Atmospheric Research (NCAR) Mesoscale \& Microscale Meteorology Laboratory (MMM), the National Environmental Satellite, Data, and Information Service (NESDIS), USAF, the University of Maryland, and the DTC (chair). The DTC also releases this EnKF system, along with GSI, annually. Please refer to the community EnKF user's webpage (\url{http://www.dtcenter.org/EnKF/users/index.php}) for more information. +The GSI code development and maintenance are managed by the Data Assimilation Review Committee (DARC). It was originally formed as the GSI Review Committee in 2010, with the goal of incorporating all major GSI development teams in the United States within a unified community framework. In 2014, EMC and DTC decided to merge their GSI code repository with the code repository of the NOAA ensemble Kalman filter (EnKF) data assimilation system. This merge enabled coordinated development of both systems and joint community support. Following the repository merging, the GSI Review Committee was transitioned to DARC, incorporating new members representing EnKF development and applications. 
Currently, DARC contains members from NCEP/EMC, NASA\textquotesingle s Goddard Global Modeling and Assimilation Office (GMAO), NOAA's Earth System Research Laboratory (ESRL), the Joint Center for Satellite Data Assimilation (JCSDA), the National Center for Atmospheric Research (NCAR) Mesoscale \& Microscale Meteorology Laboratory (MMM), the National Environmental Satellite, Data, and Information Service (NESDIS), USAF, the University of Maryland, and the DTC (chair). The DTC also releases the EnKF system annually (along with GSI). Please refer to the community EnKF user\textquotesingle s webpage (\url{http://www.dtcenter.org/EnKF/users/index.php}) for more information. -DARC primarily steers distributed GSI/EnKF development and community code management and support. The responsibilities of the committee are divided into two major aspects: coordination and code review. The purpose and guiding principles of the review committee are as follows: +DARC primarily steers distributed GSI/EnKF development, community code management, and support. The responsibilities of the committee are divided into two major aspects: coordination and code review. 
The purpose and guiding principles of the review committee are as follows: \begin{itemize} \item{Coordination and advisory} \begin{itemize} @@ -33,7 +35,8 @@ \subsection{GSI Code Management and Review Committee} \item Establish and manage a unified coding standard followed by all GSI/EnKF developers \item Review proposed modifications to the code trunk \item Make decisions on whether code change proposals are accepted or denied for -inclusion in the repository and manage the repository +inclusion in the repository +\item Manage the repository \item Oversee the timely testing and inclusion of code into the repository \end{itemize} @@ -43,64 +46,71 @@ \subsection{GSI Code Management and Review Committee} \subsection{Community Code Contributions} %------------------------------------------------------------------------------- -GSI is a community data assimilation system, open to contributions from scientists and software engineers from both the operational and research communities. DARC oversees the code transition from prospective contributors. This committee reviews proposals for code commits to the GSI repository and monitors that coding standards and tests are being fulfilled. Once the committee reaches approval, the contributed code will be committed to the GSI code repository and available for operational implementation and public release. +GSI is a community data assimilation system, open to contributions from scientists and software engineers from both the operational and research communities. DARC oversees the code transition from prospective contributors. This committee reviews proposals for code commits to the GSI repository and ensures that coding standards and tests are being fulfilled. Once the committee approves, the contributed code will be committed to the GSI code repository and available for operational implementation and public release. -To facilitate this process, the DTC is providing code transition assistance to the general research community. 
Prospective contributors of code to the GSI system should contact the DTC GSI helpdesk (gsi-help@ucar.edu) for the preparation and integration of their code. It is the contributor's responsibility to ensure that a proposed code change is correct, meets the GSI coding standards, and its expected impact is documented. The DTC will help the contributors run the regression tests and merge the code with the top of the repository trunk. Prospective contributors can also apply to the DTC visitor program for their GSI research and code transition. The visitor program is open to applications year-round. Please check the visitor program webpage (\url{www.dtcenter.org/visitors/}) for the latest announcement of opportunity and application procedures. +To facilitate this process, the DTC is providing code transition assistance to the general research community. Prospective code contributors should contact the DTC GSI helpdesk (gsi-help@ucar.edu) for the preparation and integration of their code. It is the responsibility of the contributor to ensure that a proposed code change is correct, meets GSI coding standards, and its expected impact is documented. The DTC will help the contributor run regression tests and merge the code with the top of the repository trunk. Prospective contributors can also apply to the DTC visitor program for their GSI research and code transition. The visitor program is open to applications year-round. Please check the visitor program webpage (\url{www.dtcenter.org/visitors/}) for the latest announcement of opportunity and application procedures. %------------------------------------------------------------------------------- \section{About This GSI Release} %------------------------------------------------------------------------------- -As a critical part of the GSI user support, this document is provided to assist users in applying GSI to data assimilation and analysis studies. It was composed by the DTC and reviewed by the DARC members. 
Please note the major focuses of the DTC are currently on testing and evaluation of GSI for regional numerical weather prediction (NWP) applications though the instructions and cases for GSI global and chemical applications are available with this release. This documentation describes the GSI v3.5 release. Active users can contact the DTC (gsi-help@ucar.edu) for developmental versions of GSI. - -The GSI v3.5 can be used either as a 3DVar system, a 3D (hybrid) EnVar system, a 4D (hybrid) EnVar system. Currently, most of NOAA applications are using 3D hybrid EnVar system. Coupled with a forecast model and its adjoint model, GSI can be turned into a 4DVar system with embedded 4DVar features (e.g., GEOS). +As a critical part of the GSI user support, this document is provided to assist users in applying GSI to data assimilation and analysis studies. It was composed by the DTC and reviewed by the DARC members. Please note that the major focuses of the DTC are currently on testing and evaluation of GSI for regional numerical weather prediction (NWP) applications. GSI global and chemical applications are briefly discussed in the document. The document is based on the GSI v3.6 release. Active users can contact the DTC (gsi-help@ucar.edu) for developmental versions of GSI and access to the GSI code repository.
%------------------------------------------------------------------------------- \subsection{What Is New in This Release Version} %------------------------------------------------------------------------------- -The following lists some of the new functions and changes included in the GSI release v3.5 versus the previous v3.4: +The following lists some of the new functions and changes included in the v3.6 release of the GSI versus v3.5: + +\textbf{Observational aspects}: +\begin{itemize} + \item Added assimilation of full spectral resolution CrIS radiance observations + \item Added near surface temperature (NSST) analysis + \item Added options to use correlated radiance observation errors +\end{itemize} -\textbf{New observation types}: +\textbf{Code optimization and refactoring}: \begin{itemize} - \item All sky data assimilation option for Advanced Microwave Sounding Unit-A (AMSU-A) - \item Assimilation of Geostationary Operational Environmental Satellite (GOES) clear air water vapor (CAWV) atmospheric motion vectors (AMV) - \item Assimilation of Special Sensor Microwave Imager/Sounder (SSMI/S) Defense Meteorological Satellite Program (DMSP) F19 - \item Initial capabilities for assimilating observations from the following instruments: - \begin{itemize} - \item Global Change Observation Mission-W1 satellite (GCOM-W1) Advanced Microwave Scanning Radiometer 2 (AMSR2) - \item Global Precipitation Measurement (GPM) Microwave Imager (GMI) - \item Megha-Tropiques Sondeur Atmospherique du Profil D'Humidite Intertropicale par Radiometrie (SAPHIR) - \item Himawari Advanced Himawari Imager (AHI) - \item International Space Station Rapid Scatterometer (ISS-RapidScat) - \end{itemize} + \item Refactored the observer modules using polymorphic code + \item Generalized all radiance assimilation across different sensors/instruments for cloud and aerosol usages in GSI + \item Removed the First-Order Time extrapolation to the Observation (FOTO) + \item Updated to netCDF v4.0 
functionality + \item Removed unused modules/variables \end{itemize} -\textbf{Algorithm/application update}: +\textbf{Application specific updates}: \begin{itemize} -\item Added 4D (hybrid) EnVar option -\item Added new AMV algorithm (requires a new BUFR table) -\item Added QC for regional assimilation of GPS RO bending angle -\item Updated observation thinning algorithms -\item Added the capability of using blend global-regional coordinate with HWRF ensemble -\item Added capability to output ensemble spread -\item Updated to near surface sea temperature (NSST) capability -\item Updated to RTMA capability + \item{Non-variational cloud analysis} + \begin{itemize} + \item Added number concentration for cloud water, cloud ice, and rain to match the cloud analysis with the Thompson Microphysical scheme + \item Added functions using visibility/fog observation to improve cloud fields in the lowest two levels + \item Added capability to read BUFR format NASA LaRC cloud products + \end{itemize} + \item{RTMA} + \begin{itemize} + \item Added variational QC algorithm using a super-logistic distribution function + \item Added cloud ceiling height and scalar wind as analysis variables + \end{itemize} \end{itemize} -\textbf{Libraries update}: +\textbf{Other updates}: \begin{itemize} -\item Switched to new version of the Community Radiative Transfer Model (CRTM) (v2.2.3) -\item added capability to use the NCEP I/O library \textit {nemio} for global GSI + \item Added the Advanced Research WRF (ARW) hybrid vertical coordinate background to GSI + \item Added a vertical dependence of the hybrid background error weighting, and horizontal/vertical localization scales in GSI + \item Added a NCEP nemsio interface for GFS deterministic and ensemble forecasts + \item Utility updates such as using GFS ensemble forecast perturbations to initialize WRF ensemble forecasts. 
+ \item Bug fixes \end{itemize} -Please note due to the version update, some diagnostic files and static information files might have been modified as well. +Besides the above-mentioned changes, the release code also includes a new cmake-based build utility. This utility is currently being tested for its portability and has been included in v3.6. In the near future, the DTC and EMC will use the same cmake build utility for all operational and research code. Transition to this new build utility will be completed by early 2018. + +Please note that due to the version update, some diagnostic and static information files might have been modified as well. %------------------------------------------------------------------------------- \subsection{Observations Used by This Version} %------------------------------------------------------------------------------- -GSI is being used by various applications on multiple scales. The types of observations GSI can assimilate vary from conventional to aerosol observations. Users should use observations with caution to fit their specific applications. The GSI v3.5 can assimilate, but is not limited to, the following types of observations: +GSI is used by various applications on multiple scales. The types of observations GSI can assimilate vary from conventional to aerosol observations. Users should use observations with caution to fit their specific applications. 
The GSI v3.6 can assimilate, but is not limited to, the following types of observations: \textbf{Conventional observations (including satellite retrievals):} \begin{itemize} @@ -117,14 +127,14 @@ \subsection{Observations Used by This Version} \item European Organization for the Exploitation of Meteorological Satellites (EUMETSAT) and GOES water vapor cloud top winds \item GEOS hourly IR and cloud top wind \item Surface land observations -\item Surface ship and buoy observation +\item Surface ship and buoy observations \item Special Sensor Microwave Imager (SSMI) wind speeds \item Quick Scatterometer (QuikSCAT), the Advanced Scatterometer (ASCAT) and Oceansat-2 Scatterometer (OSCAT) wind speed and direction \item RapidScat observations \item SSM/I and Tropical Rainfall Measuring Mission (TRMM) Microwave Imager (TMI) precipitation estimates \item Velocity-Azimuth Display (VAD) Next Generation Weather Radar ((NEXRAD) winds \item Global Positioning System (GPS) precipitable water estimates -\item Sea surface temperature (SST) +\item Sea surface temperatures (SSTs) \item Doppler wind Lidar \item Aviation routine weather report (METAR) cloud coverage \item Flight level and Stepped Frequency Microwave Radiometer (SFMR) High Density @@ -162,18 +172,13 @@ \subsection{Observations Used by This Version} \begin{itemize} \item GPS Radio occultation (RO) refractivity and bending angle profiles \item Solar Backscatter Ultraviolet (SBUV) ozone profiles, Microwave Limb Sounder (MLS) (including NRT) ozone, and Ozone Monitoring Instrument (OMI) total ozone -\item Doppler radar radial velocities +\item Doppler radar radial velocities \item Radar reflectivity Mosaic \item Tail Doppler Radar (TDR) radial velocity and super-observation \item Tropical Cyclone Vitals Database (TCVital) -\item Particulate matter (PM) of 2.5-um diameter or less +\item Particulate matter (PM) of 10-um diameter, 2.5-um diameter or less \item MODIS AOD (when using GSI-chem package) +\item Significant wave 
height observations from JASON-2, JASON-3, SARAL/ALTIKA and CRYOSAT-2 \end{itemize} -Please note some of these above mentioned data are not yet fully tested and/or implemented for operations. Therefore, the current GSI code might not have the optimal setup for these data. - - - - - - +Please note that some of these above mentioned data are not yet fully tested and/or implemented for operations. Therefore, the current GSI code might not have an optimal setup for these data. diff --git a/doc/GSI_user_guide/gsi_ch2.tex b/doc/GSI_user_guide/gsi_ch2.tex index 8cbe8c890..eca9d2887 100644 --- a/doc/GSI_user_guide/gsi_ch2.tex +++ b/doc/GSI_user_guide/gsi_ch2.tex @@ -1,31 +1,31 @@ -\chapter{Software Installation} +\chapter{Software Installation}\label{gsi_install} \setlength{\parskip}{12pt} %------------------------------------------------------------------------------- \section{Introduction} %------------------------------------------------------------------------------- -The DTC community GSI is a community distribution of NOAA\textquotesingle s operational GSI. +The DTC GSI is a community distribution of NOAA\textquotesingle s operational GSI. The community GSI expands the portability of the operational code by adding a flexible build system and providing example run scripts that allow GSI to be compiled and run on many common -platforms. The current version of GSI is 3.5. It builds and runs on most standard -Linux platforms using either Intel, PGI, and Gnu compilers. Legacy build rules are provided for +platforms. The current version of GSI is 3.6. It builds and runs on most standard +Linux platforms using Intel, PGI, and Gnu compilers. Legacy build rules are provided for two platforms, the IBM AIX computers using the xlf compiler, and Intel based Macintosh -computers using the PGI compiler. In both cases, the default build system must be modified to +computers using the PGI compiler. 
In both cases, the default build system must be significantly modified to build on these platforms. See the community web page user support FAQ to get started. This chapter describes how to build and install the DTC community GSI software on your local Linux computing resources. These instructions apply only to the DTC community GSI. While -the community GSI source code is identical to the tag of the NCEP\textquotesingle s GSI +the community GSI source code is identical to the NCEP\textquotesingle s GSI trunk code used for release, the community build system is different, allowing it to be more general to support a wide variety of computing platforms. -The GSI building process consists of four general steps: +The GSI build process consists of four general steps: \begin{itemize} -\item Obtain the source code for GSI and WRF. -\item Build the WRF model (see the WRF users guide). -\item Set the appropriate environment variables for the GSI build. -\item Configure and compile the GSI source code. +\item Obtaining the source code for GSI and WRF. +\item Building the WRF model (see the WRF user\textquotesingle s guide). +\item Setting the appropriate environment variables for the GSI build. +\item Configuring and compiling the GSI source code. \end{itemize} This chapter is organized as follows: Section \ref{ch2_obtainingcode} describes how to obtain @@ -33,16 +33,16 @@ \section{Introduction} supplemental NCEP libraries included with the distribution. Section \ref{ch2_compiling} starts with an outline of the build example and then goes into a more detailed discussion of setting up the build environment and the configure and compile steps. -Section \ref{ch2_buildexample} illustrates the build process for the three of the compilers (Intel, -PGI and Gnu) on the NCAR supercomputer Yellowstone. 
-Section \ref{ch2_externallibs} covers the system requirements and settings (tools, libraries, and environment +Section \ref{ch2_buildexample} illustrates the build process for three of the compilers (Intel, +PGI and Gnu) on the NCAR Yellowstone supercomputer. +Section \ref{ch2_externallibs} covers system requirements and settings (tools, libraries, and environment variable settings), and currently supported platforms in detail. Section \ref{ch2_gettinghelp} discusses what to do if you have problems with the build and where to get help. -For beginning users, sections \ref{ch2_obtainingcode} and \ref{ch2_compiling} provide the necessary +For beginners, sections \ref{ch2_obtainingcode} and \ref{ch2_compiling} provide the necessary steps to obtain the code and build GSI on most systems. The remainder of the chapters provide -background material for completeness. -Advanced topics, such as customizing the build, porting to new platforms, and debugging can be +background material for completeness. A final chapter \ref{ch2_cmake}, discusses the new experimental CMake build system being developed by the DTC and EMC as a common shared build method. + Advanced topics, such as customizing the build, porting to new platforms, and debugging can be found in the GSI Advanced User\textquotesingle s Guide. %------------------------------------------------------------------------------- @@ -59,15 +59,15 @@ \section{Obtaining and Setting Up the Source Code} \label{ch2_obtainingcode} \underline{GSI/EnKF System} submenu. New users must first register before downloading the source code. Returning users only need to enter their registration email address to log in. After accessing the download page, -select the link to the \verb|comGSIv3.5_EnKFv1.1.tar| tarball to download the most recent version -of the source code (July 2016). 
Selecting the newest release of the community GSI is critical for +select the link to the \verb|comGSIv3.6_EnKFv1.2.tar| tarball to download the most recent version +of the source code (October 2017). Selecting the newest release of the community GSI is critical for having the most recent capabilities, versions of supplemental libraries, and bug fixes. Full support is only offered for the two most recent code releases. -To analyze satellite radiance observations, GSI requires use of CRTM coefficients. It is important to -use \textbf{only} the version of CRTM coefficients provided by GSI website. Due to their large size, +To analyze satellite radiance observations, GSI requires the use of CRTM coefficients. It is important to +use \textbf{only} the version of CRTM coefficients provided on the GSI website. Due to their large size, these are available as a separate tarfile. They can be downloaded by selecting the link to the -tarball for the \verb|CRTM 2.2.3 Big Endian coefficients| from the web page. For all compilers +tarball for the \verb|CRTM 2.2.3 Big Endian coefficients| from the web page. For all compilers, use the big endian byte order coefficients found in the first CRTM link. The download page also contains links to the fixed files necessary for @@ -77,48 +77,50 @@ \section{Obtaining and Setting Up the Source Code} \label{ch2_obtainingcode} \item Global configuration (fix files to run Global GSI) \end{itemize} -The community GSI version 3.5 comes in a tar file named \verb|comGSIv3.5_EnKFv1.1.tar|. The tar +The community GSI version 3.6 comes in a tar file named \verb|comGSIv3.6_EnKFv1.2.tar|. The tar file may be unpacked by using the UNIX commands: \begin{small} \begin{verbatim} -gunzip comGSIv3.5_EnKFv1.1.tar.gz -tar -xvf comGSIv3.5_EnKFv1.1.tar +gunzip comGSIv3.6_EnKFv1.2.tar.gz +tar -xvf comGSIv3.6_EnKFv1.2.tar \end{verbatim} \end{small} -This creates the top level GSI directory \verb|comGSIv3.5_EnKFv1.1/|. 
+This creates the top level GSI directory \verb|comGSIv3.6_EnKFv1.2/|. After downloading the source code, and prior to building, the user should check the known issues -link on the download page of DTC website to determine if any bug fixes or platform specific customizations are needed. +link on the download page of the DTC website to determine if any bug fixes or platform specific customizations are needed. %------------------------------------------------------------------------------- \section{Directory Structure, Source Code and Supplemental Libraries} \label{ch2_directorystructure} %------------------------------------------------------------------------------- -The GSI system includes the GSI source code, the build system, supplemental libraries, fixed files, and run scripts. -The following table lists the system components found inside of the root GSI directory. +The GSI system includes the GSI source code, the build system, supplemental libraries, fixed files, and run scripts. Starting with the current version of GSI V3.6, the directory structure has been changed slightly. +The following table lists the system components found inside the root GSI directory. 
\begin{table}[htbp] \centering \begin{tabular}{| l | l |} \hline -Directory Name & Content \\ +Directory Name & Content\\ \hline \hline -src/main/ & GSI source code and makefiles \\ +src/ & GSI source code and makefiles \\ \hline -src/libs/ & Source code for supplemental libraries \\ +lib/ & Source code and build location for supplemental libraries \\ +\hline +core-libs/ & Build directory for supplemental libraries for CMake build \\ +\hline +libsrc/ & Source code for supplemental libraries for the CMake build \\ \hline fix/ & Fixed input files required by a GSI analysis, such as \\ & background error covariances, observation error tables; \\ - & excluding the CRTM coefficients \\ + & Excluding the CRTM coefficients \\ \hline include/ & Include files created by the build system \\ \hline -lib/ & Location for built supplemental libraries \\ -\hline -run/ & Directory for executable \verb|gsi.exe| and sample run scripts \\ +dtc/ & Directory for the DTC build system, executable \verb|gsi.exe| location, and sample run scripts \\ \hline arch/ & Build system support such as machine architecture specifics \\ & (see Advanced GSI User\textquotesingle s Guide) \\ @@ -129,8 +131,8 @@ \section{Directory Structure, Source Code and Supplemental Libraries} \label{ch \label{ch2_tble1} \end{table} -For the convenience of the user, supplemental NCEP libraries for building GSI are -included in the \verb|src/libs/| directory. These libraries are built when GSI is built. +For convenience, supplemental NCEP libraries for building GSI are +included in the \verb|src/libs/| directory. These libraries will be built when compiling GSI. These supplemental libraries are listed in the table below. 
\begin{table}[htbp] @@ -158,31 +160,28 @@ \section{Directory Structure, Source Code and Supplemental Libraries} \label{ch \hline sp/ & NCEP spectral - grid transforms \\ \hline -w3emc\_v2.0.5 & NCEP/EMC W3 library (date/time manipulation, GRIB) \\ +w3emc\_v2.0.5/ & NCEP/EMC W3 library (date/time manipulation, GRIB) \\ \hline -w3nco\_v2.0.6 & NCEP/NCO W3 library (date/time manipulation, GRIB) \\ +w3nco\_v2.0.6/ & NCEP/NCO W3 library (date/time manipulation, GRIB) \\ \hline \end{tabular} \label{ch2_tble2} \end{table} -The one nonstandard \textit{library} not included with the source code are the WRF IO API\textquotesingle s. These are obtained by linking to a build of the WRF code. Please note that the release version of WRF/EnKF has only been tested using the previous two release versions of WRF. Older versions of WRF may provide unpredictable results. +The one set of non-standard \textit{library} files not included with the source code are those associated with the WRF IO API. These are obtained by linking to a build of the WRF code. Please note that the release version of WRF/EnKF has only been tested using the previous two release versions of WRF. Older versions of WRF may provide unpredictable results. -The WRF code, and full WRF documentation, can be obtained from the WRF Users\textquotesingle \ Page, +Following a registration process similar to that for downloading GSI, the WRF code and full WRF documentation can be obtained from the WRF Users\textquotesingle \ Page, \url{http://www.mmm.ucar.edu/wrf/users/} -following a registration process similar to that for downloading GSI. 
- %------------------------------------------------------------------------------- \section{Compiling GSI} \label{ch2_compiling} %------------------------------------------------------------------------------- This section starts with a quick outline of how to build GSI (\ref{ch2_compiling_overview}), followed by a more detailed discussion of the build process (\ref{ch2_compiling_env} \& \ref{ch2_compiling_configandcompile}). Typically GSI will build \textit{straight out of the box} on any system that successfully builds the WRF model. Should the user experience any difficulties with the default build, check the build environment against the requirements described at the end of section \ref{ch2_externallibs}. -To proceed with the GSI build, it is assumed that the WRF model has already been built on the current system. GSI uses the WRF I/O API libraries to read the background file. These I/O libraries are created as part of the WRF build, and are linked into GSI during the GSI build process. In order to successfully link the WRF I/O libraries with the GSI source, it is crucial that both WRF and GSI are built using the same Fortran compilers. This means that if WRF is built with the Intel Fortran compiler, then GSI must also be built with the Intel Fortran compiler. It is also recommended that both codes be built with the same annual version number of the compiler -(.e.g. 12, 13, 14, 15, etc.). +To proceed with the GSI build, it is assumed that the WRF model has already been built on the current system. GSI uses the WRF I/O API libraries to read the background file. These I/O libraries are created as part of the WRF build, and are linked into GSI during the GSI build process. In order to successfully link the WRF I/O libraries with the GSI source, it is crucial that both WRF and GSI are built using the same Fortran compilers. This means that if WRF is built with the Intel Fortran compiler, then GSI must also be built with the Intel Fortran compiler. 
It is also recommended that both codes be built with the same annual version number of the compiler (e.g., 14, 15, etc.). %------------------------------------------------------------------------------- \subsection{Build Overview} \label{ch2_compiling_overview} @@ -191,13 +190,14 @@ \subsection{Build Overview} \label{ch2_compiling_overview} This section provides a quick outline of the steps necessary to build the GSI code. The following steps describe that build process. \begin{enumerate} -\item \textbf{Set the environment for the compiler:} If not already done so, set the necessary paths for using your selected compiler, such as loading the appropriate modules or modifying your path variable. -\item \textbf{Set the environment variables:} The first path on this list will always need to be set. The remaining two will depend on your choice of compiler and how your default environment is configured. +\item \textbf{Set the environment for the compiler}: If not already done, set the necessary paths for using your selected compiler, such as loading the appropriate modules or modifying your path variable. +\item \textbf{Set the environment variables}: The first path on this list will always need to be set. The remaining two will depend on your choice of compiler and how your default environment is configured. \begin{description} \item[]a. \verb|WRF_DIR| the path to the compiled WRF directory (to always be set) \item[]b. \verb|NETCDF| the path to the \verb|NETCDF| libraries \item[]c. 
\verb|LAPACK_PATH| the path to the \verb|LAPACK| math libraries \end{description} +\item \textbf{Change into the dtc/ directory} \item \textbf{Run the configure script} \item \textbf{Run the compile script} \end{enumerate} @@ -206,15 +206,15 @@ \subsection{Build Overview} \label{ch2_compiling_overview} \subsection{Environment Variables} \label{ch2_compiling_env} %------------------------------------------------------------------------------- -Before configuring the GSI code to be built, at least one, and no more than three environment variables must be set. +Before configuring the GSI code to be built, be sure to check the following environment variables: \begin{description} -\item[WRF\_DIR] defines the path to the root of the WRF build directory. Setting this is mandatory. This variable tells the GSI build system where to find the WRF I/O libraries. The process for setting the environment variables varies according to the login shell in use. To set the path variable WRF\_DIR for csh/tcsh, type; +\item[WRF\_DIR] defines the path to the root of the WRF build directory. Setting this is mandatory. This variable tells the GSI build system where to find the WRF I/O libraries. The process for setting the environment variables varies according to the login shell used. To set the path variable WRF\_DIR for csh/tcsh, type: \begin{verbatim} setenv WRF_DIR /path_to_WRF_root_directory/ for csh or tcsh export WRF_DIR=/path_to_WRF_root_directory/ for ksh or bash \end{verbatim} -\item[NETCDF] The second environment variable specifies the local path to NetCDF library. 
The path location for the NETCDF environment variable may be checked by using the command \begin{verbatim} echo $NETCDF \end{verbatim} @@ -225,14 +225,14 @@ \subsection{Environment Variables} \label{ch2_compiling_env} it is then necessary to manually set this variable. If your system uses modules or a similar mechanism to set the environment, do this first. If a valid path is returned by the echo command, no further action is required.\\ \item[LAPACK\_PATH] defines the path to the LAPACK library. Typically, this variable will only need to be set on systems without a vendor provided version of LAPACK. -IBM systems typically come installed with the LAPACK equivalent ESSL library that links automatically. Likewise, the PGI compiler often comes with a vendor provided version of LAPACK that links automatically with the compiler. Experience has shown that the following situations make up the majority of cases where the LAPACK variable needed to be set: +IBM systems typically come installed with the LAPACK equivalent ESSL library that links automatically. Likewise, the PGI compiler often comes with a vendor provided version of LAPACK that links automatically with the compiler. Experience has shown that the following situations make up the majority of cases where the LAPACK variable needs to be set: \begin{itemize} \item Linux environments using Intel Fortran compiler. \item Building with Gfortran. \item On systems where the path variables are not properly set. -\item On stripped down versions of the IBM AIX OS that lack the ESSL libraries +\item On stripped down versions of the IBM AIX OS that lack the ESSL libraries \end{itemize} -Of the four, the first of these is the most common. The Intel compiler usually comes with a vendor provided mathematics library known as the \textit{Mathematics Kernel Libraries} or MKL for short. 
While most installations of the Intel compiler typically come with the MKL libraries installed, the ifort compiler does not automatically load the library. It is therefore necessary to set the LAPACK\_PATH variable to the location of the MKL libraries when using the Intel compiler. You may need to ask your system administrator for the correct path to these libraries. +Of these four, the first case is the most common. The Intel compiler usually comes with a vendor provided mathematics library known as the \textit{Mathematics Kernel Libraries} or MKL for short. While most installations of the Intel compiler typically come with the MKL libraries installed, the ifort compiler does not automatically load the library. It is therefore necessary to set the LAPACK\_PATH variable to the location of the MKL libraries when using the Intel compiler. You may need to ask your system administrator for the correct path to these libraries. \end{description} On super-computing systems with multiple compiler options, these variables may be set as part of the module settings for each compiler. On the NCAR supercomputer Yellowstone, the Intel build environment can be specified through setting the appropriate modules. When this is done, the MKL library path is available through a local environment variable, MKLROOT. The LAPACK environment may be set for csh/tcsh with the Unix commands @@ -248,7 +248,7 @@ \subsection{Environment Variables} \label{ch2_compiling_env} \end{verbatim} \end{small} -Once the environment variables have been set, the next step in the build process is to first run the configure script and then the compile script. +Once the environment variables have been set, the next step in the build process is to run the configure and compile scripts. 
%------------------------------------------------------------------------------- \subsection{Configure and Compile} \label{ch2_compiling_configandcompile} @@ -257,9 +257,9 @@ \subsection{Configure and Compile} \label{ch2_compiling_configandcompile} Once the environment variables have been set, building the GSI source code requires two additional steps: \begin{enumerate} \item Run the configure script and select a compiler option. -\item Run the compile script +\item Run the compile script. \end{enumerate} -Change into the \verb|comGSIv3.5_EnKFv1.1/| directory and issue the configure command: +Change into the \verb|comGSIv3.6_EnKFv1.2/dtc| directory and issue the configure command: \begin{verbatim} ./configure \end{verbatim} @@ -267,11 +267,11 @@ \subsection{Configure and Compile} \label{ch2_compiling_configandcompile} \verb|configure.gsi|. The script starts by echoing the NETCDF and WRF\_DIR paths set in the previous section. It then examines the current system and queries the user to select from multiple build options. -For 64-bit Linux the options will be the following: +For 64-bit Linux, the options will be the following: \begin{scriptsize} \begin{verbatim} -Will use NETCDF in dir: /glade/apps/opt/netcdf/4.3.0/intel/12.1.5 -Will use WRF in dir: /glade/p/work/mhu/wrf/WRFV3.5 +Will use NETCDF in dir: /glade/apps/opt/netcdf/4.3.0/intel/default +Will use WRF in dir: /glade/p/work/stark/WRF/intel/trunk_20150420_3-7_RELEASE ------------------------------------------------------------------------ Please select from among the following supported platforms. @@ -284,21 +284,23 @@ \subsection{Configure and Compile} \label{ch2_compiling_configandcompile} 7. Linux x86_64, Intel compiler (ifort & icc) (dmpar,optimize) 8. Linux x86_64, Intel compiler (ifort & icc), IBM POE (EXPERIMENTAL) (dmpar,optimize) 9. 
Linux x86_64, Intel compiler (ifort & icc), SGI MPT (EXPERIMENTAL) (dmpar,optimize) + +Enter selection [1-9] : \end{verbatim} \end{scriptsize} -Looking at the list, there are two things to note. First is that the GNU C-compiler (gcc) may be +Looking at the list, there are two things to note. First, the GNU C-compiler (gcc) may be paired with any of the other Fortran compilers. This allows the build to use the GNU C-compiler in place of the Intel (icc) or PGI (pgcc) C-compiler. -The second thing to notice is that there are separate build targets for vendor supplied versions +There are also separate build targets for vendor supplied versions of MPI such as IBM POE and SGI MPT. This was added due to some computing hardware vendors creating non-standard mpif90 wrappers for their vendor supplied version of MPI. If uncertain about which to choose, start by selecting the default option corresponding to the fortran compiler you wish -to use (either 1,2,5,6, or 7). If that option fails with an error referencing a bad argument for mpif90, +to use (either 1, 5, or 7). If that option fails with an error referencing a bad argument for mpif90, only then try the options listed for use with \verb|Supercomp|, \verb|IBM POE|, or \verb|SGI MPT|. -On selecting an option, the process reports a successful configuration with the banner: +On selecting an option, the script reports a successful configuration with the banner: \begin{scriptsize} \begin{verbatim} ------------------------------------------------------------------------ @@ -306,7 +308,7 @@ \subsection{Configure and Compile} \label{ch2_compiling_configandcompile} ------------------------------------------------------------------------ \end{verbatim} \end{scriptsize} -Failure to get this banner means that the configuration step failed to complete. The most typical reason for a failure is an error in one of the paths set to the environment variables. +Failure to get this banner means that the configuration step failed to complete. 
The most typical reason for a failure is an error in one of the environment variables paths. After selecting a build option, run the compile script: \begin{small} @@ -314,9 +316,9 @@ \subsection{Configure and Compile} \label{ch2_compiling_configandcompile} ./compile >& compile.log \end{verbatim} \end{small} -It is recommended to capture the build information to a log file by redirecting the output incase it is necessary to diagnose build issues. +Capturing the build information to a log file by redirecting the output is necessary to diagnose build issues. -To conduct a complete clean, which removes ALL built files in ALL directories, as well as the configure.gsi, type: +To remove all built files in every directory, as well as the configure.gsi, type: \begin{small} \begin{verbatim} ./clean -a @@ -325,13 +327,13 @@ \subsection{Configure and Compile} \label{ch2_compiling_configandcompile} A complete clean is necessary if the compilation failed or if the configuration file is changed. Following a successful compile, the GSI executable \verb|gsi.exe| can be found in the \verb|run/| directory. -If the executable is not found, check the compilation log file. If the build failed, search for the first instance of the word "Error" (with a capital "E") to locate the section of the log with the failure. +If the executable is not found, check the compilation log file. If the build failed, search for the first instance of the word "Error" (with a capital "E") to locate the section of the log file with the failure. %------------------------------------------------------------------------------- \section{Example of Build} \label{ch2_buildexample} %------------------------------------------------------------------------------- -To illustrate the build process, the following section describes the steps necessary to build GSI on the NCAR supercomputer Yellowstone using the Intel compiler, the PGI compiler, and the Gnu compiler. Other platforms will be similar. 
+To illustrate the build process, the following section describes the steps necessary to build GSI on the NCAR Yellowstone supercomputer using the Intel compiler, the PGI compiler, and the Gnu compiler. Other platforms will be similar. %------------------------------------------------------------------------------- \subsection{Intel Build} @@ -344,14 +346,14 @@ \subsection{Intel Build} module load intel module load impi mkl ncarcompilers ncarbinlibs netcdf \end{verbatim} -These module commands have specified the compiler, mpi, the version of the LAPACK library (MKL) and the netcdf library. -\item For this case two of the paths must be set. The path to the WRF directory must always be specified, and the Intel Mathematics Kernal Library (MKL) will be used in place of the LAPACK library. Note that on Yellowstone, the variable MKLROOT is set to the path to the MKL libraries by loading the mkl module. To set the paths in a C-shell environment use: +These module commands have specified the compiler, MPI, the version of the LAPACK library (MKL), and the NetCDF library. +\item For this case, two of the paths must be set. The path to the WRF directory must always be specified, and the Intel Mathematics Kernel Library (MKL) will be used in place of the LAPACK library. Note that on Yellowstone, the variable MKLROOT is set to the path to the MKL libraries by loading the MKL module. To set the paths in a C-shell environment, use: \begin{verbatim} setenv WRF_DIR /PATH TO WRF DIRECTORY/ setenv LAPACK_PATH $MKLROOT \end{verbatim} -\item To run the configure script, type \verb|./configure| inside the top of the GSI directory. If the first three steps were completed successfully, a table of compiler options should appear. Select the desired compiler combination, which in this case is either 6 or 7. The alternative options (8 and 9) are needed for certain platforms that have a vendor supplied custom version of MPI. 
Try the default build options for MPI first, and only if it fails should the second option be used. -\item To compile the code, enter in a C-shell: \verb|./compile >& compile.log|. If the build completes successfully, an executable named \verb|gsi.exe| will be created in the \verb|./run| directory. +\item To run the configure script, type \verb|./configure| inside the top of the GSI directory. If the first three steps were completed successfully, a table of compiler options should appear. Select the desired compiler combination option, which in this case is either 6 or 7. The alternative options (eight and nine) are needed for certain platforms that have a vendor supplied custom version of MPI. Try the default build options for MPI first, and only if it fails should the second option be used. +\item To compile the code, type the following: \verb|./compile >& compile.log|. If the build completes successfully, an executable named \verb|gsi.exe| will be created in the \verb|./run| directory. \end{enumerate} %------------------------------------------------------------------------------- @@ -365,13 +367,13 @@ \subsection{PGI Build} module load pgi module load impi ncarcompilers ncarbinlibs netcdf \end{verbatim} -These module commands have specified the compiler, mpi, and the netcdf library. -\item For this case only the path to the WRF directory must be set. The PGI compiler comes with its own version of LAPACK that it finds automatically. It is not necessary to set the LAPACK path. In a C-shell environment use: +These module commands have specified the compiler, MPI, and the NetCDF library. +\item For this case only the path to the WRF directory must be set. The PGI compiler comes with its own version of LAPACK that it finds automatically. It is not necessary to set the LAPACK path. In a C-shell environment, use: \begin{verbatim} setenv WRF_DIR /PATH TO WRF DIRECTORY/ \end{verbatim} -\item Similar to the Intel example, pick compiler options listed in a table. 
In this case, the desired compiler combination is either 3 or 4. -\item To compile the code, enter in a C-shell: \verb|./compile >& compile.log|. If the build completes successfully, an executable named \verb|gsi.exe| will be created in the \verb|./run directory|. +\item Similar to the Intel example, pick compiler options listed in the table. In this case, the desired compiler combination option is either 3 or 4. +\item To compile the code, type the following: \verb|./compile >& compile.log|. If the build completes successfully, an executable named \verb|gsi.exe| will be created in the \verb|./run| directory. \end{enumerate} %------------------------------------------------------------------------------- @@ -385,14 +387,14 @@ \subsection{GNU Build} module load gnu/5.3.0 module load ncarcompilers ncarbinlibs netcdf lapack/3.2.1 \end{verbatim} -These module commands have specified the compiler, mpi, and the netcdf library. -\item For this case two of the paths must be set. The path to the WRF directory must always be specified, and we will use the LAPACK library installed by the module. In a C-shell environment use: +These module commands have specified the compiler, MPI, and the NetCDF library. +\item For this case, two of the paths must be set. The path to the WRF directory must always be specified, and we will use the LAPACK library installed by the module. In a C-shell environment use: \begin{verbatim} setenv WRF_DIR /PATH TO WRF DIRECTORY/ setenv LAPACK_PATH $LIB_NCAR \end{verbatim} -\item Similar to the Intel example, pick compiler options listed in a table. In this case, the desired compiler combination is 5. -\item To compile the code, enter in a C-shell: \verb|./compile >& compile.log|. If the build completes successfully, an executable named \verb|gsi.exe| will be created in the \verb|./run directory|. +\item Similar to the Intel example, pick compiler options listed in the table. In this case, the desired compiler combination option is 5. 
+\item To compile the code, type the following: \verb|./compile >& compile.log|. If the build completes successfully, an executable named \verb|gsi.exe| will be created in the \verb|./run| directory. \end{enumerate} %------------------------------------------------------------------------------- @@ -403,14 +405,14 @@ \section{System Requirements and External Libraries} \label{ch2_externallibs} The basic requirements for building and running the GSI system are the following: \begin{itemize} -\item FORTRAN 95+ compiler +\item FORTRAN 2003+ compiler \item C compiler \item MPI v1.2+ \item OpenMP \item Perl -\item NetCDF V3.6.3 or V4.2+ +\item NetCDF V4.2+ \item LAPACK and BLAS mathematics libraries, or equivalent -\item WRF V3.5+ +\item WRF V3.6+ \end{itemize} Because all but the last of these tools and libraries are typically the purview of system administrators to install and maintain, they are lumped together here as part of the basic system requirements. @@ -419,9 +421,9 @@ \section{System Requirements and External Libraries} \label{ch2_externallibs} \subsection{Compilers Tested for Release} %------------------------------------------------------------------------------- -Version 3.5 of the DTC community GSI system has been successfully tested on a variety of Linux platforms with many versions of the Intel and PGI fortran compilers. +Version 3.6 of the DTC community GSI system has been successfully tested on a variety of Linux platforms with many versions of the Intel and PGI fortran compilers. -Legacy build rules are also available for IBM AIX and Mac Darwin platforms. Because the DTC does not have the ability to test on these platforms, they are no longer supported. Also, Linux GNU gfortran option is added in this version. +Legacy build rules are also available for IBM AIX and Mac Darwin platforms. Because the DTC does not have the ability to test on these platforms, they are no longer supported. Also, the Linux GNU gfortran option is available in this version. 
The following Linux compiler combinations have been fully tested: \begin{table}[htbp] @@ -431,35 +433,161 @@ \subsection{Compilers Tested for Release} & Fortran compiler version & C compiler version \\ \hline \hline -Intel only & ifort 16.0.1, 15.0.1, 13.0.1, 12.1.5, 12.1.4 & icc \\ -\hline -Intel \& gcc & ifort 16.0.1, 15.0.1, 13.0.1, 12.1.5, 12.1.4 & gcc 4.8.2, 4.4.7 \\ +Intel only & ifort 17.0.1, 16.0.3, 15.0.3, 14.0.2 & icc \\ \hline -PGI only & pgf90 16.1, 15.10, 15.7, 15.1, 14.10, 14.9, 14.7, 13.9, 13.3 & pgcc \\ +PGI only & pgf90 17.5, 16.5, 15.7 & pgcc \\ \hline -PGI \& gcc & pgf90 16.1, 15.10, 15.7, 15.1, 14.10, 14.9, 14.7, 13.9, 13.3 & gcc 4.8.2 \\ -\hline -GNU only & gfortran 6.3.0, 5.3.0 & gcc 6.3.0, 5.3.0 \\ +GNU only & gfortran 5.4.0 with netcdf 4.4.0 & gcc 5.4.0 \\ \hline \end{tabular} \label{ch2_tble3} \end{table} -Unforeseen build issues may occur when using older compiler and library versions. As always, the best results come from using the most recent version of compilers. +Unforeseen build issues may occur when using older compiler and library versions. As always, the best results will be achieved by using the most recent compiler versions. %------------------------------------------------------------------------------- \section{Getting Help and Reporting Problems} \label{ch2_gettinghelp} %------------------------------------------------------------------------------- -Should the user experience any difficulty building GSI on their system, please first confirm that +Should a user experience any difficulty building GSI on his/her system, please first confirm that all the required software is properly installed (section \ref{ch2_compiling}). Next check that the -external libraries exist and that their specified paths in the configure file are correct. Lastly, +external libraries exist and specified paths in the configure file are correct. Lastly, check the resource file \textit{configure.gsi} for errors in any of the paths or settings. 
Should all these check out, feel free to contact the community GSI Help Desk for assistance at \begin{center} {gsi-help@ucar.edu} \end{center} -At a minimum, when reporting code building problems to the helpdesk, please include with your -email a copy of the build log, and the \textit{configure.gsi} file. +At a minimum, when reporting code building problems to the helpdesk, please include a copy of the build log and the \textit{configure.gsi} file with your e-mail. +%------------------------------------------------------------------------------- +\section{CMake Build System} \label{ch2_cmake} +%------------------------------------------------------------------------------- + +A new unified build system based on CMake has been added to the GSI code. +CMake is a very powerful cross-platform open-source build system. In comGSI, the CMake build system exists in parallel +to the previous DTC build system, and either one can be used independently to build the code. +The CMake build system is still experimental, but is available as an alternative to the traditional DTC build system. + +\subsection{CMake build process with the DTC script} +The CMake build infrastructure consists of a top level directory with the name \verb|cmake/| and configuration files in each directory named ( \verb|CMakeLists.txt|). The syntax for CMake relies on a two step command line process, similar to "configure" and "compile." Command line arguments are used to specify paths and compilers. To simplify the process, the DTC provides a helper script that simplifies the choice of arguments that need to be used. + +The helper script is called \verb|dtcbuild| and is located in the directory \verb|dtc|. This script attempts to walk the user through the process of building GSI. By default CMake prefers to build the source code "out-of-place," meaning that it does not populate the GSI directory with the build. 
The script first creates a directory called \verb|build| inside the source code directory to house the build process. If there is already a directory called build, the script halts with a warning to either rename it or delete it. + +The script then checks that the path variable for the WRF build, either \verb|WRFPATH| or \verb|WRF_DIR|, has been set. +It then prompts the user to choose a compiler for the build. +\begin{verbatim} +Please select from among the following supported platforms. + + 1. Linux x86_64, PGI compilers (pgf90 & pgcc) + 2. Linux x86_64, PGI compilers (pgf90 & gcc) + 3. Linux x86_64, GNU compilers (gfortran & gcc) + 4. Linux x86_64, Intel/gnu compiler (ifort & gcc) + 5. Linux x86_64, Intel compiler (ifort & icc) + 6. Linux x86_64, Intel compiler w/intel mpi (mpiifort & icc) + 7. Linux x86_64, Intel compiler (mpif90 -f90=ifort & icc) + +Enter selection [1-7] : +\end{verbatim} +Once a compiler has been chosen, it generates local makefiles by invoking the cmake command with the proper arguments. One of those arguments selects that a local build of the NCEP libraries needed by GSI will be conducted prior to the source code being built. The final step of the script is to invoke a parallel build of the code. + +This points to two advantages in using CMake to build the code. CMake automatically generates code dependencies each time a build is invoked, allowing the use of a parallel make, greatly reducing the time it takes to complete the build. Typically the time to complete the CMake build is a quarter of the time needed for the serial DTC "configure" and "compile" to complete. + +Once the build is complete, the two executables \verb|gsi.x| and \verb|enkf_gfs.x| are placed in the directory \verb|build/bin|. Note that the name of the executables and their location differs from the traditional DTC build.
+ +Summary of CMake build steps: +\begin{enumerate} +\item Set up the build environment in the same way as with the DTC build +\begin{itemize} + \item Set up compilers/load modules + \item Set the environment path for NetCDF and LAPACK/MKL + \item Set the environment path for WRF by setting either of the variables \verb|WRFPATH| or \verb|WRF_DIR| to point to a compiled copy of the WRF code. +\end{itemize} +\item Copy the helper script into the top level directory (\verb|cp ./dtc/dtcbuild .|) +\item Run the helper script (\verb|./dtcbuild|) +\item Select the compiler combination for your build. For instance, number six for Intel on Theia. +\item When the build is complete, the executables \verb|gsi.x| and \verb|enkf_gfs.x| will be located in the directory \verb|build/bin| +\end{enumerate} + + +\subsection{Build notes and additional requirements} +Requirements: +\begin{itemize} +\item The CMake build requires use of version 2.8+ of cmake +\item GSI will not build with the Intel compiler V15.0 due to an incompatibility with the CRTM library. +\end{itemize} + +Build notes: +\begin{itemize} +\item Building on the UCAR Yellowstone supercomputer requires additional flags due to how the C compiler has been installed there. The platform specific script \verb|dtcbuild_yellowstone| accounts for this need. +\item The build is not conducted within the \verb|src/| directory, as is the case with the traditional DTC build, but instead is located in \verb|build/src/CMakeFiles|. +\item On the NCEP Theia supercomputer, GSI builds best with option six, due to the way MPI is set up on that machine. +\end{itemize} + +\subsection{How the helper script works} +This section will go through the DTC helper script, one section at a time, to illustrate how the CMake build works. + +One of the first things the script does is create the build directory; however, prior to this, it checks if a directory by that name already exists. If it does, the script halts with a warning.
+\begin{small} +\begin{verbatim} + # create build directory +if test -d ./build ; then + echo "directory build already exists, delete or rename the directory and dry again" + exit +else + mkdir ./build +fi +\end{verbatim} +\end{small} + +Next, the environment variables indicating the top of the source tree and the location of the NCEP library source code are set. +\begin{small} +\begin{verbatim} +# set CORE_DIR to top of source tree. +CORE_DIR=`pwd` +echo "$CORE_DIR" +export CORE_DIR +export CORELIB=$CORE_DIR/libsrc +echo "$CORELIB" +\end{verbatim} +\end{small} + +Next the path to the WRF build is set. For the traditional DTC build, the variable \verb|WRF_DIR| is used. The current CMake build uses a different variable \verb|WRFPATH| to do the same thing. So as a work around, the script accepts the path information from either variable. +\begin{small} +\begin{verbatim} +# set path to WRF and test that it exists +if test -z "$WRFPATH" ; then + if test -z $WRF_DIR; then + echo '** WARNING: No path to WRF_DIR and environment variable WRF_DIR not set.' + exit + else + export WRFPATH=$WRF_DIR + fi +fi +\end{verbatim} +\end{small} + +The next section queries the user to select a compiler combination for the build. Many of the CMake build variables, such as compiler information, can be either set as environment variables or included in the command line argument. Here we set them as environment variables, and cmake is invoked. The following shows the variable settings for the combination of the PGI FORTRAN compiler and the Gnu C compiler. +\begin{small} +\begin{verbatim} + if [ "$resp" = "2" ] ; then + echo ' 2. Linux x86_64, PGI compilers (pgf90 & gcc) ' + export CC=gcc + export CXX=g++ + export FC=pgf90 + cd build + cmake -DBUILD_CORELIBS=ON $CORE_DIR + make -j 8 + fi +\end{verbatim} +\end{small} +These are standard guesses as to what the C, C++, and MPI call for the FORTRAN compiler are called on your system. They may be wrong. 
In that case the environment variables \verb|CC|, \verb|CXX|, and \verb|FC| may need to be modified. + +The final part of the script is the invocation of cmake. +\begin{small} +\begin{verbatim} + cmake -DBUILD_CORELIBS=ON $CORE_DIR + make -j 8 +\end{verbatim} +\end{small} +There are two arguments used here. The first is \verb|-DBUILD_CORELIBS=ON|. This argument directs CMake to look in the \verb| core-libs/| directory for rules to build the NCEP libraries needed for GSI. The environment variable \verb| CORELIB|, defined at the top of the script, indicates where to look for the library source code. In this case, it is in \verb|$CORE_DIR/libsrc/|. Any changes to the source code would be placed in \verb|$CORE_DIR/libsrc/|, and any changes to the CMake build rules would go in \verb|core-libs/|. The second argument \verb|$CORE_DIR| indicates the location of the build directory. The final statement \verb|make -j 8| invokes a parallel call to \textit{make} using eight processors, which speeds up the build considerably. For more details on the CMake build for GSI, see the readme file \textit{README.cmake} in the top directory. diff --git a/doc/GSI_user_guide/gsi_ch3.tex b/doc/GSI_user_guide/gsi_ch3.tex index 4227e01dc..c8a03f965 100644 --- a/doc/GSI_user_guide/gsi_ch3.tex +++ b/doc/GSI_user_guide/gsi_ch3.tex @@ -1,14 +1,14 @@ -\chapter{Running GSI} +\chapter{Running GSI}\label{gsi_run} \setlength{\parskip}{12pt} -This chapter discusses the issues of running GSI. It starts with introductions to the input data required to run GSI. Then proceeds with a detailed explanation of an example GSI run script and introductions to the result files produced by a successful GSI run. It concludes with some frequently used options from the GSI namelist. +This chapter discusses the issues of running GSI. 
It starts with introductions to the input data required to run GSI, then proceeds with a detailed explanation of an example GSI run script and introductions to files produced by a successful GSI run. It concludes with some frequently used options from the GSI namelist. %------------------------------------------------------------------------------- \section{Input Data Required to Run GSI} \label{sec3.1} %------------------------------------------------------------------------------- -In most cases, three types of input data (background, observation, and fixed files) must be available before running GSI. In some special idealized cases, such as a pseudo single observation test, GSI can be run without any observations. If running GSI with 3D EnVAR hybrid option, global or regional ensemble forecasts are also needed. +In most cases, three types of input data (background, observations, and fixed files) must be available before running GSI. In some special idealized cases, such as a pseudo single observation test, GSI can be run without any observations. If running GSI with the 3D EnVar hybrid option, global or regional ensemble forecasts are also needed. 
%------------------------------------------------------------------------------- \subsection{Background or First Guess Field} @@ -18,10 +18,10 @@ \subsection{Background or First Guess Field} \begin{small} \begin{description} -\item[ ] a) WRF NMM input fields in binary format -\item[ ] b) WRF NMM input fields in NetCDF format -\item[ ] c) WRF ARW input fields in binary format -\item[ ] d) WRF ARW input fields in NetCDF format +\item[ ] a) WRF-NMM input fields in binary format +\item[ ] b) WRF-NMM input fields in NetCDF format +\item[ ] c) WRF-ARW input fields in binary format +\item[ ] d) WRF-ARW input fields in NetCDF format \item[ ] e) GFS input fields in binary format or through NEMS I/O \item[ ] f) NEMS-NMMB input fields \item[ ] g) RTMA input files (2-dimensional binary format) @@ -30,11 +30,11 @@ \subsection{Background or First Guess Field} \end{description} \end{small} -The WRF is a community model system, including two dynamical cores: the Advanced Research WRF (ARW) and the Nonhydrostatic Mesoscale Model (NMM). The GFS (Global Forecast System), NEMS (National Environmental Modeling System)-NMMB (Nonhydrostatic Mesoscale Model B-Grid), and RTMA (Real-Time Mesoscale Analysis) are operational systems of NCEP. The DTC mainly supports GSI for regional community model WRF. Therefore, most of the multiple platform tests were conducted using WRF netcdf background files (d). The DTC also supports the GSI in global and chemical applications with limited resources. The following backgrounds have been tested for the release: +The Weather Research and Forecasting (WRF) community modeling system includes two dynamical cores: the Advanced Research WRF (ARW) and the Nonhydrostatic Mesoscale Model (NMM). The GFS (Global Forecast System), NEMS (National Environmental Modeling System)-NMMB (Nonhydrostatic Mesoscale Model B-Grid), and RTMA (Real-Time Mesoscale Analysis) are operational systems at the National Centers for Environmental Prediction (NCEP).
The DTC mainly supports GSI for regional WRF applications. Therefore, most of the multiple platform tests were conducted using WRF netcdf background files (d). The DTC also supports the GSI in global and chemical applications with limited resources. The following backgrounds have been tested for this release: \begin{small} \begin{enumerate} -\item ARW NetCDF (d) were tested with multiple cases +\item ARW NetCDF (d) were tested with multiple cases \item GFS (e) was tested with multiple NCEP cases \item WRF-Chem NetCDF (h) was tested with a single case \item NEMS-NMMB(f) was tested with a single case @@ -46,17 +46,17 @@ \subsection{Background or First Guess Field} \subsection{Observations} %------------------------------------------------------------------------------- -GSI can analyze many types of observational data, including conventional data, satellite radiance observations, GPS Radio Occultations, and radar data et al. The default observation file names given in released GSI namelist, the corresponding observations included in each files and sample BUFR files downloadable from the NCEP website are listed in table \ref{t31} on the next page. +GSI can analyze many types of observational data, including conventional data, satellite radiance observations, GPS Radio Occultations, and radar data, among others. The default observation file names are given in the released GSI namelist, with corresponding observations included in each file. Sample BUFR files available for download from the NCEP website are listed in table \ref{t31}. -The observations are complex and many observations need format converting and quality control before being used by GSI. GSI ingests observations saved in the BUFR format (with NCEP specified features). The NCEP processed PrepBUFR and BUFR files can be used directly.
If users need to introduce their own data into GSI, please check the following website for User\textquotesingle s Guide and examples on BUFR/PreBUFR processing: +The observations are complex and many observations need format converting and quality control before being used by GSI. GSI ingests observations saved in BUFR format (with NCEP specified features). The NCEP processed PrepBUFR and BUFR files can be used directly. If users need to introduce their own data into GSI, please check the following website for the User\textquotesingle s Guide and examples of BUFR/PreBUFR processing: \begin{center} \url{http://www.dtcenter.org/com-GSI/BUFR/index.php} \end{center} -DTC supports BUFR/PrepBUFR data processing and quality control as part of GSI community tasks. +DTC supports BUFR/PrepBUFR data processing and quality control as part of the GSI community tasks. -GSI can analyze all of the data types in table \ref{t31}, but each GSI run (for both operation and case study) only uses a subset of the data. Some data may be outdated and not available, some are on monitoring mode, and some data may have quality issues during certain periods. Users are encouraged to check the data quality issues prior to running an analysis. The following NCEP links provide resources that include data quality history: +GSI can analyze all of the data types in table \ref{t31}, but each GSI run (for both operation and case study purposes) only uses a subset of the data. Some data may be outdated and not available, some are in monitoring mode, and some may have quality issues during certain periods. Users are encouraged to check data quality prior to running an analysis. The following NCEP links provide resources that include data quality history: \begin{center} \begin{scriptsize} @@ -68,13 +68,12 @@ \subsection{Observations} Because the current regional models do not have ozone as a prognostic variable, ozone data are not assimilated on the regional scale. 
-GSI can be run without any observations to see how the moisture constraint modifies the first guess (background) field. GSI can be run in a pseudo single observation mode, which does not require any BUFR observation files. In this mode, users should specify observation information in the namelist section SINGLEOB\_TEST (see Section \ref{sec4.2} for details). As more data files are used, additional information will be added through the GSI analysis. - +GSI can be run without any observations to see how the moisture constraint modifies the first guess (background) field. GSI can also be run in a pseudo single observation mode, which does not require any BUFR observation files. In this mode, users should specify observation information in the namelist section SINGLEOB\_TEST (see Section \ref{sec4.2} for details). As more data files are used, additional information will be added through the GSI analysis. \begin{table}[htbp] \centering \begin{footnotesize} -\caption{GSI observation file name, content, and examples} +\caption{GSI observation file names, content, and examples} \begin{tabular}{|l|p{7cm}|c|} \hline \hline @@ -155,17 +154,17 @@ \subsection{Fixed Files (Statistics and Control Files)} A GSI analysis also needs to read specific information from statistic files, configuration files, bias correction files, and CRTM coefficient files. We refer to these files as fixed files and they are located in a directory called \verb|fix/| in the release package, except for CRTM coefficients. 
-Table \ref{t32} lists fixed files required in a GSI run, the content of the files, and corresponding example files from the regional and global applications: +Table \ref{t32} lists fixed files required for a GSI run, the content of the files, and corresponding example files from the regional and global applications: -Because most of those fixed files have hardwired names inside the GSI, a GSI run script needs to copy or link those files (right column in table \ref{t32}) from \verb|./fix| directory to GSI run directory with the file name required in GSI (left column in table \ref{t32}). For example, if GSI runs with ARW background case, the following line should be in the run script: +Because most of those fixed files have hardwired names inside the GSI, a GSI run script needs to copy or link those files (right column in table \ref{t32}) from the \verb|./fix| directory to the GSI run directory with the file name required in GSI (left column in table \ref{t32}). For example, if GSI runs with an ARW background, the following line should be in the run script: \begin{small} \begin{verbatim} -cp ${path of the fix directory}/anavinfo_arw_netcdf anavinfo +cp ${path of the fix directory}/anavinfo_arw_netcdf anavinfo \end{verbatim} \end{small} -Note that in this release, there is a strict rule that the numbers of vertical levels in the file \verb|anavinfo| must match the background file (for example, \verb|wrfinput_d01|) for the 3-dimensional variables. Otherwise GSI will fail. To find out the correct numbers of vertical levels, users can dump out (use \verb|ncdump -h|) the dimensions from the NetCDF background file and find the number for \verb|bottom_top| and \verb|bottom_top_stag|. For example, if the dimensions for the background file is: +Note that in this release, there is a strict rule that the numbers of vertical levels in the file \verb|anavinfo| must match the background file (for example, \verb|wrfinput_d01|) for the 3-dimensional variables. 
Otherwise GSI will fail. To identify the correct numbers of vertical levels, users can dump out (use \verb|ncdump -h|) the dimensions from the NetCDF background file and find the number for \verb|bottom_top| and \verb|bottom_top_stag|. For example, if the dimensions for the background file is: \begin{small} \begin{verbatim} @@ -174,7 +173,7 @@ \subsection{Fixed Files (Statistics and Control Files)} \end{verbatim} \end{small} -Then the corresponding \verb|anavinfo| file should have 51 levels for \verb|prse| (3-dimemsional pressure field) and 50 levels for other three-dimensional variables such as u, v, tv, q, oz, cw and etc. For details, users can dump out the global attributes of the background file and find the number of vertical levels for each variable. The following shows part of \verb|anavinfo| for the above background: +Then the corresponding \verb|anavinfo| file should have 51 levels for \verb|prse| (3-dimensional pressure field) and 50 levels for other three-dimensional variables such as u, v, tv, q, oz, cw, etc. For details, users can dump out the global attributes of the background file and find the number of vertical levels for each variable. The following shows part of the \verb|anavinfo| file for the above background: \newpage @@ -243,38 +242,9 @@ \subsection{Fixed Files (Statistics and Control Files)} \end{footnotesize} \end{table} +Each operational system, such as GFS, NAM, RAP, and RTMA, has its own set of fixed files. For your specific GSI runs, you need to get the correct set of fixed files. Fixed files for regional applications are included in this GSI/EnKF release and put under the \textit{fix/} directory. Fixed files for global applications are not included in this release in order to save space. Please download \verb|comGSIv3.6_EnKFv1.2_fix_global.tar.gz| if you need to run global cases. Note that little endian background error covariance files are no longer supported.
- -Each operational system, such as GFS, NAM, RAP, and RTMA, has their own set of fixed files. Therefore, for each fixed file used in GSI, there are several corresponding fixed files in the directory \verb| fix/ | that users can choose. For example, for the background error covariance file, both \verb|nam_nmmstat_na.gcv| (from the NAM system) and \verb|nam_glb_berror.f77.gcv| (from the global forecast system) can be used. We also prepared the same background error covariance files with different byte order such as files under \verb| ./fix/Little_Endian| and \verb| ./fix/Big_Endian| directory. To help users to set up these fixed files for different GSI applications, several sample run scripts are provided with the release version. - -To make \verb|./fix| directory easy to manage, this release version created 3 sub-directories to hold special group of fix files, which are introduced in table \ref{t33}. - - -\begin{table}[htbp] -\centering -\begin{small} -\caption{List of sub-directories in fix directory} -\begin{tabular}{|p{3cm}|p{9cm}|} -\hline -\hline -Directory name & Content \\ -\hline -\hline -Little\_Endian & Little Endian Bacground Error covariance (BE) files \\ -\hline -Big\_Endian & Big Endian BE files \\ -\hline -global & Global BE files and ch4, co, co2, n2o history files \\ -\hline -\end{tabular} -\label{t33} -\end{small} -\end{table} - - -Please note released \verb|comGSIv3.5_EnKFv1.1| tar files dosen\textquotesingle t include \verb|./fix/global| and \verb|./fix/Little_Endian| for space saving. Please downloand \verb|comGSIv3.5_EnKFv1.1_fix_global.tar.gz| if you need to run global case, \verb|comGSIv3.5_EnKFv1.1_fix_Little_Endian.tar.gz| if you need Little\_endian BE files. - -Each release version GSI calls certain version of CRTM library and needs the corresponding version of CRTM coefficients to do radiance data assimilation. This version of GSI uses CRTM 2.2.3. The coefficients files are listed in table \ref{t34}. 
+Each release version of the GSI calls a certain version of the CRTM library and needs corresponding CRTM coefficients to do radiance data assimilation. This version of GSI uses CRTM 2.2.3. The coefficient files are listed in table \ref{t34}. \begin{table}[htbp] @@ -327,19 +297,19 @@ \section{GSI Run Script} In this release version, three sample run scripts are available for different GSI applications: \begin{itemize} -\item \verb|comGSIv3.5_EnKFv1.1/run/run_gsi_regional.ksh| for regional GSI -\item \verb|comGSIv3.5_EnKFv1.1/run/run_gsi_global.ksh| for global GSI (GFS) -\item \verb|comGSIv3.5_EnKFv1.1/run/run_gsi_chem.ksh| for chemical analysis +\item \verb|dtc/run/run_gsi_regional.ksh| for regional GSI +\item \verb|dtc/run/run_gsi_global.ksh| for global GSI (GFS) +\item \verb|dtc/run/run_gsi_chem.ksh| for chemical analysis \end{itemize} These scripts will be called to generate GSI namelists: \begin{itemize} -\item \verb|comGSIv3.5_EnKFv1.1/run/comgsi_namelist.sh| for regional GSI -\item \verb|comGSIv3.5_EnKFv1.1/run/comgsi_namelist_gfs.sh| for global GSI (GFS) -\item \verb|comGSIv3.5_EnKFv1.1/run/comgsi_namelist_chem.sh| for GSI chemical analysis +\item \verb|dtc/run/comgsi_namelist.sh| for regional GSI +\item \verb|dtc/run/comgsi_namelist_gfs.sh| for global GSI (GFS) +\item \verb|dtc/run/comgsi_namelist_chem.sh| for GSI chemical analysis \end{itemize} -We will introduce the regional run scripts (\verb|run_gsi_regional.ksh|) in detail in the following sections and introduce the global run script when we introduce the GSI global application in Advanced GSI User\textquotesingle s Guide. +We will introduce the regional run scripts (\verb|run_gsi_regional.ksh|) in detail in the following sections and introduce the global run script when we discuss the GSI global application in the Advanced GSI User\textquotesingle s Guide. 
Note there is also a run script for regional EnKF (\verb|run_enkf_wrf.ksh|), a run script for global EnKF (\verb|run_enkf_global.ksh|) and the EnKF namelist script (\verb|enkf_wrf_namelist.sh|) in the same directory, which will be introduced in the EnKF User\textquotesingle s Guide. @@ -347,7 +317,7 @@ \section{GSI Run Script} \subsection{Steps in the GSI Run Script} %------------------------------------------------------------------------------- -The GSI run script creates a run time environment necessary for running the GSI executable. A typical GSI run script includes the following steps: +The GSI run script creates a run time environment necessary to run the GSI executable. A typical GSI run script includes the following steps: \begin{enumerate} \item Request computer resources to run GSI. @@ -355,18 +325,18 @@ \subsection{Steps in the GSI Run Script} \item Set experimental variables (such as experiment name, analysis time, background, and observation). \item Set the script that generates the GSI namelist. \item Check the definitions of required variables. -\item Generate a run directory for GSI (sometimes called working or temporary directory). +\item Generate a run directory for GSI (sometimes called a working or temporary directory). \item Copy the GSI executable to the run directory. -\item Copy the background file to the run directory and create an index file listing the location and name of ensemble members if running the hybrid. +\item Copy the background file to the run directory and create an index file listing the location and name of ensemble members if running with a hybrid set up. \item Link observations to the run directory. \item Link fixed files (statistic, control, and coefficient files) to the run directory. \item Generate namelist for GSI. \item Run the GSI executable. -\item Post-process: save analysis results, generate diagnostic files, clean run directory. 
+\item Post-process: save analysis results, generate diagnostic files, and clean the run directory. \item Run GSI as observation operator for EnKF, only for \verb|if_observer=Yes|. \end{enumerate} -Typically, users only need to modify specific parts of the run script (steps 1, 2, and 3) to fit their specific computer environment and point to the correct input/output files and directories. Users may also need to modify step 4 if changes are made to the namelist and it is under a different name or at a different location. Next section (\ref{sec3.2.2}) covers each of these modifications for steps 1 to 3. Section \ref{sec3.2.3} will dissect a sample regional GSI run script and introduce each piece of this sample GSI run script. Users should start with the run script provided in the same release package with GSI executable and modify it for their own run environment and case configuration. +Typically, users only need to modify specific parts of the run script (steps 1, 2, and 3) to fit their specific computer environment and point to the correct input/output files and directories. Users may also need to modify step 4 if changes are made to the namelist and it is under a different name or at a different location. The next section (\ref{sec3.2.2}) covers each of these modifications for steps 1 to 3. Section \ref{sec3.2.3} will dissect a sample regional GSI run script and introduce each piece of this sample GSI run script. Users should start with the run script provided in the same release package with the GSI executable and modify it for their own run environment and case configuration. %------------------------------------------------------------------------------- @@ -376,7 +346,7 @@ \subsection{Customization of the GSI Run Script} \text {3.2.2.1 Setting Up the Machine Environment} -This section focuses on step 1 of the run script: modify the machine specific entries. 
Specifically, this consists of setting Unix/Linux environment variables and selecting the correct parallel run time environment (batch system with options). +This section focuses on step 1 of the run script: modifying the machine specific entries. Specifically, this consists of setting Unix/Linux environment variables and selecting the correct parallel run time environment (batch system with options). GSI can be run with the same parallel environments as other MPI programs, for example: @@ -385,8 +355,8 @@ \subsection{Customization of the GSI Run Script} \item IBM supercomputer using LoadLevel \item Linux clusters using PBS (Portable Batch System) \item Linux clusters using LSF -\item Linux workstation (with no batch system) -\item Intel Mac Darwin workstation with PGI complier (with no batch system) +\item Linux workstation (no batch system) +\item Intel Mac Darwin workstation with PGI complier (no batch system) \end{itemize} Two queuing systems are listed below as examples: @@ -433,9 +403,9 @@ \subsection{Customization of the GSI Run Script} \label{t35} \end{table} -In both of the examples above, environment variables are set specifying system resource management, such as the number of processors, the name/type of queue, maximum wall clock time allocated for the job, options for standard out and standard error, etc. Some platforms need additional definitions to specify Unix environmental variables that further define the run environment. +In both of the examples above, environment variables are set specifying system resource management, such as the number of processors, the name/type of queue, maximum wall clock time allocated for the job, options for standard out and standard error, etc. Some platforms need additional definitions to specify Unix environment variables that further define the run environment. -These variable settings can significantly impact the GSI run efficiency and accuracy of the GSI results. 
Please check with your system administrator for the optimal settings for your computer system. Note that while the GSI can be run with any number of processors, it will not scale well with the increase of processor numbers after a certain threshold based on the case configuration and GSI application types. +These variable settings can significantly impact the GSI run efficiency and accuracy of the GSI results. Please check with your system administrator for optimal settings for your computer system. Note that while the GSI can be run with any number of processors, it will not scale well with the increase of processor numbers after a certain threshold based on the case configuration and GSI application types. \text{3.2.2.2 Setting up the Running Environment} @@ -482,7 +452,7 @@ \subsection{Customization of the GSI Run Script} \text{3.2.2.3 Setting Up an Analysis Case} -This section discusses setting up variables specific to user\textquotesingle s case, such as analysis time, working directory, background and observation files, location of fixed files and CRTM coefficients, the GSI executable file and the script generating GSI namelist. +This section discusses setting up variables specific to a given case, such as analysis time, working directory, background and observation files, location of fixed files and CRTM coefficients, the GSI executable file, and the script generating GSI namelist. 
\begin{footnotesize} \begin{verbatim} @@ -497,26 +467,26 @@ \subsection{Customization of the GSI Run Script} # OBS_ROOT = path of observations files # FIX_ROOT = path of fix files # GSI_EXE = path and name of the gsi executable - ANAL_TIME=2014061700 + ANAL_TIME=2017051312 HH=`echo $ANAL_TIME | cut -c9-10` - WORK_ROOT=comGSIv3.5_EnKFv1.1/run/testarw - OBS_ROOT=GSI_DTC/data/20140617/${ANAL_TIME}/obs + WORK_ROOT=testarw + OBS_ROOT=data/${ANAL_TIME}/obs PREPBUFR=${OBS_ROOT}/nam.t${HH}z.prepbufr.tm00.nr - BK_ROOT=GSI_DTC/data/20140617/${ANAL_TIME}/arw + BK_ROOT=data/${ANAL_TIME}/arw BK_FILE=${BK_ROOT}/wrfinput_d01.${ANAL_TIME} - CRTM_ROOT=GSI_DTC/data/fix/CRTM_2.2.3 - GSI_ROOT=comGSIv3.5_EnKFv1.1 + CRTM_ROOT=fix/CRTM_2.2.3 + GSI_ROOT=comGSI FIX_ROOT=${GSI_ROOT}/fix - GSI_EXE=${GSI_ROOT}/run/gsi.exe - GSI_NAMELIST=${GSI_ROOT}/run/comgsi_namelist.sh + GSI_EXE=${GSI_ROOT}/dtc/run/gsi.exe + GSI_NAMELIST=${GSI_ROOT}/dtc/run/comgsi_namelist.sh \end{verbatim} \end{footnotesize} -When picking the observation BUFR files, a few cautions to be aware of are: +When picking the observation BUFR files, please be aware of the following: \begin{itemize} -\item GSI run will stop if the time in the background file cannot match the cycle time in the observation BUFR file used for the GSI run (there is a namelist option to turn this check off). -\item Even if their contents are identical, PrepBUFR/BUFR files will differ if they were created on platforms with different endian byte order specification (Linux vs. IBM). Appendix A.1 discusses the conversion tool ssrc to byte-swap observation files. Since the release version 3.2, GSI compiled with PGI and Intel can automatically handle the byte order issue in PrepBUFR and BUFR files. Users can directly link any order BUFR file if working with Intel and PGI platform. 
+\item GSI run will stop if the time in the background file does not match the cycle time in the observation BUFR file used for the GSI run (there is a namelist option to turn this verification step off). +\item Even if their contents are identical, PrepBUFR/BUFR files will differ if they were created on platforms with different endian byte order specification (Linux vs. IBM). Appendix A.1 discusses the conversion tool SSRC used to byte-swap observation files. Since release version 3.2, GSI compiled with PGI and Intel can automatically handle byte order issues in PrepBUFR and BUFR files. Users can directly link BUFR files of any order if working with Intel and PGI platform. \end{itemize} The next part of this block focuses on additional options that specify important aspects of the GSI configuration. @@ -528,21 +498,43 @@ \subsection{Customization of the GSI Run Script} # (GLOBAL or NAM) # if_clean = clean : delete temperal files in working directory (default) # no : leave running directory as is (this is for debug only) +# if_observer = Yes : only used as observation operater for enkf +# if_hybrid = Yes : Run GSI as 3D/4D EnVar +# if_4DEnVar = Yes : Run GSI as 4D EnVar + if_hybrid=No # Yes, or, No -- case sensitive ! + if_4DEnVar=No # Yes, or, No -- case sensitive (if_hybrid must be Yes)! + if_observer=No # Yes, or, No -- case sensitive ! 
+ bk_core=ARW bkcv_option=NAM if_clean=clean -# if_observer = Yes : only used as observation operater for enkf +# +# setup for GSI 3D/4D EnVar hybrid + if [ ${if_hybrid} = Yes ] ; then + ENS_ROOT=data/dacase/2017051312 + ENSEMBLE_FILE_mem=${ENS_ROOT}/gfsens/sfg_2017051306_fhr06s + + if [ ${if_4DEnVar} = Yes ] ; then + BK_FILE_P1=${BK_ROOT}/wrfout_d01_2017-05-13_19:00:00 + BK_FILE_M1=${BK_ROOT}/wrfout_d01_2017-05-13_17:00:00 + + ENSEMBLE_FILE_mem_p1=${ENS_ROOT}/sfg_2017051312_fhr09s + ENSEMBLE_FILE_mem_m1=${ENS_ROOT}/sfg_2017051312_fhr03s + fi + fi + # no_member number of ensemble members # BK_FILE_mem path and base for ensemble members - if_observer=No # Yes, or, No -- case sensitive ! no_member=20 BK_FILE_mem=${BK_ROOT}/wrfarw.mem \end{verbatim} \end{footnotesize} -Option bk\_core indicates the specific dynamic core used to create the background files and is used to specify the core in the namelist. In this release, there is another bk\_core option for NMMB in addition to WRF ARW and NMM, and also an option if\_observer=Yes to run GSI as observation operator for EnKF. Option bkcv\_option specifies the background error covariance to be used in the case. Two regional background error covariance matrices are provided with the release, one from NCEP global data assimilation (GDAS), and one from NAM data assimilation system (NDAS). Please check Section \ref{sec4.8} for more details about GSI background error covariance. Option if\_clean is to tell the run script if it needs to delete temporal intermediate files in the working directory after a GSI run is completed. Option if\_observer is to tell the run script if it needs to run GSI as observation operator for EnKF. +Option if\_hybrid controls whether to run a hybrid ensemble/variational data analysis. If if\_hybrid=Yes, option if\_4DEnVar=Yes indicates a hybrid 4D-EnVar analysis will be run, while if\_4DEnVar=No indicates a hybrid 3DEnVAR analysis will be run. 
Option if\_observer determines whether GSI is run as an observation operator for EnKF. + +Option bk\_core indicates the specific dynamic core used to create the background files and specifies the core in the namelist. Option bk\_core can be ARW or NMMB. Option bkcv\_option specifies the background error covariance to be used in the case. Two regional background error covariance matrices are provided with the release, one from NCEP global data assimilation (GDAS), and one from the NAM data assimilation system (NDAS). Please check Section \ref{sec4.8} for more details about GSI background error covariance. Option if\_clean tells the script if it needs to delete temporary intermediate files in the working directory after a GSI run is completed. -In most of case after the following point, users should only make minor changes: +In most cases, users should only make minor changes after the following: \begin{footnotesize} \begin{verbatim} @@ -563,7 +555,7 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} Listed below is an annotated regional run script with explanations on each function block. 
-For further details on the first 3 blocks of the script that users need to change, check section 3.2.2.1, 3.2.2.2, and 3.2.2.3: +For further details on the first three blocks of the script that users need to change, see sections 3.2.2.1, 3.2.2.2, and 3.2.2.3: \begin{footnotesize} \begin{verbatim} @@ -576,7 +568,7 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} # # GSIPROC = processor number used for GSI analysis #------------------------------------------------ - GSIPROC=8 + GSIPROC=4 ARCH='LINUX_LSF' # Supported configurations: @@ -595,18 +587,18 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} # OBS_ROOT = path of observations files # FIX_ROOT = path of fix files # GSI_EXE = path and name of the gsi executable - ANAL_TIME=2014061700 + ANAL_TIME=2017051312 HH=`echo $ANAL_TIME | cut -c9-10` - WORK_ROOT=run/testarw - OBS_ROOT=data/20140617/obs + WORK_ROOT=testarw + OBS_ROOT=data/${ANAL_TIME}/obs PREPBUFR=${OBS_ROOT}/nam.t${HH}z.prepbufr.tm00.nr - BK_ROOT=data/20140617/2014061700/arw + BK_ROOT=data/${ANAL_TIME}/arw BK_FILE=${BK_ROOT}/wrfinput_d01.${ANAL_TIME} - CRTM_ROOT=data/CRTM_2.2.3 - GSI_ROOT=code/comGSIv3.5_EnKFv1.1 + CRTM_ROOT=fix/CRTM_2.2.3 + GSI_ROOT=comGSI FIX_ROOT=${GSI_ROOT}/fix - GSI_EXE=${GSI_ROOT}/run/gsi.exe - GSI_NAMELIST=${GSI_ROOT}/run/comgsi_namelist.sh + GSI_EXE=${GSI_ROOT}/dtc/run/gsi.exe + GSI_NAMELIST=${GSI_ROOT}/dtc/run/comgsi_namelist.sh #------------------------------------------------ # bk_core= which WRF core is used as background (NMM or ARW or NMMB) @@ -614,13 +606,33 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} # (GLOBAL or NAM) # if_clean = clean : delete temperal files in working directory (default) # no : leave running directory as is (this is for debug only) +# if_observer = Yes : only used as observation operater for enkf +# if_hybrid = Yes : Run GSI as 3D/4D EnVar +# if_4DEnVar = Yes : Run GSI as 4D EnVar + if_hybrid=No # Yes, or, No -- case sensitive ! 
+ if_4DEnVar=No # Yes, or, No -- case sensitive (if_hybrid must be Yes)! + if_observer=No # Yes, or, No -- case sensitive ! + bk_core=ARW bkcv_option=NAM if_clean=clean -# if_observer = Yes : only used as observation operater for enkf +# +# setup for GSI 3D/4D EnVar hybrid + if [ ${if_hybrid} = Yes ] ; then + ENS_ROOT=data/dacase/2017051312 + ENSEMBLE_FILE_mem=${ENS_ROOT}/gfsens/sfg_2017051306_fhr06s + + if [ ${if_4DEnVar} = Yes ] ; then + BK_FILE_P1=${BK_ROOT}/wrfout_d01_2017-05-13_19:00:00 + BK_FILE_M1=${BK_ROOT}/wrfout_d01_2017-05-13_17:00:00 + + ENSEMBLE_FILE_mem_p1=${ENS_ROOT}/sfg_2017051312_fhr09s + ENSEMBLE_FILE_mem_m1=${ENS_ROOT}/sfg_2017051312_fhr03s + fi + fi + # no_member number of ensemble members # BK_FILE_mem path and base for ensemble members - if_observer=No # Yes, or, No -- case sensitive ! no_member=20 BK_FILE_mem=${BK_ROOT}/wrfarw.mem \end{verbatim} @@ -636,7 +648,7 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} \end{verbatim} \end{footnotesize} -The next block sets run command to run GSI on multiple platforms. The ARCH is set in the beginning of the script. Option BYTE\_ORDER has been set as Big\_Endian because GSI compiled with Intel and PGI can read in Big\_Endian background error file, BUFR file and CRTM coefficient files. +The next block sets the run command for GSI on multiple platforms. The ARCH variable is set at the beginning of the script. Option BYTE\_ORDER has been set as Big\_Endian because GSI compiled with Intel and PGI can read a Big\_Endian background error file, BUFR files, and CRTM coefficient files. \begin{footnotesize} \begin{verbatim} @@ -682,7 +694,7 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} \end{verbatim} \end{footnotesize} -The next block checks if all the variables needed for a GSI run are properly defined. These variables should have been defined in the first 3 parts of this script. 
+The next block checks if all the variables needed for a GSI run are properly defined. These variables should have been defined in the first three parts of this script. \begin{scriptsize} \begin{verbatim} @@ -759,7 +771,7 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} \begin{scriptsize} \begin{verbatim} ################################################################################## -# Create the ram work directory and cd into it +# Create the work directory and cd into it workdir=${WORK_ROOT} echo " Create working directory:" ${workdir} @@ -780,10 +792,14 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} # Bring over background field (it's modified by GSI so we can't link to it) cp ${BK_FILE} ./wrf_inout +if [ ${if_4DEnVar} = Yes ] ; then + cp ${BK_FILE_P1} ./wrf_inou3 + cp ${BK_FILE_M1} ./wrf_inou1 +fi \end{verbatim} \end{scriptsize} -Note: You can link observation files to the working directory because GSI will not overwrite these files. The observations that can be analyzed in GSI are listed in the column dfile of the GSI namelist section OBS\_INPUT, as specified in \verb|run/comgsi_namelist.sh|. Most of the conventional observations are in one single file named prepbufr, while different radiance data are in separate files based on satellite instruments, such as AMSU-A or HIRS. All these observation files must be linked as GSI recognized file names in dfile. Please check table \ref{t31} for a detailed explanation of links and the meanings of each file name listed below. +Note: You can link observation files to the working directory because GSI will not overwrite these files. The observations that can be analyzed in GSI are listed in the column "dfile" of the GSI namelist section OBS\_INPUT, as specified in \verb|run/comgsi_namelist.sh|. 
Most of the conventional observations are in one single file named prepbufr, while different radiance data are in separate files based on satellite instruments, such as AMSU-A or HIRS. All these observation files must be linked as GSI recognized file names in "dfile." Please check table \ref{t31} for a detailed explanation of links and the meanings of each file name listed below. \begin{footnotesize} \begin{verbatim} @@ -792,12 +808,56 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} # ln -s ${OBS_ROOT}/gdas1.t${HH}z.sptrmm.tm00.bufr_d tmirrbufr # Link to the radiance data - ln -s ${OBS_ROOT}/gdas1.t${HH}z.1bamua.tm00.bufr_d amsuabufr - ln -s ${OBS_ROOT}/nam.t${HH}z.1bamub.tm00.bufr_d amsubbufr - ln -s ${OBS_ROOT}/nam.t${HH}z.1bhrs3.tm00.bufr_d hirs3bufr - ln -s ${OBS_ROOT}/gdas1.t${HH}z.1bhrs4.tm00.bufr_d hirs4bufr - ln -s ${OBS_ROOT}/nam.t${HH}z.1bmhs.tm00.bufr_d mhsbufr - ln -s ${OBS_ROOT}/nam.t${HH}z.gpsro.tm00.bufr_d gpsrobufr +srcobsfile[1]=${OBS_ROOT}/gdas1.t${HH}z.satwnd.tm00.bufr_d +gsiobsfile[1]=satwnd +srcobsfile[2]=${OBS_ROOT}/gdas1.t${HH}z.1bamua.tm00.bufr_d +gsiobsfile[2]=amsuabufr +srcobsfile[3]=${OBS_ROOT}/gdas1.t${HH}z.1bhrs4.tm00.bufr_d +gsiobsfile[3]=hirs4bufr +srcobsfile[4]=${OBS_ROOT}/gdas1.t${HH}z.1bmhs.tm00.bufr_d +gsiobsfile[4]=mhsbufr +srcobsfile[5]=${OBS_ROOT}/gdas1.t${HH}z.1bamub.tm00.bufr_d +gsiobsfile[5]=amsubbufr +srcobsfile[6]=${OBS_ROOT}/gdas1.t${HH}z.ssmisu.tm00.bufr_d +gsiobsfile[6]=ssmirrbufr +# srcobsfile[7]=${OBS_ROOT}/gdas1.t${HH}z.airsev.tm00.bufr_d +gsiobsfile[7]=airsbufr +srcobsfile[8]=${OBS_ROOT}/gdas1.t${HH}z.sevcsr.tm00.bufr_d +gsiobsfile[8]=seviribufr +srcobsfile[9]=${OBS_ROOT}/gdas1.t${HH}z.iasidb.tm00.bufr_d +gsiobsfile[9]=iasibufr +srcobsfile[10]=${OBS_ROOT}/gdas1.t${HH}z.gpsro.tm00.bufr_d +gsiobsfile[10]=gpsrobufr +srcobsfile[11]=${OBS_ROOT}/gdas1.t${HH}z.amsr2.tm00.bufr_d +gsiobsfile[11]=amsrebufr +srcobsfile[12]=${OBS_ROOT}/gdas1.t${HH}z.atms.tm00.bufr_d +gsiobsfile[12]=atmsbufr 
+srcobsfile[13]=${OBS_ROOT}/gdas1.t${HH}z.geoimr.tm00.bufr_d +gsiobsfile[13]=gimgrbufr +srcobsfile[14]=${OBS_ROOT}/gdas1.t${HH}z.gome.tm00.bufr_d +gsiobsfile[14]=gomebufr +srcobsfile[15]=${OBS_ROOT}/gdas1.t${HH}z.omi.tm00.bufr_d +gsiobsfile[15]=omibufr +srcobsfile[16]=${OBS_ROOT}/gdas1.t${HH}z.osbuv8.tm00.bufr_d +gsiobsfile[16]=sbuvbufr +srcobsfile[17]=${OBS_ROOT}/gdas1.t${HH}z.eshrs3.tm00.bufr_d +gsiobsfile[17]=hirs3bufrears +srcobsfile[18]=${OBS_ROOT}/gdas1.t${HH}z.esamua.tm00.bufr_d +gsiobsfile[18]=amsuabufrears +srcobsfile[19]=${OBS_ROOT}/gdas1.t${HH}z.esmhs.tm00.bufr_d +gsiobsfile[19]=mhsbufrears +srcobsfile[20]=${OBS_ROOT}/rap.t${HH}z.nexrad.tm00.bufr_d +gsiobsfile[20]=l2rwbufr +srcobsfile[21]=${OBS_ROOT}/rap.t${HH}z.lgycld.tm00.bufr_d +gsiobsfile[21]=larcglb +ii=1 +while [[ $ii -le 21 ]]; do + if [ -r "${srcobsfile[$ii]}" ]; then + ln -s ${srcobsfile[$ii]} ${gsiobsfile[$ii]} + echo "link source obs file ${srcobsfile[$ii]}" + fi + (( ii = $ii + 1 )) +done \end{verbatim} \end{footnotesize} @@ -827,7 +887,7 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} \end{verbatim} \end{footnotesize} -Note: For background error covariances, observation errors, and analysis available information files, we provide two sets of fixed files here, one set is based on GFS statistics and another is based on NAM statistics. For this release there is an additional setting of the ANAVINFO file for bk\_core=NMMB for both GFS and NAM statistics. +Note: For background error covariances, observation errors, and analysis variable information, we provide two sets of fixed files. One set is based on GFS statistics and another is based on NAM statistics. For this release there is an additional setting in the ANAVINFO file for "bk\_core" for both GFS and NAM statistics. 
\begin{footnotesize} \begin{verbatim} @@ -909,21 +969,21 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} cp $bufrtable ./prepobs_prep.bufrtable # for satellite bias correction -cp ${FIX_ROOT}/gdas1.t00z.abias.20150617 ./satbias_in -cp ${FIX_ROOT}/gdas1.t00z.abias_pc.20150617 ./satbias_pc +cp ${OBS_ROOT}/gdas1.t12z.abias ./satbias_in +cp ${OBS_ROOT}/gdas1.t12z.abias_pc ./satbias_pc_in \end{verbatim} \end{footnotesize} -Please note that in the above sample script, two fixed files related to radiance bias correction are copied from \verb|fix/| to the work directory: +Please note that in the above sample script, two files related to radiance bias correction are copied to the work directory: \begin{small} \begin{verbatim} -cp ${FIX_ROOT}/gdas1.t00z.abias.20150617 ./satbias_in -cp ${FIX_ROOT}/gdas1.t00z.abias_pc.20150617 ./satbias_pc +cp ${OBS_ROOT}/gdas1.t12z.abias ./satbias_in +cp ${OBS_ROOT}/gdas1.t12z.abias_pc ./satbias_pc_in \end{verbatim} \end{small} -There are two options on how to perform the radiance bias correction. The first method is to do the angle dependent bias correction offline and do the mass bias correction inside the GSI analysis, therefore requiring two input files: \verb|satbias_angle| corresponding to angle dependent bias correction file and \verb|satbias_in| being the input file for mass bias correction. The second method is to combine the angle dependent and mass bias correction together and do it within the GSI analysis, requiring one combined input file \verb|satbias_in|. Note that the input bias correction coefficients file \verb|satbias_in| are different for the two options, therefore it is important to use the appropriate input file for each method. The sample input files for the first method are provided with this release \verb|global_satangbias.txt| and \verb|sample.satbias|. 
For using the second option - combined angle dependent and mass bias correction, a sample file \verb|gdas1.t00z.abias_pc.20150617| is also provided. Users, as a starting point, might also download a GDAS satbias coefficient file from the NOMADS ftp site as the input file (starting spring 2015, the GDAS \verb|satbias| files have adopted the new format): +There are two options on how to perform the radiance bias correction. The first method is to do the angle dependent bias correction offline and do the mass bias correction inside the GSI analysis, therefore requiring two input files: \verb|satbias_angle|, corresponding to the angle dependent bias correction file and \verb|satbias_in|, being the input file for mass bias correction. The second method is to combine the angle dependent and mass bias correction together and do it within the GSI analysis, requiring one combined input file: \verb|satbias_in|. Note that the input bias correction coefficients file, \verb|satbias_in|, is different for the two options, therefore it is important to use the appropriate input file for each method. The sample input files for the first method are provided with this release: \verb|global_satangbias.txt| and \verb|sample.satbias|. To use the second option - combined angle dependent and mass bias correction, a sample file, \verb|gdas1.t00z.abias_pc.20150617|, is also provided. As a starting point, users may also download a GDAS satbias coefficient file from the NOMADS ftp site as the input file (starting in spring 2015, the GDAS \verb|satbias| files have adopted the following format): \url{ftp://nomads.ncdc.noaa.gov/GDAS/YYYYMM/YYYYMMDD/gdas1.tHHz.abias} @@ -1003,6 +1063,21 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} Note: \verb|EOF| indicates the end of GSI namelist. +The following block modifies the anavinfo file so that its vertical levels are consistent with the wrf\_inout file for WRF ARW or NMM. 
Users no longer need to manually modify the anavinfo file. + +\begin{footnotesize} +\begin{verbatim} +# modify the anavinfo vertical levels based on wrf_inout for WRF ARW and NMM +if [ ${bk_core} = ARW ] || [ ${bk_core} = NMM ] ; then +bklevels=`ncdump -h wrf_inout | grep "bottom_top =" | awk '{print $3}' ` +bklevels_stag=`ncdump -h wrf_inout | grep "bottom_top_stag =" | awk '{print $3}' ` +anavlevels=`cat anavinfo | grep ' sf ' | tail -1 | awk '{print $2}' ` # levels of sf, vp, u, v, t, etc +anavlevels_stag=`cat anavinfo | grep ' prse ' | tail -1 | awk '{print $2}' ` # levels of prse +sed -i 's/ '$anavlevels'/ '$bklevels'/g' anavinfo +sed -i 's/ '$anavlevels_stag'/ '$bklevels_stag'/g' anavinfo +fi +\end{verbatim} +\end{footnotesize} The following block runs GSI and checks if GSI has successfully completed. @@ -1033,7 +1108,7 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} \end{verbatim} \end{footnotesize} -The following block saves the analysis results with an understandable name and adds the analysis time to some output file names. Among them, stdout contains runtime output of GSI and \verb|wrf_inout| is the analysis result. +The following block saves the analysis results with an understandable name and adds the analysis time to some output file names. Among them, "stdout" contains runtime output of GSI and \verb|wrf_inout| is the resulting analysis file. \begin{footnotesize} \begin{verbatim} @@ -1054,7 +1129,7 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} \end{verbatim} \end{footnotesize} -The following block collects the diagnostic files. The diagnostic files are merged and categorized based on outer loop and data type. Setting write\_diag to true in the namelist directs GSI to write out diagnostic information for each observation station. This information is very useful to check analysis details. Please check Appendix A.2 for the tool to read and analyze these diagnostic files. 
+The following block collects the diagnostic files. The diagnostic files are merged and categorized based on outer loop and data type. Setting "write\_diag" to true in the namelist directs GSI to write out diagnostic information for each observation. This information is very useful to check analysis details. Please check Appendix A.2 for the tool to read and analyze these diagnostic files. \begin{footnotesize} \begin{verbatim} @@ -1103,7 +1178,7 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} \end{verbatim} \end{footnotesize} -The following scripts clean the temporal intermediate files +The following scripts clean the temporary intermediate files: \begin{footnotesize} \begin{verbatim} @@ -1120,7 +1195,7 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} \end{verbatim} \end{footnotesize} -The following block of the script runs only for \verb|if_observer=Yes|, which runs GSI as observation operators for EnKF and without doing minimization. The script first renames the previous diagnostics files and GSI analysis file by appending \verb| .ensmean| to the filenames to avoid these files being overwritten by the new GSI run. +The following block of the script runs only for \verb|if_observer=Yes|, which runs GSI as an observation operator for EnKF and without doing minimization. The script first renames the previous diagnostics files and GSI analysis file by appending \verb| .ensmean| to the filenames to avoid these files being overwritten by the new GSI run. 
\begin{footnotesize} \begin{verbatim} @@ -1157,7 +1232,7 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} \end{verbatim} \end{footnotesize} -The rest of the script loops through the ensemble members to get the background ready, run GSI and check the run status: +The rest of the script loops through the ensemble members to get the background ready, run GSI, and check the run status: \begin{footnotesize} \begin{verbatim} @@ -1231,7 +1306,7 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} \end{verbatim} \end{small} -If this point is reached, the GSI successfully finishes and exits with 0: +If this point is reached, the GSI successfully finishes and exits with status "0": \begin{small} \begin{verbatim} @@ -1243,85 +1318,86 @@ \subsection{Description of the Sample Regional Run Script to Run GSI} \section{GSI Analysis Result Files in Run Directory}\label{sec3.3} %------------------------------------------------------------------------------- -Once the GSI run script is set up, it is ready to be submitted just as other batch jobs. When completed, GSI will create a number of files in the run directory. Below is an example of the files generated in the run directory from one of the GSI test case runs. This case was run to perform a regional GSI analysis with a WRF ARW NetCDF background using conventional (prepbufr), radiance (AMSU-A, HIRS4, and MHS), and GPSRO data. The analysis time is 00Z 17 June 2014. Four processors were used. To make the run directory more readable, we turned on the clean option in the run script, which deleted all temporary intermediate files. +Once the GSI run script is set up, it is ready to be submitted like any other batch job. When completed, GSI will create a number of files in the run directory. Below is an example of the files generated in the run directory from one of the GSI test case runs. 
This case was run to perform a regional GSI analysis with a WRF-ARW NetCDF background using conventional (prepbufr), radiance (AMSU-A, HIRS4, and MHS), and GPSRO data. The analysis time is 1200Z on 13 May 2017. Four processors were used. To make the run directory more readable, we turned on the clean option in the run script, which deleted all temporary intermediate files. \begin{scriptsize} \begin{verbatim} -amsuabufr diag_mhs_n19_ges.2014061700 fort.226 -amsubbufr errtable fort.227 -anavinfo fit_p1.2014061700 fort.228 -berror_stats fit_q1.2014061700 fort.229 -convinfo fit_rad1.2014061700 fort.230 -diag_amsua_metop-a_anl.2014061700 fit_t1.2014061700 gpsrobufr -diag_amsua_metop-a_ges.2014061700 fit_w1.2014061700 gsi.exe -diag_amsua_metop-b_anl.2014061700 fort.201 gsiparm.anl -diag_amsua_metop-b_ges.2014061700 fort.202 hirs4bufr -diag_amsua_n15_anl.2014061700 fort.203 l2rwbufr -diag_amsua_n15_ges.2014061700 fort.204 list_run_directory -diag_amsua_n18_anl.2014061700 fort.205 mhsbufr -diag_amsua_n18_ges.2014061700 fort.206 ozinfo -diag_amsua_n19_anl.2014061700 fort.207 pcpbias_out -diag_amsua_n19_ges.2014061700 fort.208 pcpinfo -diag_conv_anl.2014061700 fort.209 prepbufr -diag_conv_ges.2014061700 fort.210 prepobs_prep.bufrtable -diag_hirs4_metop-a_anl.2014061700 fort.211 satbias_ang.out -diag_hirs4_metop-a_ges.2014061700 fort.212 satbias_in -diag_hirs4_metop-b_anl.2014061700 fort.213 satbias_out -diag_hirs4_metop-b_ges.2014061700 fort.214 satbias_out.int -diag_hirs4_n19_anl.2014061700 fort.215 satbias_pc -diag_hirs4_n19_ges.2014061700 fort.217 satbias_pc.out -diag_mhs_metop-a_anl.2014061700 fort.218 satinfo -diag_mhs_metop-a_ges.2014061700 fort.219 stdout -diag_mhs_metop-b_anl.2014061700 fort.220 stdout.anl.2014061700 -diag_mhs_metop-b_ges.2014061700 fort.221 wrf_inout -diag_mhs_n18_anl.2014061700 fort.223 wrfanl.2014061700 -diag_mhs_n18_ges.2014061700 fort.224 -diag_mhs_n19_anl.2014061700 fort.225 +amsuabufr fort.206 hirs3bufrears +amsuabufrears fort.207 hirs4bufr 
+anavinfo fort.208 l2rwbufr +atmsbufr fort.209 larcglb +berror_stats fort.210 list_run_directory +convinfo fort.211 mhsbufr +diag_amsua_n15_anl.2017051312 fort.212 mhsbufrears +diag_amsua_n15_ges.2017051312 fort.213 omibufr +diag_amsua_n18_anl.2017051312 fort.214 ozinfo +diag_amsua_n18_ges.2017051312 fort.215 pcpbias_out +diag_amsua_n19_anl.2017051312 fort.217 pcpinfo +diag_amsua_n19_ges.2017051312 fort.218 prepbufr +diag_conv_anl.2017051312 fort.219 prepobs_prep.bufrtable +diag_conv_ges.2017051312 fort.220 radar_supobs_from_level2 +diag_hirs4_n19_anl.2017051312 fort.221 satbias_angle +diag_hirs4_n19_ges.2017051312 fort.223 satbias_ang.out +diag_mhs_n18_anl.2017051312 fort.224 satbias_in +diag_mhs_n18_ges.2017051312 fort.225 satbias_out +diag_mhs_n19_anl.2017051312 fort.226 satbias_out.int +diag_mhs_n19_ges.2017051312 fort.227 satbias_pc_in +errtable fort.228 satbias_pc.out +fit_p1.2017051312 fort.229 satinfo +fit_q1.2017051312 fort.230 satwnd +fit_rad1.2017051312 fort.232 sbuvbufr +fit_t1.2017051312 fort.233 seviribufr +fit_w1.2017051312 fort.234 ssmirrbufr +fort.201 gimgrbufr stdout +fort.202 gomebufr stdout.anl.2017051312 +fort.203 gpsrobufr wrfanl.2017051312 +fort.204 gsi.exe wrf_inout +fort.205 gsiparm.anl \end{verbatim} \end{scriptsize} It is important to know which files hold the GSI analysis results, standard output, and diagnostic information. We will introduce these files and their contents in detail in the following chapter. The following is a brief list of what these files contain: \begin{itemize} -\item \textit{stdout.anl.2014061700/stdout}: standard text output file, which is a link to stdout with the analysis time appended. This is the most commonly used file to check the GSI analysis processes as well as basic and important information about the analyses. We will explain the contents of stdout in Section 4.1 and users are encouraged to read this file in detail to become familiar with the order of GSI analysis processing. 
-\item \textit{wrfanl.2014061700/wrf\_inout}: analysis results if GSI completes successfully \- it exists only if using WRF for background. This is a link to \textit{wrf\_inout} with the analysis time appended. The format is the same as the background file. + \item \textit{stdout} or \textit{stdout.anl.(time)}: standard text output file. \textit{stdout.anl.(time)} is a link to \textit{stdout} with the analysis time appended. This is the most commonly used file to check the GSI analysis processes and contains basic and important information about the analyses. We will explain the contents of the \textit{stdout} file in Section 4.1 and users are encouraged to read this file in detail to become familiar with the order of GSI analysis processing. + \item \textit{wrf\_inout} or \textit{wrfanl.(time)}: analysis results if GSI completes successfully. It exists only if using WRF for the background. The \textit{wrfanl.(time)} file is a link to \textit{wrf\_inout} with the analysis time appended. The format is the same as the background file. \item \textit{diag\_conv\_anl.(time)}: binary diagnostic files for conventional and GPS RO observations at the final analysis step (analysis departure for each observation). -\item \textit{diag\_conv\_ges.(time)}: binary diagnostic files for conventional and GPS RO observations before initial analysis step (background departure for each observation) -\item \textit{diag\_(instrument\_satellite)\_anl}: diagnostic files for satellite radiance observations at final analysis step. -\item \textit{diag\_(instrument\_satellite)\_ges}: diagnostic files for satellite radiance observations before initial analysis step. +\item \textit{diag\_conv\_ges.(time)}: binary diagnostic files for conventional and GPS RO observations before the initial analysis step (background departure for each observation) +\item \textit{diag\_(instrument\_satellite)\_anl}: diagnostic files for satellite radiance observations at the final analysis step. 
+\item \textit{diag\_(instrument\_satellite)\_ges}: diagnostic files for satellite radiance observations before the initial analysis step. \item \textit{gsiparm.anl}: GSI namelist, generated by the run script. \item \textit{fit\_(variable).(time)}: links to fort.2?? with meaningful names (variable name plus analysis time). They are statistic results of observation departures from background and analysis results according to observation variables. Please see Section 4.5 for more details. -\item \textit{fort.220}: output from the inner loop minimization (in \textit{pcgsoi.f90}). Please see Section 4.6 for details. -\item \textit{anavinfo}: info file to set up control variables, state variables, and background variables. Please see Advanced GSI User\textquotesingle s Guide for details. +\item \textit{fort.220}: output from the inner loop minimization (in \textit{pcgsoi.f90}). Please see Section 4.6 for details. +\item \textit{anavinfo}: info file to set up control, state, and background variables. Please see the Advanced GSI User\textquotesingle s Guide for details. \item \textit{*info} (\textit{convinfo},\textit{satinfo}, \dots): info files that control data usage. Please see Section \ref{sec4.3} for details. \item \textit{berror\_stats} and \textit{errtable}: background error file (binary) and observation error file (text). -\item \textit{*bufr}: observation BUFR files linked to the run directory Please see Section 3.1 for details. +\item \textit{*bufr}: observation BUFR files linked to the run directory. Please see Section 3.1 for details. \item \textit{satbias\_in}: the input coefficients of bias correction for satellite radiance observations. \item \textit{satbias\_out}: the output coefficients of bias correction for satellite radiance observations after the GSI run. \item \textit{satbias\_pc}: the input coefficients of bias correction for passive satellite radiance observations.
-\item \textit{list\_run\_directory} : the complete list of files in the run directory before cleaning the run directory. This is generated by the GSI run script. +\item \textit{list\_run\_directory} : the complete list of files in the run directory before cleaning takes place. This is generated by the GSI run script. \end{itemize} -The \verb|diag| files, such as \verb|diag_(instrument_satellite)_anl.(time)| and \verb|diag_conv_anl.(time)|, contain important information about the data used in the GSI, including observation departure from analysis results for each observation (O-A). Similarly, \verb|diag_conv_ges| and \verb|diag_(instrumen_satellite)_ges.(time)| include observation innovation for each observation (O-B). These files can be very helpful in understanding the detailed impact of data on the analysis. A tool is provided to process these files, which is introduced in Appendix A.2. +The \verb|diag| files, such as \verb|diag_(instrument_satellite)_anl.(time)| and \verb|diag_conv_anl.(time)|, contain important information about the data used in the GSI, including observation departure from analysis results for each observation (O-A). Similarly, \verb|diag_conv_ges| and \verb|diag_(instrumen_satellite)_ges.(time)| include the observation innovation for each observation (O-B). These files can be very helpful in understanding the detailed impact of data on the analysis. A tool is provided to process these files, which is introduced in Appendix A.2. -There are many intermediate files in this directory during the running stage or if the GSI run crashes; the complete list of files before cleaning is saved in a file \verb|list_run_directory|. Some knowledge about the content of these files is very helpful for debugging if the GSI run crashes. Please check the table \ref{t37} for the meaning of these files. (Note: you may not see all the files in the list because different observational data are used. 
Also, the fixed files prepared for a GSI run, such as CRTM coefficient files, are not included.) +There are many intermediate files in this directory while GSI is running or if the run crashes. The complete list of files in the directory (prior to cleaning) is saved in file \verb|list_run_directory|. Some knowledge about the content of these files is very helpful for debugging if the GSI run crashes. Please check table \ref{t37} for the meaning of these files. (Note: you may not see all the files in the list because different observational data are used. Also, the fixed files prepared for a GSI run, such as CRTM coefficient files, are not included.) \begin{table}[htbp] \centering -\caption{The list of GSI intermediate files} +\caption{List of GSI intermediate files} \begin{tabular}{|p{5cm}|p{10cm}|} \hline \hline File name & Content \\ \hline -sigf03 & This is a temporal file holding binary format background files (typically sigf03, sigf06 and sigf09 if FGAT used). When you see this file, at the minimum, a background file was successfully read in.\\ +sigf03 & This is a temporary file, holding binary format background files (typically sigf03, sigf06 and sigf09 if FGAT used). When you see this file, at the minimum, a background file was successfully read in.\\ \hline -siganl & Analysis results in binary format. When this file exists, the analysis part has finished.\\ +siganl & Analysis results in binary format. When this file exists, the analysis has finished.\\ \hline -pe????.(conv or instrument\_satellite)\_(outer loop) & Diagnostic files for conventional and satellite radiance observations at each outer loop and each sub-domains (????=subdomain id)\\ +pe????.(conv or instrument\_satellite)\_(outer loop) & Diagnostic files for conventional and satellite radiance observations at each outer loop and each sub-domain (????=subdomain id).\\ \hline -obs\_input.????
& Observation scratch files (each file contains observations for one observation type within whole analysis domain and time window. ????=observation type id in namelist)\\ +obs\_input.???? & Observation scratch files (each file contains observations for one observation type within the whole analysis domain and time window. ????=observation type id in namelist).\\ \hline -pcpbias\_out & Output precipitation bias correction file\\ +pcpbias\_out & Output precipitation bias correction file.\\ \hline \end{tabular} \label{t37} @@ -1333,31 +1409,31 @@ \section{GSI Analysis Result Files in Run Directory}\label{sec3.3} \section{Introduction to Frequently Used GSI Namelist Options} %------------------------------------------------------------------------------- -The complete namelist options and their explanations are listed in Advanced GSI User\textquotesingle s Guide Appendix A. For most GSI analysis applications, only a few namelist variables need to be changed. Here we introduce frequently used variables for regional analyses: +The complete namelist options and their explanations are listed in Appendix A of the Advanced GSI User\textquotesingle s Guide. For most GSI analysis applications, only a few namelist variables need to be changed. Here we introduce frequently used variables for regional analyses: %------------------------------------------------------------------------------- -\subsection{Set Up the Number of Outer Loop and Inner Loop} +\subsection{Set Up the Number of Outer and Inner Loops} %------------------------------------------------------------------------------- To change the number of outer loops and the number of inner iterations in each outer loop, the following three variables in the namelist need to be modified: \begin{itemize} -\item \verb|miter|: number of outer loops of analysis. -\item \verb|niter(1)|: maximum iteration number of inner loop iterations for the 1st outer loop. 
The inner loop will stop when it reaches this maximum number, or reaches the convergence threshold, or when it fails to converge. -\item \verb|niter(2)|: maximum iteration number of inner loop iterations for the 2nd outer loop. -\item If \verb|miter| is larger than 2, repeat \verb|niter| with larger index. +\item \verb|miter|: number of outer analysis loops. +\item \verb|niter(1)|: maximum iteration number of inner loop iterations for the 1\textsuperscript{st} outer loop. The inner loop will stop when it reaches this maximum number, when it reaches the convergence threshold, or when it fails to converge. +\item \verb|niter(2)|: maximum iteration number of inner loop iterations for the 2\textsuperscript{nd} outer loop. +\item If \verb|miter| is larger than two, repeat \verb|niter| with larger index. \end{itemize} %------------------------------------------------------------------------------- \subsection{Set Up the Analysis Variable for Moisture} %------------------------------------------------------------------------------- -There are two moisture analysis variable options. It is decided by the namelist variable: +There are two moisture analysis variable options. It is based on the following namelist variable: \verb|qoption = 1 or 2|: \begin{itemize} -\item If \verb|qoption=1|, the moisture analysis variable is pseudo-relative humidity. The saturation specific humidity, qsatg, is computed from the guess and held constant during the inner loop. Thus, the RH control variable can only change via changes in specific humidity, q. -\item If \verb|qoption=2|, the moisture analysis variable is normalized RH. This formulation allows RH to change in the inner loop via changes to surface pressure (pressure), temperature, or specific humidity. +\item If \verb|qoption=1|, the moisture analysis variable is pseudo-relative humidity. The saturation specific humidity, qsatg, is computed from the guess and held constant during the inner loop. 
Thus, the relative humidity control variable can only change via changes in specific humidity, q. +\item If \verb|qoption=2|, the moisture analysis variable is normalized relative humidity. This formulation allows relative humidity to change in the inner loop via changes to surface pressure, temperature, or specific humidity. \end{itemize} %------------------------------------------------------------------------------- @@ -1368,10 +1444,10 @@ \subsection{Set Up the Background File} \begin{itemize} \item \verb|regional|: if true, perform a regional GSI run using either ARW or NMM inputs as the background. If false, perform a global GSI analysis. If either \verb|wrf_nmm_regional| or \verb|wrf_mass_regional| are true, it will be set to true. -\item \verb|wrf_nmm_regional|: if true, background comes from WRF NMM. When using other background fields, set it to false. -\item \verb|wrf_mass_regional|: if true, background comes from WRF ARW. When using other background fields, set it to false. -\item \verb|nems_nmmb_regional|: if true, background comes from NMMB. When using other background fields, set it to false. -\item \verb|netcdf|: if true, WRF files are in NetCDF format, otherwise WRF files are in binary format. This option only works for performing a regional GSI analysis. +\item \verb|wrf_nmm_regional|: if true, the background comes from WRF-NMM. When using other background fields, set it to false. +\item \verb|wrf_mass_regional|: if true, the background comes from WRF-ARW. When using other background fields, set it to false. +\item \verb|nems_nmmb_regional|: if true, the background comes from NMMB. When using other background fields, set it to false. +\item \verb|netcdf|: if true, WRF files are in NetCDF format, otherwise WRF files are in binary format. This option only works for a regional GSI analysis. 
\end{itemize} %------------------------------------------------------------------------------- @@ -1381,9 +1457,9 @@ \subsection{Set Up the Output of Diagnostic Files} The following variables tell the GSI to write out diagnostic results in certain loops: \begin{itemize} -\item \verb|write_diag(1)|: if true, write out diagnostic data in the beginning of the analysis, so that we can have information on Observation $-$ Background (O-B) . -\item \verb|write_diag(2)|: if true, write out diagnostic data at the end of the 1st (before the 2nd outer loop starts) . -\item \verb|write_diag(3)|: if true, write out diagnostic data at the end of the 2nd outer loop (after the analysis finishes if the outer loop number is 2), so that we can have information on Observation $-$ Analysis (O-A) +\item \verb|write_diag(1)|: if true, write out diagnostic data in the beginning of the analysis, so that we can have information on observation $-$ background (O-B) differences. +\item \verb|write_diag(2)|: if true, write out diagnostic data at the end of the 1\textsuperscript{st} outer loop (before the 2\textsuperscript{nd} outer loop starts). +\item \verb|write_diag(3)|: if true, write out diagnostic data at the end of the 2\textsuperscript{nd} outer loop (after the analysis finishes if the outer loop number is two), so that we can have information on observation $-$ analysis (O-A) differences. \end{itemize} Please check appendix A.2 for the tools to read the diagnostic files. @@ -1414,10 +1490,10 @@ \subsection{Set Up the GSI Recognized Observation Files} \end{scriptsize} \begin{itemize} -\item \verb|dfile|: GSI recognized observation file name. The observation file contains observations used for a GSI analysis. This file can include several observation variables from different observation types. The file name in this parameter will be read in by GSI. This name can be changed as long as the name in the link from the BUFR/PrepBUFR file in the run scripts also changes correspondingly. 
-\item \verb|dtype|: analysis variable name that GSI can read in and handle. Please note this name should be consistent with that used in the GSI code. +\item \verb|dfile|: GSI recognized observation file name. The observation file contains observations used for a GSI analysis. This file can include several observation variables from different observation types. The file name listed by this parameter will be read in by GSI. This name can be changed as long as the name in the link from the BUFR/PrepBUFR file in the run scripts also changes correspondingly. +\item \verb|dtype|: analysis variable name that GSI can read in. Please note this name should be consistent with that used in the GSI code. \item \verb|dplat|: sets up the observation platform for a certain observation, which will be read in from the file \verb|dfile|. -\item \verb|dsis|: sets up data name (including both data type and platform name) used inside GSI. +\item \verb|dsis|: sets up the data name (including both data type and platform name) used inside GSI. \end{itemize} Please see Section 4.3 for examples and explanations of these variables. @@ -1426,7 +1502,7 @@ \subsection{Set Up the GSI Recognized Observation Files} \subsection{Set Up Observation Time Window} %------------------------------------------------------------------------------- -In the namelist section \verb|OBS_INPUT|, use \verb|time_window_max| to set maximum half time window (hours) for all data types. In the \verb|convinfo| file, you can use the column twindow to set the half time window for a certain data type (hours). For conventional observations, only observations within the smaller window of these two will be kept for further processing. For others, observations within \verb|time_window_max| will be kept for further processing. +In the namelist section \verb|OBS_INPUT|, use \verb|time_window_max| to set the maximum half time window (hours) for all data types. 
In the \verb|convinfo| file, you can use the column "twindow" to set the half time window for a certain data type (hours). For conventional observations, only observations within the smaller window of these two will be kept for further processing. For others, observations within \verb|time_window_max| will be kept for further processing. %------------------------------------------------------------------------------- \subsection{Set Up Data Thinning} @@ -1434,7 +1510,7 @@ \subsection{Set Up Data Thinning} 1) Radiance data thinning -Radiance data thinning is controlled through two GSI namelist variables in the section \verb| &OBS_INPUT|. Below is an example of the section: +Radiance data thinning is controlled through two GSI namelist variables in the section \verb| &OBS_INPUT|. Below is an example: \begin{scriptsize} \begin{verbatim} @@ -1454,14 +1530,14 @@ \subsection{Set Up Data Thinning} \end{verbatim} \end{scriptsize} -The two namelist variables that control the radiance data thinning are real array dmesh in the 1st line and the dthin values in the 6th column. The dmesh gives a set of the mesh sizes in unit km for radiance thinning grids, while the dthin defines if the data type it represents needs to be thinned and which thinning grid (mesh size) to use. If the value of \verb|dthin| is: +The two namelist variables that control the radiance data thinning are real array "dmesh" in the 1\textsuperscript{st} line and the "dthin" values in the 6\textsuperscript{th} column. The "dmesh" array sets mesh sizes for radiance thinning grids in kilometers, while "dthin" defines if the data type it represents needs to be thinned and which thinning grid (mesh size) to use. If the value of \verb|dthin| is: \begin{itemize} -\item an integer less than or equal to 0, no thinning is needed -\item an integer larger than 0, this kind of radiance data will be thinned in a thinning grid with the mesh size defined as dmesh (dthin). 
+\item an integer less than or equal to zero, no thinning is needed +\item an integer larger than zero, this kind of radiance data will be thinned using the mesh size defined as dmesh (dthin). \end{itemize} -The following gives several thinning examples defined by the above sample \verb| &OBS_INPUT| section: +The following section provides several thinning examples defined by the above sample \verb| &OBS_INPUT| section: \begin{itemize} \item Data type \verb|ps| from prepbufr: no thinning because \verb|dthin=0| \item Data type \verb|gps_ref| from gpsrobufr: no thinning because \verb|dthin=0| diff --git a/doc/GSI_user_guide/gsi_ch4.tex b/doc/GSI_user_guide/gsi_ch4.tex index 729829797..e763fca70 100644 --- a/doc/GSI_user_guide/gsi_ch4.tex +++ b/doc/GSI_user_guide/gsi_ch4.tex @@ -1,9 +1,9 @@ -\chapter{GSI Diagnostics and Tuning} +\chapter{GSI Diagnostics and Tuning}\label{gsi_diag} \setlength{\parskip}{12pt} -The guidance in this chapter will help users to understand how and where to check the output from GSI to determine whether a run was successful. Properly checking the GSI output will also provide useful information to diagnose potential errors in the system. The chapter starts with an introduction to the content and structure of the GSI standard output (\textbf{stdout}). It continues with the use of a single observation to check the features of the GSI analysis. Then, observation usage control, analysis domain partition, fit files, and the optimization process will all be presented from information within the GSI output files (including stdout). +The guidance in this chapter will help users understand how and where to check output from GSI to determine whether a run was successful. Properly checking the GSI output will also provide useful information to diagnose potential errors in the system. This chapter starts with an introduction to the content and structure of the GSI standard output file: (\textbf{stdout}). 
It continues with the use of a single observation to check the features of the GSI analysis. Then, observation usage control, analysis domain partitioning, fit files, and the optimization process will all be presented from information within the GSI output files (including \textbf{stdout}). -This chapter follows the online case example for 2014061700. This case uses a WRF-ARW NetCDF file as the background and analyzes several observations typical for operations, including most conventional observation data, several radiance data (AMSU-A, HIRS4, and MHS), and GPSRO data. The case was run on a Linux cluster supercomputer, using 4 processors. Users can follow this test to reproduce the following results by visiting: +This chapter follows the online case example for 2014061700. This case uses a WRF-ARW NetCDF file as the background and analyzes several observations typical for operations, including most conventional observation data, several radiance data sets (AMSU-A, HIRS4, and MHS), and GPSRO data. The case was run on a Linux cluster supercomputer, using four processors. Users can execute this test to reproduce the following results by visiting: \begin{center} \url{http://www.dtcenter.org/com-GSI/users/tutorial/index.php} @@ -16,25 +16,25 @@ \section{Understanding Standard Output (\textit{stdout})} In Section \ref{sec3.3}, we listed the files present in the GSI run directory following a successful GSI analysis and briefly introduced the contents of several important files. Of these, \textbf{stdout} is the most useful because critical information about the GSI analysis can be obtained from the file. From \textbf{stdout}, users can check if the GSI has successfully completed, if optimal iterations look correct, and if the background and analysis fields are reasonable. Understanding the content of this file can also be very helpful for users to find where and why the GSI failed if it crashes. 
-The structure of \textbf{stdout} follows the typical steps in a meteorological data analysis system: +The structure of \textbf{stdout} follows the typical steps of a meteorological data analysis system: \begin{enumerate} \item Read in all data and prepare analysis: \begin{itemize} \item Read in configuration (namelist) -\item Read in background +\item Read in the background \item Read in observations \item Partition domain and data for parallel analysis \item Read in constant fields (fixed files) \end{itemize} \item Calculate observation innovations \item Optimal iteration (analysis) -\item Save analysis result +\item Save analysis results \end{enumerate} -In this section, the detailed structure and content of \textbf{stdout} are explained using the v3.5 online example case: 2014061700. To keep the output concise and make it more readable, most repeated content was deleted (shown by the dotted line). For the same reason, the accuracy of some numbers has been reduced to avoid line breaks in \textbf{stdout}. +In this section, the detailed structure and content of \textbf{stdout} are explained using the online example case: 2014061700. To keep the output concise and make it more readable, most repeated content was deleted (shown with a dotted line). For the same reason, the precision of some numbers has been reduced to avoid line breaks in \textbf{stdout}. -The following indicates the start of the GSI analysis. It shows the beginning time of this run: +The following indicates the start of the GSI analysis. It shows the date and time that GSI started running: \begin{scriptsize} \begin{verbatim} @@ -44,7 +44,7 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{scriptsize} -This part shows the content of anavinfo, a list of state and control variables. 
+The following shows the content of anavinfo, a list of state and control variables: \begin{scriptsize} \begin{verbatim} @@ -101,7 +101,7 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{scriptsize} -Next is the content of all namelist variables used in this analysis. The 1st part shows the 4DVAR setups. Please note that while this version of the GSI includes some 4DVAR interface, it is untested in this release. The general set up for the GSI analysis (3DVAR) is located in the \verb|&SETUP| section of the GSI namelist. Please check Appendix B for definitions and default values of each namelist variable. +Next is the content of all namelist variables used in this analysis. The 1st part shows 4DVAR setup information. Please note that while this version of the GSI includes a 4DVAR option, it remains untested. The general setup for the GSI analysis (3DVAR) is located in the \verb|&SETUP| section of the GSI namelist. Please check Appendix B for definitions and default values of each namelist variable. \begin{scriptsize} \begin{verbatim} @@ -193,16 +193,16 @@ \section{Understanding Standard Output (\textit{stdout})} ... \end{verbatim} \end{scriptsize} -This version of GSI attempts to read multi-time-level backgrounds for option FGAT (First Guess at Appropriate Time), however we only have provided one in this test case. Therefore, there is error information at the beginning of the reading background portion: +This version of GSI attempts to read multiple time level backgrounds for option FGAT (First Guess at Appropriate Time), however we only have provided one time level in this test case. 
Therefore, there is an error while reading background information: \begin{scriptsize} \begin{verbatim} CONVERT_NETCDF_MASS: problem with flnm1 = wrf_inou1, Status = -1021 \end{verbatim} \end{scriptsize} -We can ignore these errors for missing files \textit{wrf\_inou1}, \textit{wrf\_inou2}, \ldots , \textit{wrf\_inou9} because we only ran 3DVAR with one background. +We can ignore errors for missing files \textit{wrf\_inou1}, \textit{wrf\_inou2}, \ldots, and \textit{wrf\_inou9}, because we are only running 3DVAR with one background. -Next, the background fields for the analysis are read in and the maximum, minimum and median values of the fields at each vertical level are displayed. Here, only part of the variables ZNU and T are shown, and all other variables read by the GSI are listed only as the variable name in the NetCDF file(rmse\_var = T). The maximum and minimum values are useful for a quick verification that the background fields have been read successfully. From this section, we also know the time (\verb|iy,m,d,h,m,s|) and dimension (\verb|nlon,lat,sig_regional|) of the background field. +Next, the background fields for the analysis are read in, and the maximum, minimum, and median values of the fields at each vertical level are displayed. Here, only part of the variables ZNU and T are shown, with all other variables read by the GSI listed solely as the variable name in the NetCDF file(rmse\_var = T). Maximum and minimum values are useful for a quick verification that the background fields have been read successfully. From this section, we also know the time (\verb|iy,m,d,h,m,s|) and dimension (\verb|nlon,lat,sig_regional|) of the background field. \begin{scriptsize} \begin{verbatim} @@ -326,7 +326,7 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{scriptsize} -For some variables, the following NETCDF error information might show up when the variables are not in the background fields. 
These errors don\textquotesingle t affect the GSI run so you can ignore them. +For some variables, the following NETCDF error information might show up when they are not in the background fields. These errors don\textquotesingle t affect the GSI run so you can ignore them. \begin{scriptsize} \begin{verbatim} @@ -343,21 +343,21 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{scriptsize} -Again, some error information on missing background files shows up. Ignore if you are not doing FGAT: +Again, some error information on missing background files shows up. Ignore if you are not doing FGAT. \begin{scriptsize} \begin{verbatim} CONVERT_NETCDF_MASS: problem with flnm1 = wrf_inou4, Status = -1021 \end{verbatim} \end{scriptsize} -Following this is information on the byte order of the binary background files. Because we used a NetCDF file, there is no need to be concerned with byte order. When using a binary format background, byte-order can be a problem. Beginning with the release version v3.2, GSI can automatically check the background byte-order and read it in right order: +Following this is information on the byte order of the binary background files. Since we used a NetCDF file, there is no need to be concerned with byte order. When using a binary format background, byte-order can be a problem. 
Beginning with the release version v3.2, GSI can automatically check the background byte-order and read it in the right order: \begin{scriptsize} \begin{verbatim} in convert_regional_guess, for wrf arw binary input, byte_swap= F \end{verbatim} \end{scriptsize} -Information on setting the grid related variables, and the beginning and ending indices for thread 1: +Information on setting the grid related variables, and the beginning and ending indices for thread one: \begin{scriptsize} \begin{verbatim} INIT_GRID_VARS: number of threads 1 @@ -365,7 +365,7 @@ \section{Understanding Standard Output (\textit{stdout})} 168 \end{verbatim} \end{scriptsize} -Information on the initial pointer location for each variable in the Jacobian for the use of the satellite radiance data: +Information on the initial pointer location for each variable in the Jacobian for the use of satellite radiance data: \begin{scriptsize} \begin{verbatim} Vars in Rad-Jacobian (dims) @@ -399,7 +399,7 @@ \section{Understanding Standard Output (\textit{stdout})} using restart file date = 2014 6 17 0 \end{verbatim} \end{scriptsize} -Read in radar station information and generate superobs for radar Level-II radial velocity. This case didn\textquotesingle t have radar Level-II velocity data linked. There is warning information about opening the file but this will not impact the rest of the GSI analysis. +Read in radar location information and generate superobs for radar level-II radial velocity. This case didn\textquotesingle t have radar level-II velocity data linked, therefore there is a warning when opening the file, but this will not impact the rest of the GSI analysis. \begin{scriptsize} \begin{verbatim} RADAR_BUFR_READ_ALL: analysis time is 2014 6 17 @@ -418,7 +418,7 @@ \section{Understanding Standard Output (\textit{stdout})} PCPINFO_READ: no pcpbias file.
set predxp=0.0 \end{verbatim} \end{scriptsize} -Read in and show the content of the conventional observation information files (\textit{convinfo}; see Section \ref{sec4.3} for details). Here is part of the stdout shown \textit{convinfo}: +Read in and show the content of the conventional observation information file (\textit{convinfo}; see Section \ref{sec4.3} for details). Here is the part of the \textbf{stdout} file showing information from \textit{convinfo}: \begin{tiny} \begin{verbatim} @@ -443,7 +443,7 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{tiny} -Starting subroutine \textit{glbsoi} and information on reading in background fields from intermediate binary file \textit{sigf03} and partitioning the whole 2D field into subdomains for parallel analysis: +Starting subroutine \textit{glbsoi} with information on reading in background fields from the intermediate binary file \textit{sigf03} and partitioning the whole 2D field into subdomains for parallel analysis: \begin{scriptsize} \begin{verbatim} @@ -468,7 +468,7 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{scriptsize} -Show observation observer is successfully initialized and inquire about the control vectors (space for analysis variables). +Show observation observer as successfully initialized and inquire about the control vectors (space for analysis variables). \begin{scriptsize} \begin{verbatim} @@ -488,9 +488,9 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{scriptsize} -The following information is related to observation ingest processes, which are distributed over all the processors with each processor reading in at least one observation type. To speed up reading process, some of the large datasets will use more than one (ntasks) processor to read. 
+The following information is related to the observation ingest processes, which are distributed over all the processors with each processor reading in at least one observation type. To speed up the reading process, some of the large datasets will use more than one (ntasks) processor for the ingest process. -Before reading in the data from BUFR files, GSI checks the file status on whether the observation time matches the analysis time and how namelist option \textit{offtime\_data} is set (can be used to turn off the time consistent check between observation and analysis time). This step also checks for consistency between the satellite radiance data types in the BUFR files and the usage setups in the \textit{satinfo} files. The following shows stdout information from this step: +Before reading in data from BUFR files, GSI checks the file status to ensure the observation time matches the analysis time and whether the namelist option \textit{offtime\_data} is set (can be used to turn off the time consistency check between observation and analysis time). This step also checks for consistency between the satellite radiance data types in the BUFR files and the usage setups in the \textit{satinfo} files.
The following shows \textbf{stdout} information from this step: \begin{scriptsize} \begin{verbatim} @@ -533,7 +533,7 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{scriptsize} -The list of observation types that will be read in and processors used to read them: +The list of observation types that were read in and processors used to read them: \begin{scriptsize} \begin{verbatim} @@ -559,7 +559,7 @@ \section{Understanding Standard Output (\textit{stdout})} READ_OBS: read 30 amsua amsua_metop-b using ntasks= 1 2 2 0 \end{verbatim} \end{scriptsize} -Display basic statistics for full horizontal surface fields (If radiance BUFR files are not linked, this section will not be in the stdout file): +Display basic statistics for full horizontal surface fields (If radiance BUFR files are not linked, this section will not be in the \textbf{stdout} file): \begin{scriptsize} \begin{verbatim} @@ -581,7 +581,7 @@ \section{Understanding Standard Output (\textit{stdout})} ================================================================================ \end{verbatim} \end{scriptsize} -Loop over all data files to read in observations, also reads in rejection list for surface observations and show GPS observations outside the time window: +Loop over all data files to read in observations, also read in rejection list for surface observations and show GPS observations outside the time window: \begin{tiny} \begin{verbatim} @@ -628,10 +628,9 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{tiny} -Using the above output information, many details on the observations can be obtained. For example, the last line indicates that subroutine \textit{READ\_BUFRTOVS} was called to read in NOAA-19 AMSU-A (\verb|sis=amsua_n19|) from the BUFR file \textit{amsuabufr} (\verb|file=amsuabufr|). 
Furthermore, this kind of data has 20370 observations in the file (\verb|nread=20370|) and 16912 in analysis domain and time-window (\verb|ndata=16912|). The data was thinned on a 60 km coarse grid (\verb|rmesh=60.000000|). +Using the above output information, many details on the observations can be obtained. For example, the last line indicates that subroutine \textit{READ\_BUFRTOVS} was called to read in NOAA-19 AMSU-A (\verb|sis=amsua_n19|) data from the BUFR file \textit{amsuabufr} (\verb|file=amsuabufr|). Furthermore, there are 20370 observations in this file (\verb|nread=20370|) and 16912 in the analysis domain and within the time window (\verb|ndata=16912|). The data was thinned on a 60 km coarse grid (\verb|rmesh=60.000000|). - -The next step partitions observations into subdomains. The observation distribution is summarized below by listing the number of observations for each observation variable in each subdomain (see Section \ref{sec4.4} for more information): +The next step partitions observations into subdomains. The observation distribution is summarized below by listing the number of observations for each variable per subdomain (see Section \ref{sec4.4} for more information): \begin{scriptsize} \begin{verbatim} @@ -669,9 +668,9 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{scriptsize} -From this point forward in the stdout, the output shows many repeated entries. This is because the information is written from inside the outer loop. Typically the outer loop is iterated twice. +From this point forward in the \textbf{stdout} file, the output shows many repeated entries. This is because the information is written from inside the outer loop. Typically the outer loop is run twice. -For each outer loop, the work begins with the calculation of the observation innovation. This calculation is done by the subroutine setuprhsall, which sets up the right hand side (rhs) of the analysis equation. 
This information is contained within the stdout file, which is shown in the following sections: +For each outer loop, the work begins with the calculation of the observation innovation. This calculation is done by the subroutine \textbf{setuprhsall}, which sets up the right hand side (rhs) of the analysis equation. This information is contained within the \textbf{stdout} file, which is shown in the following sections: Start the first outer analysis loop: @@ -708,7 +707,7 @@ \section{Understanding Standard Output (\textit{stdout})} ... \end{verbatim} \end{scriptsize} -In the above section, when computing the radiance observation innovation, information on reading in CRTM coefficients follows SETUPALL information. In stdout, only information related to available radiance data are printed. The complete innovation can be found in the diagnostic files for each observation (for details see Appendix A.2): +In the above section, when computing the radiance observation innovation, information on reading in CRTM coefficients follows SETUPALL information. In the \textbf{stdout} file, only information related to available radiance data are printed. 
The complete innovation information can be found in the diagnostic files for each observation (for details see Appendix A.2): \begin{scriptsize} \begin{verbatim} @@ -760,7 +759,7 @@ \section{Understanding Standard Output (\textit{stdout})} update_guess: successfully complete \end{verbatim} \end{tiny} -At the end of the 1st outer loop, print some diagnostics about the guess fields after adding the analysis increment to the guess and diagnostics about the analysis increment: +At the end of the 1\textsuperscript{st} outer loop, print some diagnostics about the analysis increments as well as information on the guess fields after adding the analysis increments to the background: \begin{scriptsize} \begin{verbatim} ================================================================================ @@ -810,7 +809,7 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{scriptsize} -When calculating the radiance data innovation, there is no need to read in CRTM coefficients again because they were already read in the first outer loop: +When calculating the radiance data innovation, there is no need to read in CRTM coefficients again because they were already read in during the first outer loop: \begin{scriptsize} \begin{verbatim} SETUPALL:,obstype,isis,nreal,nchanl= ps ps 20 0 @@ -874,7 +873,7 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{tiny} -Diagnostics of the analysis results after adding the analysis increment to the guess and diagnostics about the analysis increment: +Diagnostics of the analysis results after adding the analysis increment to the guess, as well as diagnostics about the analysis increments: \begin{scriptsize} \begin{verbatim} ================================================================================ @@ -908,7 +907,7 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{scriptsize} -Because the outer loop is set to 2, the completion of the 2nd outer loop is the 
end of the analysis. The next step is to save the analysis results. Again, only a portion of variable T is shown and all other variables are listed according to variable name in the NetCDF file (\verb|rmse_var = T|). The maximum and minimum values are useful information for a quick check of the reasonableness of the analysis: +Because the outer loop is set to two, the completion of the 2\textsuperscript{nd} outer loop marks the end of the analysis. The next step is to save the analysis results. Again, only a portion of variable "T" is shown and all other variables are listed according to variable name in the NetCDF file (\verb|rmse_var = T|). The maximum and minimum values are useful information for a quick sanity check of the analysis: \begin{scriptsize} \begin{verbatim} @@ -974,7 +973,7 @@ \section{Understanding Standard Output (\textit{stdout})} ... \end{verbatim} \end{scriptsize} -After completion of the analysis, the subroutine setuprhsall is called again if \verb|write_diag(3)=.true.|,to calculate analysis O-A information (the third time seeing this information): +After completion of the analysis, the subroutine "setuprhsall" is called again if \verb|write_diag(3)=.true.|, to calculate analysis and O-A information (this marks the third time this information is presented): \begin{scriptsize} \begin{verbatim} @@ -1004,7 +1003,7 @@ \section{Understanding Standard Output (\textit{stdout})} [000]gsisub(): : complete. 
\end{verbatim} \end{scriptsize} -The end of the GSI analysis (a successful analysis must reach this end, but to reach this end is not necessarily a successful analysis), which shows the time of ending this GSI run and some additional resource statistics: +The end of the GSI analysis (reaching this point does not necessarily guarantee a successful analysis), which shows the date and time when GSI finished and some additional resource statistics: \begin{scriptsize} \begin{verbatim} ENDING DATE-TIME JUL 02,2016 20:43:40.422 184 SAT 2457572 @@ -1025,7 +1024,7 @@ \section{Understanding Standard Output (\textit{stdout})} \end{verbatim} \end{scriptsize} -Different GSI applications may write out slightly different stdout information but the major flow and information are the same. A good knowledge of the stdout file gives users a clear picture how GSI runs through each part and the key information on a GSI run like data distribution and inner iterations. +Different GSI applications may write out slightly different \textbf{stdout} file information but the major flow and information are the same. A good knowledge of the \textbf{stdout} file gives users a clear picture of how GSI runs and the key information provided during a GSI run like data distribution and inner iterations. %------------------------------------------------------------------------------- @@ -1033,7 +1032,7 @@ \section{Single Observation Test} \label{sec4.2} %------------------------------------------------------------------------------- -A single observation test is a GSI run with only one (pseudo) observation at a specific location of the analysis domain. By examining the analysis increments from a single observation test, one can visualize the important features of the analysis, such as the ratio of background error and observation error variance and the pattern of the background error covariance. 
Therefore, the single observation test is the first check that users should do after successfully installing the GSI. +A single observation test is a GSI run where only one (pseudo) observation is assimilated from a specific time and location within the analysis domain. By examining the analysis increments from a single observation test, one can visualize the important features of the analysis, such as the ratio of background error and observation error variance and the pattern of the background error covariance. Therefore, the single observation test is the first thing that users should run after successfully installing the GSI. %------------------------------------------------------------------------------- \subsection{Setup a Single Observation Test} %------------------------------------------------------------------------------- @@ -1067,7 +1066,7 @@ \subsection{Setup a Single Observation Test} Note: \begin{itemize} -\item Please check Appendix C in this User\textquotesingle s Guide for the explanation of each parameter. From these parameters, we can see that a useful observation in the analysis should include information like the observation type (\verb|oneob_type|), value (\verb|maginnov|), error (\verb|magoberr|), location (\verb|oblat|, \verb|oblong|, \verb|obpres|), and time (\verb|obdattim|, \verb|obhourset|). Users can dump out (use \textit{ncdump}) the global attributes from the NetCDF background file and set \verb|oblat|=\textit{CEN\_LAT}, \verb|oblong|=\textit{360-CEN\_LON} to have the observation at the center of the domain. +\item Please check Appendix C in the User\textquotesingle s Guide for the explanation of each parameter. From these parameters, we can see that a useful observation in the analysis should include information like the observation type (\verb|oneob_type|), value (\verb|maginnov|), observation error (\verb|magoberr|), location (\verb|oblat|, \verb|oblong|, \verb|obpres|), and time (\verb|obdattim|, \verb|obhourset|).
Users can dump out (use \textit{ncdump}) the global attributes from the NetCDF background file and set \verb|oblat|=\textit{CEN\_LAT}, \verb|oblong|=\textit{360-CEN\_LON} to have the observation at the center of the domain. \item In the analysis, the GSI first generates a prepbufr file including only one observation based on the information given in the namelist \verb|&SINGLEOB_TEST| section. To generate this prepbufr file, the GSI needs to read in a PrepBUFR table, which is not needed when running a GSI analysis with real observations. The BUFR table is in the \textit{fix/} directory and needs to be copied to the run directory. We have put the following lines in the GSI run script for the single observation test: \begin{scriptsize} \begin{verbatim} @@ -1081,7 +1080,7 @@ \subsection{Setup a Single Observation Test} \subsection{Examples of Single Observation Tests for GSI} %------------------------------------------------------------------------------- -Figure \ref{fig:singleobs} is a single observation test that has a temperature observation (\verb|oneob_type='t'|) with a 1 degree innovation (\verb|maginnov=1.0|) and a 0.8 degree observation error (\verb|magoberr=0.8|). The background error covariance converted from global (GFS) BE was picked for better illustration. +Figure \ref{fig:singleobs} is a single observation test that has a temperature observation (\verb|oneob_type='t'|) with a one degree innovation (\verb|maginnov=1.0|) and a 0.8 degree observation error (\verb|magoberr=0.8|). The background error covariance converted from global (GFS) BE was picked to provide for better illustration. \begin{figure}[h!] \centering @@ -1090,7 +1089,7 @@ \subsection{Examples of Single Observation Tests for GSI} \label{fig:singleobs} \end{figure} -This single observation was located at the center of the domain. The results are shown with figures of the horizontal and vertical cross sections through the point of maximum analysis increment. 
The Figure \ref{fig:singleobs} was generated using NCL scripts, which can be found in the \textit{util/Analysis\_Utilities/plots\_ncl} directory, introduced in Section A.4 . +This single observation was located at the center of the domain. The results are shown with figures of the horizontal and vertical cross sections through the point of maximum analysis increment. The Figure \ref{fig:singleobs} was generated using NCL scripts, which can be found in the \textit{util/Analysis\_Utilities/plots\_ncl} directory, introduced in Section A.4. %------------------------------------------------------------------------------- \section{Control Data Usage} @@ -1100,30 +1099,30 @@ \section{Control Data Usage} Observation data used in the GSI analysis can be controlled through three parts of the GSI system: \begin{enumerate} -\item In GSI run script, through linking observation BUFR files to working directory -\item In GSI namelist (inside \textit{comgsi\_namelist.sh}), through section \verb|&OBS_INPUT| -\item Through parameters in info files (e.g.: convinfo, satinfo, etc) +\item In the GSI run script, by linking observation BUFR files to the working directory +\item In section \verb|&OBS_INPUT| of the GSI namelist (inside \textit{comgsi\_namelist.sh}) +\item Through parameters in info files (e.g.: convinfo, satinfo, etc.) \end{enumerate} -Each part gives different levels of control to the data usage in the GSI, which is introduced below: +Each part provides different levels of control for data usage in the GSI, which is introduced below: \begin{enumerate}[leftmargin=*] -\item Link observation BUFR files to working directory in GSI run script:\\ +\item Link observation BUFR files to the working directory in the GSI run script:\\ -All BUFR/PrepBUFR observation files need to be linked to the working directory with GSI recognized names before can be used by GSI analysis. The run script (\textit{run\_gsi\_regional.ksh}) makes these links after locating the working directory. 
Turning on or off these links can control the use of all the data contained in the BUFR files. Table \ref{tab41} provides a list of all default observation file names recognized by GSI and the corresponding examples of the observation BUFR files from NCEP. The following is the first 3 rows of the table as an example: +All BUFR/PrepBUFR observation files need to be linked to the working directory with GSI recognizable names before they can be used in a GSI analysis. The run script (\textit{run\_gsi\_regional.ksh}) makes these links after locating the working directory. Turning these links on or off can control the use of all the data contained in the BUFR files. Table \ref{tab41} provides a list of all default observation file names recognized by GSI and the corresponding examples of the observation BUFR files from NCEP. The following is the first three rows of the table as an example: \begin{table}[htbp] \centering -\caption{list of all default observation file names recognized by GSI} +\caption{List of all default observation file names recognized by GSI.} \begin{tabular}{|p{2cm}|p{9cm}|p{5cm}|} \hline \hline GSI Name & Content & Example file names \\ \hline -prepbufr & Conventional observations, including ps, t, q, pw, uv, spd, dw, sst, from observation platforms such as METAR, sounding, et al. & \textit{gdas1.t12z.prepbufr} \\ +prepbufr & Conventional observations, including ps, t, q, pw, uv, spd, dw, sst, from observation platforms such as METAR, soundings, etc. 
& \textit{gdas1.t12z.prepbufr} \\ \hline satwndbufr & satellite winds & \textit{gdas1.t12z.satwnd.tm00.bufr\_d} \\ \hline -amsuabufr & AMSU-A 1b radiance (brightness temperatures) from satellites NOAA-15, 16, 17,18, 19 and METOP-A/B & \textit{gdas1.t12z.1bamua.tm00.bufr\_d} \\ +amsuabufr & AMSU-A 1b radiance (brightness temperatures) from satellites NOAA-15, 16, 17,18, 19, and METOP-A/B & \textit{gdas1.t12z.1bamua.tm00.bufr\_d} \\ \hline \end{tabular} \label{tab41} @@ -1140,11 +1139,11 @@ \section{Control Data Usage} ln -s ${OBS_ROOT}/gdas1.t12z.1bamua.tm00.bufr_d amsuabufr \end{verbatim} \end{scriptsize} -The GSI recognized default observation filenames are set up in the namelist section \verb|&OBS_INPUT|, which certainly can be changed based on application needs (details see below). \\ +The GSI recognized default observation filenames are set up in the namelist section \verb|&OBS_INPUT|, which can be changed based on application needs (see below for details). \\ -\item In GSI namelist (inside \textit{comgsi\_namelist.sh}), section \verb|&OBS_INPUT|:\\ +\item In the GSI namelist (inside \textit{comgsi\_namelist.sh}), section \verb|&OBS_INPUT|:\\ -In this namelist section, observation files (column of dfile) are tied to the observation variables used inside the GSI code (column of dsis), for example, part of section \verb|OBS_INPUT| shows: +In this namelist section, observation files ("dfile" column) are tied to the observation variables used inside the GSI code ("dsis" column). For example, part of section \verb|OBS_INPUT| shows: \begin{scriptsize} \begin{verbatim} @@ -1173,7 +1172,7 @@ \section{Control Data Usage} \end{verbatim} \end{scriptsize} -This setup tells GSI that conventional observation variables \verb|ps|, \verb|t|, and \verb|q| should be read in from the file prepbufr and AMSU-A radiances from NOAA-15 and -18 satellites should be read in from the file amsuabufr. 
Deleting a particular line in \verb|&OBS_INPUT| will turn off the use of the observation variable presented by the line in the GSI analysis but other variables under the same type still can be used. For example, if we delete: +This setup tells GSI that conventional observation variables \verb|ps|, \verb|t|, and \verb|q| should be read in from the file prepbufr, while AMSU-A radiances from NOAA-15 and NOAA-18 satellites should be read in from the file \textbf{amsuabufr}. Deleting a particular line in \verb|&OBS_INPUT| will turn off the use of the observation variable presented by the line in the GSI analysis but other variables under the same type can still be used. For example, if we delete: \begin{scriptsize} \begin{verbatim} @@ -1184,7 +1183,7 @@ \section{Control Data Usage} Then, the AMSU-A observation from NOAA-15 will not be used in the analysis but the AMSU-A observations from NOAA-18 will still be used.\\ -The observation filename in dfile can be different from the sample script (\textit{comgsi\_namelist.ksh}). If the filename in dfile has been changed, the link from the BUFR files to the GSI recognized name in the run script also needs to be changed correspondingly. For example, if we change the dfile for amsuabufr file for NOAA-15 to be \verb|amsuabufr_n15| +The observation filename in "dfile" can be different from the sample script (\textit{comgsi\_namelist.ksh}). If the filename in "dfile" has been changed, the link from the BUFR files to the GSI recognized name in the run script also needs to be changed correspondingly. For example, if we change the "dfile" in \textbf{amsuabufr} for NOAA-15 to be \verb|amsuabufr_n15|, \begin{scriptsize} \begin{verbatim} @@ -1203,12 +1202,12 @@ \section{Control Data Usage} \end{verbatim} \end{scriptsize} -The GSI will read NOAA-18 AMSU-A observations from file amsuabufr and NOAA-15 AMSU-A observations from file \verb|amsuabufr_n15| based on the above changes to the run scripts and namelist. 
In this example, both amsuabufr and \verb|amsuabufr_15| are linked to the same BUFR file and NOAA-15 AMSU-A and NOAA-18 AMSU-A observations are still read in from the same BUFR file. If amsuabufr and \verb|amsuabufr_15| link to different BUFR files, then NOAA-15 AMSU-A and NOAA-18 AMSU-A will be read in from different BUFR files. Clearly, the changeable filename in \textit{dfile} gives GSI more capability to handle multiple data resources.\\ +The GSI will read NOAA-18 AMSU-A observations from file \textbf{amsuabufr} and NOAA-15 AMSU-A observations from file \verb|amsuabufr_n15| based on the above changes to the run scripts and namelist. In this example, both \textbf{amsuabufr} and \verb|amsuabufr_n15| are linked to the same BUFR file and NOAA-15 AMSU-A and NOAA-18 AMSU-A observations are still read in from the same BUFR file. If \textbf{amsuabufr} and \verb|amsuabufr_n15| link to different BUFR files, then NOAA-15 AMSU-A and NOAA-18 AMSU-A will be read in from different BUFR files. The changeable filename in \textit{dfile} gives GSI more flexibility to handle multiple data resources.\\ \item Use info files to control data usage\\ -For each variable, observations can come from multiple platforms (data types or observation instruments). For example, surface pressure (ps) can come from METAR observation stations (data type 187) and Rawinsonde (data type 120). There are several files named *info in the GSI system (located in \textit{./fix}) to control the usage of observations based on the observation platform. Table \ref{tab42} is a list of info files and their function: +For each variable, observations can come from multiple platforms (data types or observation instruments). For example, surface pressure (ps) can come from METAR observation stations (data type 187) and rawinsonde (data type 120). There are several files named *info in the GSI system (located in \textit{./fix}) to control the usage of observations based on the observation platform.
Table \ref{tab42} is a list of info files and their function: \begin{table}[htbp] \centering \caption{The content of info files } @@ -1217,13 +1216,13 @@ \section{Control Data Usage} \hline File name in GSI & Function and Content \\ \hline -convinfo & Control the usage of conventional data, including tcp, ps, t, q, pw, sst, uv, spd, dw, radial wind (Level 2 \textit{rw} and 2.5 \textit{srw}), gps, \textit{pm2\_5} \\ +convinfo & Control the usage of conventional data, including tcp, ps, t, q, pw, sst, uv, spd, dw, radial wind (Level 2 \textit{rw} and 2.5 \textit{srw}), gps, and \textit{pm2\_5} \\ \hline -satinfo & Control the usage of satellite data. Instruments include AMSU-A/B, HIRS3/4, MHS, ssmi, ssmis, iasi, airs, sndr, cris, amsre, imgr, seviri, atms, avhrr3, etc. and satellites include NOAA 15, 17, 18, 19, aqua, GOES 11, 12, 13, METOP-A/B, NPP, DMSP 15,16,17,18,19,20, +satinfo & Control the usage of satellite data. Instruments include AMSU-A/B, HIRS3/4, MHS, ssmi, ssmis, iasi, airs, sndr, cris, amsre, imgr, seviri, atms, avhrr3, etc., and satellites include NOAA 15, 17, 18, 19, aqua, GOES 11, 12, 13, METOP-A/B, NPP, DMSP 15,16,17,18,19,20, M08, M09, M10, etc.\\ -ozinfo & Control the usage of ozone data, including sbuv6, 8 from NOAA 14, 16, 17, 18, 19. omi\_aura, gome\_metop-a, mls\_aura \\ +ozinfo & Control the usage of ozone data, including sbuv6, 8 from NOAA 14, 16, 17, 18, 19. omi\_aura, gome\_metop-a, and mls\_aura \\ \hline -pcpinfo & Control the usage of precipitation data, including pcp\_ssmi, pcp\_tmi \\ +pcpinfo & Control the usage of precipitation data, including pcp\_ssmi and pcp\_tmi \\ \hline aeroinfo & Control the usage of aerosol data, including modis\_aqua and modis\_terra \\ \hline @@ -1231,12 +1230,12 @@ \section{Control Data Usage} \label{tab42} \end{table} -The header of each info file includes an explanation of the content of the file. 
Here we discuss the most commonly used two info files: +The header of each info file includes an explanation of the content of the file. Here we discuss the two most commonly used info files: \begin{itemize}[leftmargin=*] \item convinfo\\ -The convinfo is to control the usage of conventional data. The following is the part of the content of convinfo: +The \textbf{convinfo} file controls the usage of conventional data. The following is part of the \textbf{convinfo} file: \begin{tiny} \begin{verbatim} @@ -1283,9 +1282,9 @@ \section{Control Data Usage} sub & prepbufr subtype (not yet available) \\ \hline iuse & flag if to use/not use / monitor data; \newline -=1, use data, the data type will be read and used in the analysis after quality control;\newline -=0, read in and process data, use for quality control, but do NOT assimilate;\newline -=-1, monitor data. This data type will be read in and monitored but not be used in the GSI analysis. \\ += 1, use data, the data type will be read and used in the analysis after quality control;\newline += 0, read in and process data, use for quality control, but do NOT assimilate;\newline += -1, monitor data. This data type will be read in and monitored but not be used in the GSI analysis. 
\\ \hline twindow & time window (+/- hours) for data used in the analysis \\ \hline @@ -1295,15 +1294,15 @@ \section{Control Data Usage} \hline nmiter & cross validation parameter - external iteration to introduce removed data \\ \hline -gross & gross error parameter - gross error \\ +gross & gross error parameter - gross error \\ \hline ermax & gross error parameter - maximum error \\ \hline ermin & gross error parameter - minimum error \\ \hline -var\_b & variational quality control parameter - b parameter \\ +var\_b & variational quality control parameter - b parameter \\ \hline -var\_pg & variational quality control parameter - pg parameter\\ +var\_pg & variational quality control parameter - pg parameter\\ \hline ithin & Flag to turn on thinning (0, no thinning, 1 - thinning) \\ \hline @@ -1313,15 +1312,15 @@ \section{Control Data Usage} \hline npred & Number of bias correction predictors \\ \hline -pmot & the option to keep thinned data as monitored, 0: not to keep, other values: to keep \\ +pmot & Option to keep thinned data as monitored, 0: do not keep, other values: keep \\ \hline -ptime & time interval for thinning, 0, no temporal thinning, other values define time interval (less than 6) \\ +ptime & time interval for thinning, 0, no temporal thinning, other values define time interval (less than six) \\ \hline \end{tabular} \label{tab43} \end{table} -From this table, we can see that parameter iuse is used to control the usage of data and parameter twindow is to control the time window of data usage. Parameters gross, ermax, and ermin are for gross quality control. Through these parameters, GSI can control how to use certain types of the data in the analysis.\\ +From this table, we can see that parameter "iuse" is used to control the usage of data and parameter "twindow" is used to control the time window of data usage. Parameters gross, ermax, and ermin are for gross quality control. 
Through these parameters, GSI can control how to use certain types of data in the analysis.\\ \item satinfo\\ @@ -1370,7 +1369,7 @@ \section{Control Data Usage} \hline error & Variance for each satellite channel \\ \hline -error\_cld & Variance for each satellite channel if it is cloudy \\ +error\_cld & Variance for each satellite channel if cloudy \\ \hline ermax & Error maximum for gross check to observations \\ \hline @@ -1394,7 +1393,7 @@ \section{Domain Partition for Parallelization and Observation Distribution} \label{sec4.4} %------------------------------------------------------------------------------- -The standard output file (\textit{stdout}) has an information block that shows the distribution of different kinds of observations in each sub-domain. This block follows the observation input section. The following is the observation distribution of the case shown in Section \ref{sec4.1}. From the case introduction, we know the prepbufr (conventional data), radiance BUFR files, and GPS BUFR files were used. In this list, the conventional observations (\verb|ps|, \verb|t|, \verb|q|, \verb|pw|, and \verb|uv|), GPSRO (\verb|gps_ref|), and radiance data (\verb|amusa|, \verb|hirs4|, and \verb|mhs| from \verb|Metop-a|, \verb|Metop-b|, \verb|NOAA 15| and \verb|18|) were distributed among 4 sub-domains: +The standard output file (\textit{stdout}) has an information block that shows the distribution of different kinds of observations in each sub-domain. This block follows the observation input section. The following is the observation distribution from the case shown in Section \ref{sec4.1}. From the introduction, we know the prepbufr (conventional data), radiance BUFR files, and GPS BUFR files were used. 
In this list, the conventional observations (\verb|ps|, \verb|t|, \verb|q|, \verb|pw|, and \verb|uv|), GPSRO (\verb|gps_ref|), and radiance data (\verb|amusa|, \verb|hirs4|, and \verb|mhs| from \verb|Metop-a|, \verb|Metop-b|, \verb|NOAA 15|, and \verb|18|) were distributed among four sub-domains: \begin{scriptsize} \begin{verbatim} @@ -1426,7 +1425,7 @@ \section{Domain Partition for Parallelization and Observation Distribution} \section{Observation Innovation Statistics} %------------------------------------------------------------------------------- -The GSI analysis gives a group of files named \textit{fort.2*} to summarize observations fitting to the current solution in each outer loop (except for \textit{fort.220}, see explanation on \textit{fort.220} in next section). The content of some of these files is listed in Table \ref{tab45}: +The GSI analysis provides a set of files named \textit{fort.2*} to summarize observations fit to the current solution in each outer loop (except for \textit{fort.220}, see explanation in the next section). 
The content of some of these files is listed in Table \ref{tab45}: \begin{table}[htbp] \centering @@ -1442,16 +1441,16 @@ \section{Observation Innovation Statistics} \hline \textit{fort.203 or fit\_t1.analysis\_time} & fit of temperature data & K \\ \hline -\textit{fort.204 or fit\_q1.analysis\_time} & fit of moisture data & percent of guess qsaturation \\ +\textit{fort.204 or fit\_q1.analysis\_time} & fit of moisture data & percent of qsaturation guess \\ \hline \textit{fort.205} & fit of precipitation water data & mm \\ \hline -\textit{fort.206} & fit of ozone observations from sbuv6\_n14 (, \_n16, \_n17, \_n18), sbuv8\_n16 (, \_n17, \_n18, \_n19), omi\_aura, gome\_metop-a/b, mls\_aura & \\ +\textit{fort.206} & fit of ozone observations from sbuv6\_n14 (, \_n16, \_n17, \_n18), sbuv8\_n16 (, \_n17, \_n18, \_n19), omi\_aura, gome\_metop-a/b, and mls\_aura & \\ \hline \textit{fort.207 or fit\_rad1.analysis\_time} & fit of satellite radiance data, such as: -amsua\_n15(, n16, n17, n18, metop-a, aqua, n19), amsub\_n17, hirs3\_n17, hirs4\_n19 (, metop-a), etc & \\ +amsua\_n15(, n16, n17, n18, metop-a, aqua, n19), amsub\_n17, hirs3\_n17, hirs4\_n19 (, metop-a), etc. 
& \\ \hline -\textit{fort.208} & fit of prepcipitation rate (pcp\_ssmi, pcp\_tmi) & \\ +\textit{fort.208} & fit of precipitation rate (pcp\_ssmi and pcp\_tmi) & \\ \hline \textit{fort.209} & fit of radar radial wind (rw) & \\ \hline @@ -1463,29 +1462,29 @@ \section{Observation Innovation Statistics} \hline \textit{fort.213} & fit of conventional sst data & C \\ \hline -\textit{fort.214} & Tropical cyclone central pressure & \\ +\textit{fort.214} & fit of tropical cyclone central pressure & \\ \hline -\textit{fort.215} & Lagrangian tracer data & \\ +\textit{fort.215} & fit of Lagrangian tracer data & \\ \hline -\textit{Fort.217} & Fit of aerosol product (aod) & \\ +\textit{fort.217} & fit of aerosol product (aod) & \\ \hline -Fort.218 & Fit of wind gust & \\ +\textit{fort.218} & fit of wind gust & \\ \hline -Fort.219 & Fit of visibility & +\textit{fort.219} & fit of visibility & \\ \hline \end{tabular} \label{tab45} \end{table} -To help users understand the information inside these files, some examples from these files are given in the following sub-sections with corresponding explanations. +To help users understand the information inside these files, some examples are given in the following sub-sections with corresponding explanations. 
%------------------------------------------------------------------------------- \subsection{Conventional observations} \label{sec4.5.1} %------------------------------------------------------------------------------- -Example of files including single level data (\textit{fort.201}, \textit{fort.205}, \textit{fort.213}) +Example of files, including single level data (\textit{fort.201}, \textit{fort.205}, and \textit{fort.213}) \begin{scriptsize} \begin{verbatim} @@ -1516,7 +1515,7 @@ \subsection{Conventional observations} \end{verbatim} \end{scriptsize} -Example of files including multiple level data (\textit{fort.202}, \textit{fort.203}, \textit{fort.204}) +Example of files including multiple level data (\textit{fort.202}, \textit{fort.203}, and \textit{fort.204}) \begin{scriptsize} \begin{verbatim} @@ -1568,13 +1567,13 @@ \subsection{Conventional observations} \end{verbatim} \end{scriptsize} -Please note 5 layers from 600 to 150 hPa have been deleted to make each row fit into one line. Only observation type 220 and 223 are shown as an example. +Please note that five layers from 600 to 150 hPa have been deleted to make each row fit into one line. Only observation type 220 and 223 are shown as an example. 
-The table \ref{tab46} lists the meaning of each item in file \textit{fort.201-213} except file \textit{fort.207}: +Table \ref{tab46} lists the meaning of each item in file \textit{fort.201-213} except file \textit{fort.207}: \begin{table}[htbp] \centering -\caption{list of each item in file fort.201-213 (except fort.207)} +\caption{List of each item in file fort.201-213 (except fort.207).} \begin{tabular}{|p{1cm}|p{10cm}|} \hline \hline @@ -1582,29 +1581,29 @@ \subsection{Conventional observations} \hline \textit{it} & outer loop number \newline = 01: observation - background \newline -= 02: observation - analysis (after 1st outer loop) \newline -= 03: observation - analysis (after 2nd outer loop) \\ += 02: observation - analysis (after 1\textsuperscript{st} outer loop) \newline += 03: observation - analysis (after 2\textsuperscript{nd} outer loop) \\ \hline -\textit{obs} & observation variable (such as uv, ps) and usage of the type, which include: \newline +\textit{obs} & observation variable type (such as uv or ps) and usage, which includes: \newline blank: used in GSI analysis \newline -mon: monitored, (read in but not assimilated by GSI). 
\newline +mon: monitored (read in but not assimilated by GSI) \newline rej: rejected because of quality control in GSI \\ \hline -\textit{type} & prepbufr observation type (see BUFR User\textquotesingle s Guide for details) \\ +\textit{type} & prepbufr observation type (see the BUFR User\textquotesingle s Guide for details) \\ \hline -\textit{styp} & prepbufr observation subtype (not used now) \\ +\textit{styp} & prepbufr observation subtype (not used) \\ \hline \textit{ptop} & for multiple level data: pressure at the top of the layer \\ \hline \textit{pbot} & for multiple level data: pressure at the bottom of the layer \\ \hline -\textit{count} & The number of observations summarized under observation types and vertical layers \\ +\textit{count} & the number of observations summarized under observation types and vertical layers \\ \hline -\textit{bias} & Bias of observation departure for each outer loop (it) \\ +\textit{bias} & bias of observation departure for each outer loop (it) \\ \hline -\textit{rms} & Root Mean Square of observation departure for each outer loop (it) \\ +\textit{rms} & root mean square error of observation departure for each outer loop (it) \\ \hline -\textit{cpen} & Observation part of penalty (cost function) \\ +\textit{cpen} & observation part of penalty (cost function) \\ \hline \textit{qcpen} & nonlinear qc penalty \\ \hline @@ -1612,10 +1611,9 @@ \subsection{Conventional observations} \label{tab46} \end{table} +The contents of the fit files are calculated based on O-B or O-A for each observation. The detailed departure information about each observation is saved in the diagnostic files. For the content of the diagnostic files, please check the content of the array "rdiagbuf" in one of the setup subroutines for conventional data (for example, setupt.f90). We provide a tool in appendix A.2 to help users read in the information from the diagnostic files. 
-The contents of the fit files are calculated based on O-B or O-A for each observation. The detailed departure information about each observation is saved in the diagnostic files. For the content of the diagnostic files, please check the content of the array rdiagbuf in one of the setup subroutines for conventional data, for example, setupt.f90. We provide a tool in appendix A.2 to help users read in the information from the diagnostic files. - -These fit files give lots of useful information on how data are analyzed by the GSI, such as how many observations are used and rejected, what is the bias and rms for certain data types or for all observations, and how analysis results fit to the observation before and after analysis. Again, we use observation type 220 in \textit{fort.202} (\textit{fit\_w1.2014061700}) as an example to illustrate how to read this information. The fit information for observation type 220 (sounding observation) is listed below. Like the previous example, 5 layers from 600 to 150 hPa were deleted to make each row fit into one line. All fit information of observation type 220 are shown. +These fit files give lots of useful information on how data are analyzed by the GSI, such as how many observations are used and rejected, the bias and root mean squared (RMS) error for certain data types or for all observations, and how analysis results fit to the observation before and after analysis. Again, we use observation type 220 in \textit{fort.202} (\textit{fit\_w1.2014061700}) as an example to illustrate how to read this information. The fit information for observation type 220 (soundings) is listed below. Like the previous example, five layers from 600 to 150 hPa were deleted to make each row fit into one line. All fit information of observation type 220 is shown. 
\begin{scriptsize} \begin{verbatim} @@ -1700,9 +1698,9 @@ \subsection{Conventional observations} \end{verbatim} \end{scriptsize} -In loop section \verb|o-g 01|, from \verb|count| line, we can see there are 4231 sounding observations used in the analysis. Among them, 44 are within the 1000-1200 hPa layer. Also from the \verb|count| lines, in the rejection and monitoring section, there are 800 observations rejected and 29 observations being monitored. In the same loop section, from the \verb|bias| line and \verb|rms| lines, we can see the total bias and rms of O-B for soundings is 0.59 and 4.18. The bias and rms of each layer for sounding observation can also be found in the file. +In loop section \verb|o-g 01|, from the \verb|count| line, we can see there were 4231 sounding observations used in the analysis. Among them, 44 were within the 1000-1200 hPa layer. Also from the \verb|count| lines, in the rejection and monitoring section, there were 800 observations rejected and 29 observations monitored. In the same loop section, from the \verb|bias| line and \verb|rms| lines, we can see the total bias and RMS error of O-B for the sounding information is 0.59 and 4.18. The bias and RMS error for each vertical layer can also be found in this file. -When reading bias and rms values from different loops, as shown with the comparison in the following three lines: +Next we can see bias and RMS error values from different loops, as shown with the comparison in the following three lines: \begin{scriptsize} \begin{verbatim} @@ -1712,16 +1710,16 @@ \subsection{Conventional observations} \end{verbatim} \end{scriptsize} -These three lines show that the rms reduced from 4.18 (o-g 01, which is O-B) to 3.73 (o-g 02, which is O-A after 1st outer loop) and then to 3.60 (o-g 03, which is O-A after 2nd outer loop, the final analysis result). 
The reduction in the rms shows the observation type 220 (sounding) was used in the GSI analysis to modify the background fields to fit to the observations. +These three lines show that the RMS error reduced from 4.18 (o-g 01, which is O-B) to 3.73 (o-g 02, which is O-A after the 1\textsuperscript{st} outer loop) and then to 3.60 (o-g 03, which is O-A after the 2\textsuperscript{nd} outer loop, which is also the final analysis result). The reduction in the RMS error shows that observation type 220 (sounding) was used in the GSI analysis to modify the background fields to fit to the observations. %------------------------------------------------------------------------------- \subsection{Satellite Radiance} \label{sec4.5.2} %------------------------------------------------------------------------------- -The file \textit{fort.207} is the statistic fit file for radiance data. Its content includes important information about the radiance data analysis. +The file \textit{fort.207} is the fit file for radiance data. Its content includes important information about the radiance data analysis. -The first part of the file \textit{fort.207} lists the content that corresponds to those in the file satinfo, which is the info file to control the data usage for radiance data. +The first part of the file \textit{fort.207} lists the content that corresponds to those in the file \textbf{satinfo}, which is the info file to control radiance data usage. \begin{tiny} \begin{verbatim} @@ -1737,7 +1735,7 @@ \subsection{Satellite Radiance} \end{verbatim} \end{tiny} -This shows there are 2723 channels listed in the \textit{satinfo} file and the 2723 lines following this line include the detailed setups in the \textit{satinfo} file for each channel. +This shows there are 2723 channels listed in the \textit{satinfo} file and the 2723 lines following this line include the details for each channel. 
The second part of the file is a list of the coefficients for bias correction, after reading the \textit{satbias\_in} file: @@ -1754,9 +1752,9 @@ \subsection{Satellite Radiance} \end{verbatim} \end{tiny} -Each channel has 12 coefficients listed in a line. Therefore, there are 2723 lines of radiance bias correction coefficients for all channels though some of the coefficients are 0. +Each channel has 12 coefficients listed in a line. Therefore, there are 2723 lines of radiance bias correction coefficients for all channels, though some of the coefficients are zero. -The 3rd part of the \textit{fort.207} file is similar to other fit files with similar content repeated in 3 sections to give detailed statistic information about the data in stages before the 1st outer loop, between 1st and 2nd outer loop, and after 2nd outer loop. The results before the 1st outer loop are used here as an example to explain the content of the statistic results: +The 3\textsuperscript{rd} part of the \textit{fort.207} file is similar to other fit files with content repeated in three sections, providing detailed statistics about the data in stages before the 1\textsuperscript{st} outer loop, between the 1\textsuperscript{st} and 2\textsuperscript{nd} outer loops, and after the 2\textsuperscript{nd} outer loop. 
The results before the 1\textsuperscript{st} outer loop are used here as an example to explain the content of the results: \begin{itemize}[leftmargin=*] @@ -1785,11 +1783,11 @@ \subsection{Satellite Radiance} \end{verbatim} \end{scriptsize} -The Table \ref{tab47} lists the meaning of each item in the above statistics: +Table \ref{tab47} outlines the meaning of each item in the above statistics: \begin{table}[htbp] \centering -\caption{content of summarizing radiance observation process in fort.207} +\caption{Summary of the radiance observation fit file (fort.207)} \begin{tabular}{|p{2cm}|p{10cm}|} \hline \hline @@ -1812,25 +1810,25 @@ \subsection{Satellite Radiance} \hline \textit{ireduce} & number of observations that reduce qc bounds in tropics \\ \hline -\textit{ivarl} & number of observations tossed by gross check \\ +\textit{ivarl} & number of observations removed by gross check \\ \hline -\textit{nlgross} & number of observation tossed by nonlinear qc\\ +\textit{nlgross} & number of observations removed by nonlinear qc\\ \hline \textit{qcpenalty} & nonlinear qc penalty from this data type \\ \hline -\textit{qc1-7} & number of observations whose quality control criteria has been adjusted by each qc method (1-7), details see in the Radiance Chapter of the Advanced User?s Guide \\ +\textit{qc1-7} & number of observations whose quality control criteria have been adjusted by each qc method (1-7). 
For details, see the Radiance Chapter of the Advanced User\textquotesingle s Guide \\ \hline \textit{rad total penalty\_all} & summary of penalty for all radiance observation types \\ \hline \textit{rad total qcpenalty\_all} & summary of qcpenalty for all radiance observation types \\ \hline -\textit{rad total failed nonlinqc} & summary of observation tossed by nonlinear qc for all radiance observation types \\ +\textit{rad total failed nonlinqc} & summary of observations removed by nonlinear qc for all radiance observation types \\ \hline \end{tabular} \label{tab47} \end{table} -Note: one radiance observation may include multiple channels, not all channels are used in the analysis. +Note: One radiance observation may include multiple channels, and not all channels are necessarily used in the analysis. \item Summaries for various statistics as a function of channel @@ -1861,11 +1859,11 @@ \subsection{Satellite Radiance} \end{verbatim} \end{scriptsize} -The Table \ref{tab48} lists the meaning of each column in above statistics: +Table \ref{tab48} lists the meaning of each column in the above statistics: \begin{table}[htbp] \centering -\caption{content of fit statistic for each channel in fort.207} +\caption{Content of fit statistics for each channel in the fort.207 file.} \begin{tabular}{|p{1.5cm}|p{10cm}|} \hline \hline @@ -1890,7 +1888,7 @@ \subsection{Satellite Radiance} \hline \textit{9} & penalty contribution from this channel \\ \hline -\textit{10} & sqrt of (observation-guess with bias correction)**2\\ +\textit{10} & square root of (observation-guess with bias correction)**2\\ \hline \textit{11} & standard deviation \\ \hline @@ -1925,11 +1923,11 @@ \subsection{Satellite Radiance} \end{verbatim} \end{scriptsize} -The table \ref{tab49} lists the meaning of each column in the above statistics: +Table \ref{tab49} lists the meaning of each column in the above statistics: \begin{table}[htbp] \centering -\caption{content of final summary section in fort.207} 
+\caption{Content of the final summary section for the fort.207 file.} \begin{tabular}{|p{2.0cm}|p{10cm}|} \hline \hline @@ -1960,8 +1958,6 @@ \subsection{Satellite Radiance} \label{tab49} \end{table} - - Similar to other fit files, a comparison between results from different outer loops can give us very useful information on how much impact each channel and data type has in the GSI. \end{itemize} @@ -2023,28 +2019,27 @@ \section{Convergence Information} \end{verbatim} \end{tiny} -We can see clearly the number of outer loops and the inner loops (Minimization iteration). The meaning of the names (bold) used in \textit{stdout} are explained in the following: +Here, we can see the number of outer and inner loops (minimization iteration). The meaning of the names (bold) used in \textit{stdout} are explained in the following: \begin{itemize} -\item \verb|cost|: the values of cost function, (=J) +\item \verb|cost|: the cost function values, (=J) \item \verb|grad|: inner product of gradients (norm of the gradient (Y*X)) \item \verb|step|: stepsize \item \verb|b|: parameter to estimate the new search direction \end{itemize} -As a quick check, the cost function reduced from 3.915930707165839704E+04 to 2.479985164931967302E+04 in the 1st outerloop and reduced from 2.792919782749931983E+04 to 2.474335402213209454E+04 in the 2nd outer loop. +As a quick check, the cost function reduced from 3.915930707165839704E+04 to 2.479985164931967302E+04 in the 1\textsuperscript{st} outer loop and reduced from 2.792919782749931983E+04 to 2.474335402213209454E+04 in the 2\textsuperscript{nd} outer loop. \item Convergence information in file \textit{fort.220}:\\ - In file \textit{fort.220}, users can find more detailed minimization information about each iteration. A detailed description and example are provided in the Advanced User\textquotesingle s Guide. 
-To evaluate the convergence of the iteration, we usually make plots based on the information from \textit{fort.220}, such as the value of the cost function and the norm of the gradient. The following are example plots showing the evolution of the cost function and the norm of gradient in different outer loops: +To evaluate the iteration convergence, we usually make plots based on the information from \textit{fort.220}, such as the value of the cost function and the norm of the gradient. The following are example plots showing the evolution of the cost function and the norm of the gradient in different outer loops: \begin{figure}[h!] \centering \includegraphics[width=0.7\textwidth]{images/CostGrad} - \caption{Evolution of cost function (left column) and the norm of gradient (right column) in the first outer loop (top raw) and the second outer loop (bottom raw). The Y-axis is the iteration number} + \caption{Evolution of the cost function (left column) and the norm of the gradient (right column) in the first outer loop (top row) and the second outer loop (bottom row). The Y-axis is the iteration number.} \label{fig:costgrad} \end{figure} @@ -2062,11 +2057,11 @@ \subsection{Getting Original Observation Errors} \label{sec4.7.1} %------------------------------------------------------------------------------- -For the global GSI analysis, when \verb|oberrflg| (a namelist option in section \verb|&obsqc|) is true, observation errors are generated based on an external observation error table according to the types of observations. Otherwise, observation errors are read in from the PrepBUFR file. +For the global GSI analysis, when \verb|oberrflg| (a namelist option in section \verb|&obsqc|) is true, observation errors are generated based on an external observation error table according to the observation type. Otherwise, observation errors are read in from the PrepBUFR file. 
-For regional GSI runs, GSI forces the use of an external observation error table to get observation errors no matter what the \verb|oberrflg| is set to (\verb|oberrflg| is forced to be true for regional runs in \textit{gsimod.F90}). +For regional analyses, GSI forces the use of an external observation error table to get observation errors no matter what the \verb|oberrflg| is set to (\verb|oberrflg| is forced to be true for regional runs in \textit{gsimod.F90}). -The external observation error table file, \textit{errtable}, includes observation errors for all types of conventional observations. It is copied from the \textit{~/comGSIv3.5\_EnKFv1.1/fix} directory by the run script. This release package has three sample external observation error table files, \textit{nam\_errtable.r3dv}, \textit{prepobs\_errtable.global}, and \textit{rtma/new\_rtma\_nam\_errtable.r3dv} in the \textit{./fix} directory. The \textit{nam\_errtable.r3dv} is used in the sample run script as a default observation error table. The observation error file is a text file that can be easily edited to tune the error values. The following shows a portion of \textit{nam\_errtable.r3dv} for rawinsondes and its description of each column in Table \ref{tab410}: +The external observation error table file, \textit{errtable}, includes observation errors for all types of conventional observations. It is copied from the \textit{./fix} directory by the run script. This release package has three sample external observation error table files, \textit{nam\_errtable.r3dv}, \textit{prepobs\_errtable.global}, and \textit{rtma/new\_rtma\_nam\_errtable.r3dv} in the \textit{./fix} directory. The \textit{nam\_errtable.r3dv} is used in the sample run script as a default observation error table. The observation error file is a text file that can be easily edited to tune the error values. 
The following shows a portion of \textit{nam\_errtable.r3dv} file for rawinsondes and its description of each column in Table \ref{tab410}: \begin{scriptsize} \begin{verbatim} @@ -2107,8 +2102,7 @@ \subsection{Getting Original Observation Errors} \label{tab410} \end{table} - -For each type of observation, the error table has 6 columns and 33 rows (levels). The 1st column prescribes 33 pressure levels, which cover from 1100 hPa to 0 hPa. The columns 2-6 prescribe the observation errors for temperature (T), relative humidity (RH), horizontal wind component (UV), surface pressure (Ps), and the total column precipitable water (Pw). The missing value is 0.10000E+10. +For each observation type, the error table has six columns and 33 rows (levels). The 1\textsuperscript{st} column prescribes 33 pressure levels, covering 1100 hPa to 0 hPa. Columns 2-6 prescribe observation errors for temperature (T), relative humidity (RH), horizontal wind component (UV), surface pressure (Ps), and the total column precipitable water (Pw). The missing value is 0.10000E+10. The observation error table for each observation type starts with the observation type number defined for the PrepBUFR files, such as: @@ -2119,7 +2113,7 @@ \subsection{Getting Original Observation Errors} \end{verbatim} \end{scriptsize} -The PrepBUFR data type number 100-199 are for temperature (T), moisture (q), and surface pressure (Ps) observations, while number 200-299 are horizontal wind component (UV) observations. The detailed explanation of each data type number can be found from the following table in the EMC website: +The PrepBUFR data type numbers 100-199 are for temperature (T), moisture (q), and surface pressure (Ps) observations, while numbers 200-299 are for horizontal wind component (UV) observations. 
A detailed explanation of each data type number can be found in the following table on the EMC website: \begin{small} \url{http://www.emc.ncep.noaa.gov/mmb/data_processing/prepbufr.doc/table_2.htm} @@ -2132,10 +2126,10 @@ \subsection{Getting Original Observation Errors} \end{small} %------------------------------------------------------------------------------- -\subsection{Observation Rrror Gross Error Check within GSI} +\subsection{Observation Error Gross Error Check within GSI} %------------------------------------------------------------------------------- -The gross error check is an important quality check step to exclude questionable observations that degrade the analysis. Users can adjust the threshold of the gross error check for each data type within the \textit{convinfo} file to make the gross error check tighter or looser for a certain data type. For example, the following is a part of \textit{convinfo} without the last five columns: +The gross error check is an important quality control step to exclude questionable observations that degrade the analysis. Users can adjust the threshold of the gross error check for each data type within the \textit{convinfo} file to make the gross error check more or less strict for a certain data type. For example, the following is a part of the \textit{convinfo} file without the last five columns: \begin{scriptsize} \begin{verbatim} @@ -2147,47 +2141,47 @@ \subsection{Observation Rrror Gross Error Check within GSI} \end{verbatim} \end{scriptsize} -The gross check for each data type is controlled by gross, ermax, and ermin. If an observation has observation error: obserror, then a gross check ratio is calculated: +The gross check for each data type is controlled by columns "gross", "ermax", and "ermin." 
If an observation has observation error set to "obserror," then a gross check ratio is calculated: \textit{ratio = (Observation-Background)/max(ermin,min(ermax,obserror))} -If \textit{ratio > gross}, then this observation fails the gross check and will not be used in the analysis. The unused observation is indicated as "rejection" in the fit files. +If \textit{ratio > gross}, then this observation fails the gross check and will not be used in the analysis. The unused observation is indicated as a "rejection" in the fit files. %------------------------------------------------------------------------------- \section{Background Error Covariance} \label{sec4.8} %------------------------------------------------------------------------------- -The GSI package has several files in \textit{~/comGSIv3.5\_EnKFv1.1/fix/} to hold the pre-computed background error statistics for different GSI applications with different grid configurations. Within the ./fix directory subdirectories \textit{./fix/Big\_Endian} and \textit{./fix/Little\_Endian} contain the fix files corresponding to each endianness. Since the GSI code has a build-in mechanism to interpolate the input background error matrix to any desired analysis grid, the following two background error files can be used to specify the B matrix for any GSI regional application. +The GSI package has several files in \textit{./fix} to hold the pre-computed background error statistics for different GSI applications with different grid configurations. Within the \textit{./fix} directory, the subdirectories \textit{./fix/Big\_Endian} and \textit{./fix/Little\_Endian} contain the fix files corresponding to each endianness. Since the GSI code has a built-in mechanism to interpolate the input background error matrix to any desired analysis grid, the following two background error files can be used to specify the B matrix for any GSI regional application. 
\begin{itemize} -\item \textit{nam\_nmmstat\_na.gcv} : contains the regional background error statistics, computed using forecasts from the NCEP\textquotesingle s NAM model covering North America. The values of this B matrix cover the northern hemisphere with 93 latitude lines from -2.5 degree to 89.5 degree with 60 vertical sigma levels from 0.9975289 to 0.01364. -\item \textit{nam\_glb\_berror.f77.gcv} : contains the global background errors based on the NCEP\textquotesingle s GFS model, a global forecast model. The values of this B matrix covers global with 192 latitude lines from -90 degree to 90 degree and with 42 vertical sigma levels from 0.99597 to 0.013831. +\item \textit{nam\_nmmstat\_na.gcv} : contains the regional background error statistics, computed using forecasts from the NCEP\textquotesingle s NAM model covering North America. The values of this B matrix cover the northern hemisphere with 93 latitude lines, from -2.5 degrees to 89.5 degrees with 60 vertical sigma levels from 0.9975289 to 0.01364. +\item \textit{nam\_glb\_berror.f77.gcv} : contains the global background errors based on NCEP\textquotesingle s GFS model, a global forecast model. The values of this B matrix cover the globe with 192 latitude lines from -90 degrees to 90 degrees and 42 vertical sigma levels from 0.99597 to 0.013831. \end{itemize} -These background error matrix files listed above are Big Endian binary files (therefore located in the \textit{Big\_Endian} directory). In the \textit{Little\_Endian} directory, \textit{nam\_nmmstat\_na.gcv} and \textit{nam\_glb\_berror.f77.gcv} are their Little Endian versions for certain computer platforms that cannot compile GSI with the Big Endian option. In this release version, GSI can be compiled with the Big Endian option with PGI and Intel. +The background error matrix files listed above are in Big Endian binary form (therefore located in the \textit{Big\_Endian} directory). 
In the \textit{Little\_Endian} directory, \textit{nam\_nmmstat\_na.gcv} and \textit{nam\_glb\_berror.f77.gcv} are their Little Endian versions for certain computer platforms that cannot compile GSI with the Big Endian option. In this release version, GSI can be compiled with the Big Endian option with PGI and Intel. %------------------------------------------------------------------------------- -\subsection{Tuning Background Error Covariance through Namelist and Anavinfo} +\subsection{Tuning Background Error Covariance through the Namelist and Anavinfo File} %------------------------------------------------------------------------------- -The final background error covariance matrix used in the GSI analysis are the content from the fixed file "berror", which is a copy of \textit{nam\_nmmstat\_na.gcv} or \textit{nam\_glb\_berror.f77.gcv}, multiplied by several factors set by the namelist and the anavinfo. +The final background error covariance matrix used in the GSI analysis is the content from the fixed file "berror", which is a copy of \textit{nam\_nmmstat\_na.gcv} or \textit{nam\_glb\_berror.f77.gcv}, multiplied by several factors set by the namelist and the \textbf{anavinfo} file. In GSI namelist, three variables are used for tuning horizontal and vertical impact scales: \begin{itemize} \item \verb|vs| scale factor for vertical correlation lengths for background error -\item \verb|hzscl(3)| scale factor for three scales specified for horizontal smoothing -\item \verb|hswgt(3)| weights to apply to each horizontal scales +\item \verb|hzscl(3)| scale factor for three scales specified for horizontal smoothing +\item \verb|hswgt(3)| weights to apply to each horizontal scales \end{itemize} -In the GSI anavinfo files, the column \textbf{as/tsfc\_sdv} in the \textit{control\_vector} section are factors for tuning the variance of each analysis control variable. 
+In the GSI anavinfo file, the column \textbf{as/tsfc\_sdv} in the \textit{control\_vector} section contains factors for tuning the variance of each analysis control variable. -These values can be used to tuning the background error covariance used in the GSI analysis. For each background error matrix file, there are recommended values for these parameters listed in table \ref{tab411}. +These values can be used to tune the background error covariance used in the GSI analysis. For each background error matrix file, there are recommended values for these parameters listed in table \ref{tab411}. \begin{table}[htbp] \centering -\caption{recommended tuning values for the provided B matrix} +\caption{Recommended tuning values for the provided B matrix.} \begin{tabular}{|p{2cm}|p{5cm}|p{5cm}|} \hline \hline @@ -2241,7 +2235,7 @@ \subsection{Tuning Background Error Covariance through Namelist and Anavinfo} \section{Analysis Increments} %------------------------------------------------------------------------------- -Analysis increments are defined as the difference of analysis results minus background. A plot of analysis increments can help users to understand how the analysis procedure modifies the background fields according to observations, background and observation error covariance, and other constraints. You can either calculate \textit{analysis-guess} and plot the difference field or use the tools introduced in Appendix A.4 to make analysis increment figures for different analysis fields. +Analysis increments are defined as the difference between analysis results and the background (A-B). A plot of analysis increments can help users understand how the analysis procedure modifies the background fields according to observations, background and observation error covariances, and other constraints. 
You can either calculate \textit{analysis-guess} and plot the difference field or use the tools introduced in Appendix A.4 to make analysis increment figures for different analysis fields. %------------------------------------------------------------------------------- \section{Running Time and Memory Usage} @@ -2249,7 +2243,7 @@ \section{Running Time and Memory Usage} In addition to analysis increments, run time and memory usage are other important features of an analysis system, especially for operational code like the GSI. -The GSI standard output file (\textit{stdout}) gives the GSI start time and end time at the top and the end of the file. For example: +The GSI standard output file (\textit{stdout}) gives the GSI start time and end time of the analysis at the beginning and end of the file. For example: \begin{scriptsize} \begin{verbatim} @@ -2265,7 +2259,7 @@ \section{Running Time and Memory Usage} \end{verbatim} \end{scriptsize} -This tells us this case started at 20:36:21.760 and ended at 20:43:40.422. Meaning GSI used 7 minutes and 19 seconds to finish. +This tells us the analysis started at 20:36:21.760 and ended at 20:43:40.422, which means GSI used 7 minutes and 19 seconds to finish. Following the ending date-time, there is a resource statistics section at the end of the \textit{stdout} file, which gives information about run time and memory usage for the analysis: diff --git a/doc/GSI_user_guide/gsi_ch5.tex b/doc/GSI_user_guide/gsi_ch5.tex index 6798cc319..9940f2a1a 100644 --- a/doc/GSI_user_guide/gsi_ch5.tex +++ b/doc/GSI_user_guide/gsi_ch5.tex @@ -1,7 +1,7 @@ -\chapter{GSI Applications for Regional 3DVar and 3D Hybrid EnVar} +\chapter{GSI Applications for Regional 3DVar, Hybrid 3DEnVar and Hybrid 4DEnVar}\label{gsi_reg} \setlength{\parskip}{12pt} -In this chapter, the knowledge from the previous chapters will be applied to three regional GSI cases with different data sources. 
These examples are to give users a clear idea on how to set up GSI with various configurations and properly check the run status and analysis results in order to determine if a particular GSI application was successful. Note the examples here only use the WRF ARW system - WRF NMM runs are similar, but require different background and namelist options. +In this chapter, information from the previous chapters will be applied to three regional GSI cases with different data sources. These examples are to give users a clear idea of how to set up GSI with various configurations and properly check the run status and analysis results in order to determine if a particular GSI application was successful. Note that the examples here only use the WRF-ARW system - WRF-NMM runs are similar, but require different background and namelist options. For illustrations of all the cases, it is assumed that the reader has successfully compiled GSI on a local machine. For regional case studies, users should have the following data available: @@ -17,7 +17,7 @@ \chapter{GSI Applications for Regional 3DVar and 3D Hybrid EnVar} \url{ftp://ftpprd.ncep.noaa.gov/pub/data/nccf/com/nam/prod} -\textit{Note: NDAS PrepBUFR file was chosen to increase the amount of data used in the analysis (comparing to NAM PrepBUFR file) } +\textit{Note: An NDAS PrepBUFR file was chosen to increase the amount of data used in the analysis (compared to a NAM PrepBUFR file) } \end{itemize} \item Radiance data and GPS RO data @@ -32,7 +32,7 @@ \chapter{GSI Applications for Regional 3DVar and 3D Hybrid EnVar} \end{enumerate} -The following cases will give users an example of a successful GSI run with various data sources. Users are welcome to download these example data from the GSI users\textquotesingle \ webpage (online case for release version 3.5) or create a new background and get the observation data from the above server. 
The background and observations used in this case study are as follows: +The following cases will give users an example of a successful GSI run with various data sources. Users are welcome to download these example data from the GSI users\textquotesingle \ webpage (online case) or create a new background and get the observational data from the above server. The background and observations used in this case study are as follows: \begin{enumerate} \item Background files: wrfinput\_d01\_2014-06-17\_00:00:00 @@ -47,15 +47,15 @@ \chapter{GSI Applications for Regional 3DVar and 3D Hybrid EnVar} \centering \includegraphics[width=0.7\textwidth]{images/landmask} \end{minipage} - \caption{The terrain (left) and land mask (right) of the background used in this case study} + \caption{The terrain (left) and land mask (right) of the background used in this case study.} \label{fig:terland} \end{figure} \end{itemize} -\item Conventional data: NAM PrepBUFR data from 00UTC 17 June 2014 +\item Conventional data: NAM PrepBUFR data from 0000 UTC 17 June 2014 \begin{itemize} \item File: \textit{nam.t00z.prepbufr.tm00.nr} \end{itemize} -\item Radiance and GPS RO data: GDAS PREPBUFR data from 00 UTC 17 June 2014 +\item Radiance and GPS RO data: GDAS PREPBUFR data from 0000 UTC 17 June 2014 \begin{itemize} \item Files: \textit{gdas.t00z.1bamua.tm00.bufr\_d} @@ -65,19 +65,19 @@ \chapter{GSI Applications for Regional 3DVar and 3D Hybrid EnVar} \end{itemize} \end{enumerate} -This case study was run on a Linux cluster. Starting from version 3.2, the BUFR/PrepBUFR files do not need to be byte-swapped to little endian format. BUFRLIB can automatically handle byte order issues. +This case study was run on a Linux cluster. As of version 3.2, the BUFR/PrepBUFR files do not need to be byte-swapped to little endian format. BUFRLIB can automatically handle byte order issues. 
Assume the background file is located at: \textit{data/2014061700/arw} -all the observations are located at: +All the observations are located at: \textit{data/2014061700/obs} -and the GSI release version 3.5 is located at +And the GSI release version 3.6 is located at: -\textit{code/comGSIv3.5\_EnKFv1.1} +\textit{code/comGSI(version\_number)\_EnKF(version\_number)} %------------------------------------------------------------------------------- \section{Assimilating Conventional Observations with Regional GSI} @@ -92,7 +92,7 @@ \subsection{Run Script} \begin{itemize} \item Set up batch queuing system. \\ -To run GSI with multi-processors, a job queuing head has to be added at the beginning of the \textit{run\_gsi\_regional.ksh} script. The set up of the job queue is dependent on the machine and the job control system. More examples of the setup are described in section \ref{sec3.2.2}. The following example is set up to run on a Linux cluster supercomputer with LSF. The job head is as follows: +To run GSI with multi-processors, a job queuing section has to be added at the beginning of the \textit{run\_gsi\_regional.ksh} script. The set up of the job queue is dependent on the machine and the job control system. More setup examples are described in section \ref{sec3.2.2}. The following example is set up to run on a Linux cluster supercomputer with LSF. The job section is as follows: \begin{scriptsize} \begin{verbatim} @@ -107,10 +107,10 @@ \subsection{Run Script} \end{verbatim} \end{scriptsize} -In order to find out how to set up the job head, a good method is to use an existing MPI job script and copy the job head over. +In order to find out how to set up the job section, a good method is to use an existing MPI job script and copy the job section over. \\ -\item Set up the number of processors and the job queue system used. For this example, LINUX\_PBS and 4 processors are used: +\item Set up the number of processors and the job queue system used. 
For this example, LINUX\_PBS and four processors are used: \begin{scriptsize} \begin{verbatim} @@ -155,7 +155,7 @@ \subsection{Run Script} \end{verbatim} \end{scriptsize} -Set the GSI system used for this case, including the paths of fix files and the CRTM coefficients as well as the location of the GSI executable and the namelist file: +Set up the GSI system used for this case, including the paths of fix files and the CRTM coefficients as well as the location of the GSI executable and the namelist file: \begin{scriptsize} \begin{verbatim} @@ -177,7 +177,7 @@ \subsection{Run Script} \end{verbatim} \end{scriptsize} -This example uses the ARW NetCDF background; therefore \verb|bk_core| is set to 'ARW'. The regional background error covariance file is used in this case, as set by \verb|bkcv_option=NAM|. Finally, the run scripts are set to clean the run directory to delete all temporary intermediate files. +This example uses the ARW NetCDF background; therefore \verb|bk_core| is set to 'ARW.' The regional background error covariance file is used in this case, as set by \verb|bkcv_option=NAM|. Finally, the run scripts are set to clean the run directory to delete all temporary intermediate files. \end{itemize} %------------------------------------------------------------------------------- @@ -213,19 +213,19 @@ \subsection{Run GSI and Check the Run Status} \end{verbatim} \end{scriptsize} -These files are CRTM coefficients that have been linked to this run directory through the GSI run script. Additionally, many other files are linked or copied to this run directory or generated during run, such as: +These are CRTM coefficient files that have been linked to this run directory through the GSI run script. 
Additionally, many other files are linked or copied to this run directory or generated during the run, such as: \begin{itemize} -\item \verb|stdout|: standard out file +\item \verb|stdout|: standard output file \item \verb|wrf_inout|: background file \item \verb|gsiparm.anl|: GSI namelist -\item \verb|prepbufr|: PrepBUFR file for conventional observation +\item \verb|prepbufr|: prepBUFR file for conventional observation \item \verb|convinfo|: data usage control for conventional data \item \verb|berror_stats|: background error file \item \verb|errtable|: observation error file \end{itemize} -The presence of these files indicates that the GSI run scripts have successfully set up a run environment for GSI and the GSI executable is running. While GSI is still running, checking the content of the standard output file (\textit{stdout}) can monitor the status of the GSI analysis: +The presence of these files indicates that the GSI run scripts have successfully set up a run environment for GSI and that the GSI executable is running. While GSI is still running, checking the content of the standard output file (\textit{stdout}) can monitor the status of the GSI analysis: \begin{tiny} \begin{verbatim} @@ -244,7 +244,7 @@ \subsection{Run GSI and Check the Run Status} \end{verbatim} \end{tiny} -The above output shows that GSI is in the inner iteration stage. It may take several minutes to finish the GSI run. Once GSI has finished running, the number of files in the directory will be greatly reduced from those during the run stage. This is because the run script was set to clean the run directory after a successful run. The important analysis result files and configuration files will remain in the run directory. Please check Section \ref{sec3.3} for more details on GSI run results. Upon successful completion of GSI, the run directory looks as follows: +The above output shows that GSI is in the inner iteration stage. It may take several minutes to finish the GSI run. 
Once GSI has finished running, the number of files in the directory will be greatly reduced from those during the run stage. This is because the run script was set to clean the working directory after a successful run. The important analysis result files and configuration files will remain. Please check Section \ref{sec3.3} for more details on GSI run results. Upon successful completion of GSI, the run directory will look as follows: \begin{scriptsize} \begin{verbatim} @@ -269,12 +269,12 @@ \subsection{Check for Successful GSI Completion} \label{sec5.1.3} %------------------------------------------------------------------------------- -It is important to always check for successful completion of the GSI analysis. But, completion of the GSI run without crashing does not guarantee a successful analysis. First, check the \textit{stdout} file in the run directory to make sure GSI completed each step without any obvious problems. The following are several important steps to check: +It is important to always check for a successful completion of the GSI analysis. However, completion of the GSI run without crashing does not guarantee a successful analysis. First, it is necessary to check the \textit{stdout} file in the run directory to make sure GSI completed each step without any obvious problems. The following are several important steps to check: \begin{enumerate} -\item Read in the anavinfo and namelist \\ +\item Read in the anavinfo and namelist files\\ -The following lines show GSI started normally and has read in the anavinfo and namelist: +The following lines show that GSI started normally and has read in the anavinfo and namelist files: \begin{scriptsize} \begin{verbatim} gsi_metguess_mod*init_: 2D-MET STATE VARIABLES: @@ -316,7 +316,7 @@ \subsection{Check for Successful GSI Completion} \end{scriptsize} \item Read in the background field\\ -The following lines in stdout immediately following the namelist section, indicate that GSI is reading the background fields. 
Checking the range of the max and min values will indicate if certain background fields are normal. +The following lines in standard output file, immediately following the namelist section, indicate that GSI is reading the background fields. Checking the range of the 'max' and 'min' values will indicate if certain background fields are normal. \begin{scriptsize} \begin{verbatim} @@ -325,7 +325,7 @@ \subsection{Check for Successful GSI Completion} iy,m,d,h,m,s= 2014 6 17 0 0 0 dh1 = 3 - rmse_var = SMOIS + rmse_var = SMOIS ndim1 = 3 ordering = XYZ staggering = N/A @@ -333,11 +333,11 @@ \subsection{Check for Successful GSI Completion} end_index = 332 215 4 0 WrfType = 104 ierr = 0 - rmse_var = T ndim1 = 3 dh1 = 3 + rmse_var = T ndim1 = 3 dh1 = 3 ............... - rmse_var = U ndim1= 3 + rmse_var = U ndim1= 3 WrfType = 104 WRF_REAL= 104 ierr = 0 ordering = XYZ staggering = N/A start_index = 1 1 1 0 end_index = @@ -352,7 +352,7 @@ \subsection{Check for Successful GSI Completion} \item Read in observational data\\ -Skipping through a majority of the content towards the middle of the stdout file, the following lines will appear: +Skipping through a majority of the content towards the middle of the standard output file, the following lines will appear: \begin{scriptsize} \begin{verbatim} @@ -368,7 +368,7 @@ \subsection{Check for Successful GSI Completion} \item Inner iteration\\ -The inner iteration step in the stdout file will look as follows: +The inner iteration step in the standard output file will look like this: \begin{tiny} \begin{verbatim} @@ -396,7 +396,7 @@ \subsection{Check for Successful GSI Completion} \end{verbatim} \end{tiny} -Following the namelist set up, similar information will be repeated for each inner loop. In this case, 2 outer loops with 50 inner loops in each outer loop have been set. The last iteration looks like: +Following the namelist set up, similar information will be repeated for each inner loop. 
In this case, two outer loops with 50 inner loops in each outer loop are shown. The last iteration looks like this: \begin{tiny} \begin{verbatim} @@ -412,7 +412,7 @@ \subsection{Check for Successful GSI Completion} \end{verbatim} \end{tiny} -Clearly, at the 45th iteration GSI met the stop threshold before getting to the maximum iteration number (50). As a quick check of the iteration: the J value should descend with each iteration. Here, J has a value of 3.249585514567150676E+04 at the beginning and a value of 2.283066393454704667E+04 at the final iteration. This means the value has reduced by about one third, which is an expected reduction.\\ +At the 45th iteration, GSI met the stop threshold before getting to the maximum iteration number (50). As a quick check, the J value should descend with each iteration. Here, J has a value of 3.249585514567150676E+04 at the beginning and a value of 2.283066393454704667E+04 for the final iteration. Therefore, the value has reduced by about one third, which is an expected reduction.\\ \item Write out analysis results\\ @@ -424,7 +424,7 @@ \subsection{Check for Successful GSI Completion} max,min psfc= 102799.9 66793.78 max,min MU= 2799.898 -1195.195 - rmse_var=MU + rmse_var=MU ordering=XY WrfType,WRF_REAL= 104 104 ndim1= 2 @@ -453,7 +453,7 @@ \subsection{Check for Successful GSI Completion} \end{verbatim} \end{scriptsize} -After carefully investigating each portion of the stdout file, it can be concluded that GSI successfully ran through every step and there were no run issues. A more complete description of the stdout file can be found in Section \ref{sec4.1}. However, it cannot be concluded that GSI did a successful analysis until more diagnosis has been completed. +After carefully investigating each portion of the standard output file, it can be concluded that GSI successfully ran through every step and there were no run issues. 
A more complete description of the standard output file can be found in Section \ref{sec4.1}. However, it cannot be concluded that GSI successfully produced an analysis until more diagnosis has been completed. \end{enumerate} @@ -465,12 +465,12 @@ \subsection{Diagnose GSI Analysis Results} \subsubsection{Check Analysis Fit to Observations} %------------------------------------------------------------------------------- -The analysis uses observations to correct the background fields to fit the observations closer under certain constraints. The easiest way to confirm the GSI analysis results fit the observations better than the background is to check a set of files with names \textit{fort.2??}, where ?? is a number from 01 to 19 or larger than 20. In the run scripts, several fort files have also been renamed as \textit{fit\_t1} (\textit{q1}, \textit{p1}, \textit{rad1}, \textit{w1}).\textit{YYYYMMDDHH}. Please check Section \ref{sec4.5.1} for a detailed explanation of the fit files. Here illustrates how to use these fit files. +The analysis uses observations to correct the background fields to fit to the observations under certain constraints. The easiest way to confirm the GSI analysis results fit the observations better than the background is to check a set of files with names \textit{fort.2??}, where ?? is a number from 01 to 19 or larger than 20. In the run scripts, several "fort" files have also been renamed as \textit{fit\_t1} (\textit{q1}, \textit{p1}, \textit{rad1}, \textit{w1}).\textit{YYYYMMDDHH}. Please check Section \ref{sec4.5.1} for a detailed explanation of the fit files. Here, we illustrate how to use these fit files. \begin{itemize}[leftmargin=*] \item \textit{fit\_t1.2014061700} (\textit{fort.203}) -This file shows how the background and analysis fields fit to temperature observations. The contents of this file show five data types were used in the analysis: 120, 130, 132, 180 and 182. 
Also included are the number of observations, bias and rms of observation minus background (o-g 01) or analysis (o-g 03) on each level for the three data types. The following is a part of the file, only showing data types 120 and 180: +This file shows how the background and analysis fields fit to temperature observations. The contents of this file show five data types were used in the analysis: 120, 130, 132, 180, and 182. Also included are the number of observations, bias, and RMS error of observation minus background (o-g 01) or analysis (o-g 03) on each level for the three data types. The following is part of the file, only showing data types 120 and 180: \begin{tiny} \begin{verbatim} @@ -480,22 +480,22 @@ \subsubsection{Check Analysis Fit to Observations} ---------------------------------------------------------------------------------------------------------------------------------------------- o-g 01 t 120 0000 count 107 350 357 866 1153 719 252 450 551 884 745 7188 o-g 01 t 120 0000 bias 0.80 0.32 -0.10 -0.12 -0.15 -0.20 -0.24 -0.60 -0.22 0.15 -0.10 -0.07 - o-g 01 t 120 0000 rms 2.06 1.55 0.83 0.77 0.69 0.66 0.73 1.20 1.44 1.65 1.65 1.23 + o-g 01 t 120 0000 RMS error 2.06 1.55 0.83 0.77 0.69 0.66 0.73 1.20 1.44 1.65 1.65 1.23 o-g 01 t 120 0000 cpen 0.81 0.49 0.23 0.33 0.33 0.30 0.36 0.79 0.91 0.98 0.79 0.58 o-g 01 t 120 0000 qcpen 0.81 0.49 0.23 0.33 0.33 0.30 0.36 0.79 0.91 0.98 0.79 0.58 o-g 01 t 180 0000 count 339 35 0 0 0 0 0 0 0 0 0 374 o-g 01 t 180 0000 bias 0.17 1.12 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.26 - o-g 01 t 180 0000 rms 1.66 4.03 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 2.01 + o-g 01 t 180 0000 RMS error 1.66 4.03 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 2.01 o-g 01 t 180 0000 cpen 0.63 7.18 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.25 o-g 01 t 180 0000 qcpen 0.63 7.18 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.25 o-g 01 t 180 0001 count 1344 15 0 0 0 0 0 0 0 0 0 1359 o-g 01 t 180 0001 bias 0.82 4.17 0.00 0.00 0.00 
0.00 0.00 0.00 0.00 0.00 0.00 0.86 - o-g 01 t 180 0001 rms 2.07 5.44 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 2.13 + o-g 01 t 180 0001 RMS error 2.07 5.44 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 2.13 o-g 01 t 180 0001 cpen 0.47 23.37 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.73 o-g 01 t 180 0001 qcpen 0.47 23.37 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.73 o-g 01 all count 1792 405 358 871 1172 725 325 800 651 884 745 9482 o-g 01 all bias 0.69 0.53 -0.10 -0.12 -0.15 -0.19 -0.09 -0.50 -0.04 0.15 -0.10 0.08 - o-g 01 all rms 1.99 2.14 0.83 0.77 0.69 0.67 0.84 1.32 1.58 1.65 1.65 1.45 + o-g 01 all RMS error 1.99 2.14 0.83 0.77 0.69 0.67 0.84 1.32 1.58 1.65 1.65 1.45 o-g 01 all cpen 0.52 1.91 0.23 0.33 0.36 0.31 0.44 0.97 1.18 0.98 0.79 0.68 o-g 01 all qcpen 0.52 1.91 0.23 0.33 0.36 0.31 0.44 0.97 1.18 0.98 0.79 0.68 @@ -504,35 +504,35 @@ \subsubsection{Check Analysis Fit to Observations} ---------------------------------------------------------------------------------------------------------------------------------------------- o-g 03 t 120 0000 count 107 350 357 866 1153 719 252 450 551 884 745 7188 o-g 03 t 120 0000 bias 0.58 0.29 -0.04 -0.02 -0.04 -0.02 0.01 -0.16 -0.04 0.06 0.04 0.01 - o-g 03 t 120 0000 rms 1.72 1.35 0.70 0.61 0.49 0.43 0.50 0.79 1.14 1.40 1.59 1.05 + o-g 03 t 120 0000 RMS error 1.72 1.35 0.70 0.61 0.49 0.43 0.50 0.79 1.14 1.40 1.59 1.05 o-g 03 t 120 0000 cpen 0.57 0.33 0.14 0.19 0.16 0.12 0.18 0.34 0.57 0.72 0.73 0.39 o-g 03 t 120 0000 qcpen 0.57 0.33 0.14 0.19 0.16 0.12 0.18 0.34 0.57 0.72 0.73 0.39 o-g 03 t 180 0000 count 339 35 0 0 0 0 0 0 0 0 0 374 o-g 03 t 180 0000 bias -0.24 0.21 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 -0.19 - o-g 03 t 180 0000 rms 1.55 2.83 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.71 + o-g 03 t 180 0000 RMS error 1.55 2.83 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.71 o-g 03 t 180 0000 cpen 0.34 2.57 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.55 o-g 03 t 180 0000 qcpen 0.34 2.57 0.00 
0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.55 o-g 03 t 180 0001 count 1344 16 0 0 0 0 0 0 0 0 0 1360 o-g 03 t 180 0001 bias 0.30 1.97 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.32 - o-g 03 t 180 0001 rms 1.75 2.88 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.77 + o-g 03 t 180 0001 RMS error 1.75 2.88 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.77 o-g 03 t 180 0001 cpen 0.27 6.05 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.34 o-g 03 t 180 0001 qcpen 0.27 6.05 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.34 o-g 03 all count 1792 406 358 871 1172 725 325 800 651 884 745 9483 o-g 03 all bias 0.21 0.34 -0.04 -0.02 -0.04 -0.02 0.04 -0.13 0.06 0.06 0.04 0.05 - o-g 03 all rms 1.71 1.61 0.69 0.61 0.49 0.43 0.61 0.94 1.26 1.40 1.59 1.22 + o-g 03 all RMS error 1.71 1.61 0.69 0.61 0.49 0.43 0.61 0.94 1.26 1.40 1.59 1.22 o-g 03 all cpen 0.30 0.75 0.14 0.19 0.18 0.14 0.24 0.49 0.76 0.72 0.73 0.42 o-g 03 all qcpen 0.30 0.75 0.14 0.19 0.18 0.14 0.24 0.49 0.76 0.72 0.73 0.42 \end{verbatim} \end{tiny} -For example: data type 120 has 1153 observations in layer 400.0-600.0 hPa, a bias of -0.15, and a rms of 0.69. The last column shows the statistics for the whole atmosphere. There are several summary lines for all data types, which is indicated by "all" in the data types column. For summary O-B (which is "o-g 01" in the file), we have 9482 observations total, a bias of 0.08, and a rms of 1.45. \\ +For example, data type 120 has 1153 observations in layer 400.0-600.0 hPa, a bias of -0.15, and a RMS error of 0.69. The last column shows the statistics for the whole atmosphere. There are several summary lines for all data types, which is indicated by "all" in the data types column. For summary O-B (which is "o-g 01" in the file), there are 9482 observations in total, for a bias of 0.08, and a RMS error of 1.45. \\ - Skipping ahead in the fort file, "o-g 03" columns (under "it") show the observation minus analysis (O-A) information. 
Under the summary ("all") lines, it can be seen that there were 9483 total observations, a bias of 0.05, and a rms of 1.22. This shows that from the background to the analysis, one more observation data is being used because of the recalculation of the innovation and the gross check after each outer loop, the bias reduced from 0.08 to 0.05, and the rms reduced from 1.45 to 1.22. This is about a 16\% reduction, which is a reasonable value for large-scale analysis. \\ + Skipping ahead in the "fort" file, "o-g 03" columns (under "it") show the observation minus analysis (O-A) information. Under the summary ("all") rows, it can be seen that there were 9483 total observations, a bias of 0.05, and a RMS error of 1.22. This shows that from the background to the analysis, one more observation data point is being used because of the recalculation of the innovation and the gross check after each outer loop; the bias was reduced from 0.08 to 0.05, and the RMS error was reduced from 1.45 to 1.22. This is about a 16\% reduction, which is a reasonable value for a large-scale analysis. \\ \item \textit{fit\_w1.2014061700} (\textit{fort.202}) -This file demonstrates how the background and analysis fields fit to wind observations. This file (as well as \textit{fit\_q1}) are formatted the same as the \textit{fort.203}. Therefore, only the summary lines will be shown for O-B and O-A to gain a quick view of the fitting: +This file demonstrates how the background and analysis fields fit to wind observations. This file (as well as \textit{fit\_q1}) is formatted the same way as \textit{fort.203}. 
Therefore, only the summary lines for O-B and O-A will be shown here to gain a quick view of the fit to observations: \begin{tiny} \begin{verbatim} @@ -542,19 +542,19 @@ \subsubsection{Check Analysis Fit to Observations} ---------------------------------------------------------------------------------------------------------------------------------------------- o-g 01 all count 1597 1703 1839 2930 1213 828 290 687 533 694 798 14513 o-g 01 all bias 0.27 0.84 0.68 0.61 0.56 0.45 0.67 0.91 0.48 0.83 1.21 0.64 - o-g 01 all rms 2.50 2.65 2.52 3.11 4.02 3.98 4.37 4.31 5.32 5.41 4.77 3.59 + o-g 01 all RMS error 2.50 2.65 2.52 3.11 4.02 3.98 4.37 4.31 5.32 5.41 4.77 3.59 ---------------------------------------------------------------------------------------------------------------------------------------------- o-g 03 all count 1608 1695 1843 2931 1212 828 290 687 533 694 798 14520 o-g 03 all bias 0.23 0.42 0.26 0.30 0.37 0.33 0.22 0.37 0.32 0.67 1.22 0.39 - o-g 03 all rms 2.27 2.16 1.94 2.23 2.74 2.82 3.64 3.31 4.22 4.43 4.41 2.90 + o-g 03 all RMS error 2.27 2.16 1.94 2.23 2.74 2.82 3.64 3.31 4.22 4.43 4.41 2.90 \end{verbatim} \end{tiny} - \hspace{1cm} O-B: 14513 observations in total, bias is 0.64 and rms is 3.59 + \hspace{1cm} O-B: 14513 observations in total, bias is 0.64, and RMS error is 3.59 - \hspace{1cm} O-A: 14520 observations in total, bias is 0.39 and rms is 2.90\\ -The total bias was reduced from 0.64 to 0.39 and the rms reduced from 3.59 to 2.90 (~20\% reduction).\\ + \hspace{1cm} O-A: 14520 observations in total, bias is 0.39, and RMS error is 2.90\\ +The total bias was reduced from 0.64 to 0.39 and the RMS error was reduced from 3.59 to 2.90 (~20\% reduction).\\ \item \textit{fit\_q1.2014061700} (\textit{fort.204}) @@ -569,43 +569,43 @@ \subsubsection{Check Analysis Fit to Observations} ---------------------------------------------------------------------------------------------------------------------------------------------- o-g 01 all count 543 186 182 
211 146 457 406 520 621 623 0 3895 o-g 01 all bias 1.17 -3.68 -2.47 -1.30 -3.55 0.19 0.64 -1.80 -4.28 -5.55 0.00 -2.05 - o-g 01 all rms 9.09 10.63 9.03 9.34 12.73 12.30 14.53 15.27 16.45 16.01 0.00 13.66 + o-g 01 all RMS error 9.09 10.63 9.03 9.34 12.73 12.30 14.53 15.27 16.45 16.01 0.00 13.66 ---------------------------------------------------------------------------------------------------------------------------------------------- o-g 03 all count 543 186 182 211 146 457 406 520 621 623 0 3895 o-g 03 all bias -0.39 -0.88 -0.68 0.45 -0.51 0.06 0.13 -0.10 -0.70 -1.90 0.00 -0.53 - o-g 03 all rms 5.48 5.19 4.37 5.73 8.13 9.31 12.19 13.82 13.01 12.36 0.00 10.64 + o-g 03 all RMS error 5.48 5.19 4.37 5.73 8.13 9.31 12.19 13.82 13.01 12.36 0.00 10.64 \end{verbatim} \end{tiny} -\hspace{1cm} O-B: 3895 observations in total and bias is -2.05 and rms is 13.66 +\hspace{1cm} O-B: 3895 observations in total, bias is -2.05, and RMS error is 13.66 -\hspace{1cm} O-A: 3895 observations in total and bias is -0.53 and rms is 10.64\newline -The total bias and rms were reduced. \\ +\hspace{1cm} O-A: 3895 observations in total, bias is -0.53, and RMS error is 10.64\newline +The total bias and RMS error were reduced. \\ \item \textit{fit\_p1.2014061700} (\textit{fort.201}) - This file demonstrates how the background and analysis fields fit to surface pressure observations. Because the surface pressure is a two-dimensional field, the table is formatted different than the three-dimensional fields shown above. Once again, the summary lines will be shown for O-B and O-A to gain a quick view of the fitting: + This file demonstrates how the background and analysis fields fit to surface pressure observations. Because surface pressure is a two-dimensional field, the table is formatted differently than the three-dimensional fields shown above. 
Once again, only the summary lines will be shown for O-B and O-A to gain a quick view of the fit to observations: \begin{scriptsize} \begin{verbatim} -------------------------------------------------- pressure levels (hPa)= 0.0 2000.0 - it obs type stype count bias rms cpen qcpen + it obs type stype count bias RMS error cpen qcpen o-g 01 all 13890 0.1912 0.7931 0.4105 0.4105 -------------------------------------------------- o-g 03 all 13916 0.0403 0.6764 0.2921 0.2921 \end{verbatim} \end{scriptsize} -\hspace{1cm} O-B: 13890 observations in total and bias is 0.1912 and rms is 0.7931 +\hspace{1cm} O-B: 13890 observations in total, bias is 0.1912, and RMS error is 0.7931 -\hspace{1cm} O-A: 13916 observations in total and bias is 0.0403 and rms is 0.6764\newline +\hspace{1cm} O-A: 13916 observations in total, bias is 0.0403, and RMS error is 0.6764\newline -Both the total bias and rms were reduced. \\ +Both the total bias and RMS error were reduced. \\ -These statistics show that the analysis results fit to the observations closer than the background, which is what the analysis is supposed to do. How close the analysis fit to the observations is based on the ratio of background error variance and observation error. +These statistics show that the analysis results fit to the observations closer than the background, which is what we would expect. How close the analysis fits to the observations is based on the ratio of background error variance and observation error. \end{itemize} @@ -614,9 +614,9 @@ \subsubsection{Check the Minimization} \label{sec5.1.4.2} %------------------------------------------------------------------------------- -In addition to the minimization information in the stdout file, GSI writes more detailed information into a file called fort.220. The content of fort.220 is explained in the Advanced GSI User\textquotesingle s Guide. Below is an example of a quick check of the trend of the cost function and norm of gradient. 
The value should get smaller with each iteration step. +In addition to the minimization information in the standard output file, GSI writes more detailed information into a file called "fort.220." The content of "fort.220" is explained in the Advanced GSI User\textquotesingle s Guide. Below is an example of a quick check of the cost function trend and the norm of gradient. The values should get smaller with each iteration. -In the run directory, the cost function and norm of the gradient information can be dumped into an output file by using the command: +In the run directory, information on the cost function and norm of the gradient can be dumped into an output file by using the following command: \begin{scriptsize} \begin{verbatim} @@ -624,7 +624,7 @@ \subsubsection{Check the Minimization} \end{verbatim} \end{scriptsize} -The file \textit{cost\_gradient.txt} includes 6 columns, however only the first 4 columns are needed and are explained below. The first 5 and last 5 lines read are: +The file \textit{cost\_gradient.txt} includes six columns, however only the first four columns are needed and are explained below. The first five and last five lines read are: \begin{scriptsize} \begin{verbatim} @@ -643,9 +643,9 @@ \subsubsection{Check the Minimization} \end{verbatim} \end{scriptsize} -The first column is the outer loop number and the second column is the inner iteration number. The third column is the cost function, and the forth column is the norm of gradient. It can be seen that both the cost function and norm of gradient are descending. +The first column is the outer loop number and the second column is the inner iteration number. The third column is the cost function, and the fourth column is the norm of the gradient. It can be seen that both the cost function and norm of the gradient are descending. 
-To get a complete picture of the minimization process, the cost function and norm of gradient can be plotted using a provided NCL script located under: +To get a complete picture of the minimization process, the cost function and norm of the gradient can be plotted using an included NCL script located here: \begin{scriptsize} \begin{verbatim} @@ -653,23 +653,23 @@ \subsubsection{Check the Minimization} \end{verbatim} \end{scriptsize} -the plot is shown as Fig.\ref{fig:costgrad_ch5}: +The plot is shown as Fig.\ref{fig:costgrad_ch5}: \begin{figure}[h!] \centering \includegraphics[width=0.7\textwidth]{images/CostGrad_ch5} - \caption{Cost function (y-axes) and norm of gradient(y-axes) change with iteration steps (x-axes).} + \caption{The cost function (y-axes) and norm of the gradient (y-axes) change with each iteration (x-axes).} \label{fig:costgrad_ch5} \end{figure} -The above plots demonstrate that both the cost function and norm of gradient descend very fast in the first 10 iterations in both outer loops and drop very slowly after the 10th iteration. +The above plots demonstrate that both the cost function and norm of the gradient descend very fast in the first ten iterations in both outer loops and drop very slowly afterward. %------------------------------------------------------------------------------- \subsubsection{Check the Analysis Increment} \label{sec5.1.4.3} %------------------------------------------------------------------------------- -The analysis increment gives us an idea where and how much the background fields have been modified by the observations through analysis. Another useful graphics tool that can be used to look at the analysis increment is located under: +The analysis increment gives us an idea of where and how much the background fields have been modified by the observations through the analysis. 
Another useful graphics tool that can be used to look at the analysis increment is located here: \begin{scriptsize} \begin{verbatim} @@ -677,7 +677,7 @@ \subsubsection{Check the Analysis Increment} \end{verbatim} \end{scriptsize} -The graphic below shows the analysis increment at the 15th sigma level in analysis grid. Notice that the scales are different for each of the plots. +The graphic below shows the analysis increment at the 15th sigma (vertical) level on the analysis grid. Notice that the scales are different for each of the plots. \begin{figure}[h!] \centering @@ -686,7 +686,7 @@ \subsubsection{Check the Analysis Increment} \label{fig:increments} \end{figure} -The analysis increment shows the fact that the conventional observations are mostly located in the U.S. CONUS domain and the data availability over the ocean is very sparse. +The analysis increment indicates that conventional observations are mostly located within the continental United States and that data availability over the ocean is very sparse. %------------------------------------------------------------------------------- \section{Assimilating Radiance Data with Regional GSI} @@ -697,12 +697,12 @@ \subsection{Run Script} \label{sec5.2.1} %------------------------------------------------------------------------------- -Adding radiance data into the GSI analysis is very straightforward after a successful run of GSI with conventional data. The same run script from the above section can be used to run GSI with radiance with or without PrepBUFR data. The key step to adding the radiance data is linking the radiance BUFR data files to the GSI run directory with the names listed in the \verb|&OBS_INPUT| section of the GSI namelist. The following example adds the two radiance BUFR files: +Adding radiance data into the GSI analysis is straightforward after having already run GSI with conventional data. 
The same run script from the above section can be used to run GSI with radiance data (with or without PrepBUFR data). The key step to adding the radiance data is linking the radiance BUFR files to the GSI run directory with the names listed in the \verb|&OBS_INPUT| section of the GSI namelist. The following example adds the two radiance BUFR files: AMSU-A: \textit{gdas1.t00z.1bamua.tm00.bufr\_d}\newline HIRS4: \textit{gdas1.t00z.1bhrs4.tm00.bufr\_d} -The location of these radiance BUFR files has been previously saved to the scripts variable \verb|OBS_ROOT|, therefore the following two lines can be inserted below the link to the PrepBUFR data in the script \textit{run\_gsi\_regional.ksh}: +The location of these radiance BUFR files is already included in the scripts variable \verb|OBS_ROOT|, therefore the following two lines can be inserted below the link to the prepBUFR data in the script \textit{run\_gsi\_regional.ksh}: \begin{scriptsize} \begin{verbatim} @@ -711,7 +711,7 @@ \subsection{Run Script} \end{verbatim} \end{scriptsize} -If it is desired to run radiance data in addition to the conventional PrepBUFR data, the following link to the PrepBUFR should be kept as is: +If radiance data is desired in addition to conventional prepBUFR data, the following link to the prepBUFR data should be kept as is: \begin{scriptsize} \begin{verbatim} @@ -719,7 +719,7 @@ \subsection{Run Script} \end{verbatim} \end{scriptsize} -Alternatively to analyze radiance data without conventional PrepBUFR data, this line can be commented out in the script \textit{run\_gsi\_regional.ksh}: +Alternatively, to analyze radiance data without conventional prepBUFR data, this line can be commented out in the script \textit{run\_gsi\_regional.ksh}: \begin{scriptsize} \begin{verbatim} @@ -727,9 +727,9 @@ \subsection{Run Script} \end{verbatim} \end{scriptsize} -In the following example, the case study will include both radiance and conventional observations. 
+In the following example, both radiance and conventional observations will be assimilated. -In order to link the correct name for the radiance BUFR file, the namelist section \verb|&OBS_INPUT| should be referenced. This section has a list of data types and BUFR file names that can be used in GSI. The 1st column \verb|"dfile"| is the file name recognized by GSI. The 2nd column \verb|"dtype"| and 3rd column \verb|"dplat"| are the data type and data platform that are included in the file listed in "dfile", respectively. For example, the following line tells us the AMSU-A observation from NOAA-15 should be read from a BUFR file named as \textit{"amsuabufr"}: +In order to link the correct name for the radiance BUFR file, the namelist section \verb|&OBS_INPUT| should be referenced. This section has a list of data types and BUFR file names that can be used in GSI. The 1\textsuperscript{st} column \verb|"dfile"| is the file name recognized by GSI. The 2\textsuperscript{nd} column \verb|"dtype"| and 3\textsuperscript{rd} column \verb|"dplat"| are the data type and data platform that are included in the file listed in "dfile," respectively. For example, the following line tells us the AMSU-A observation from NOAA-15 should be read from a BUFR file named \textit{"amsuabufr"}: \begin{scriptsize} \begin{verbatim} @@ -738,12 +738,12 @@ \subsection{Run Script} \end{verbatim} \end{scriptsize} -With radiance data assimilation, two important setups, data thinning and bias correction, need to be checked carefully. The following is a brief description of these two setups: +With radiance data assimilation, data thinning and bias correction need to be checked carefully. The following is a brief description of these two: \begin{itemize} \item Radiance data thinning -The radiance data thinning is setup in the namelist section \verb|&OBS_INPUT|. The following is a part of namelist in that section: +Radiance data thinning is found in the namelist section \verb|&OBS_INPUT|. 
The following is a part of namelist in that section: \begin{scriptsize} \begin{verbatim} @@ -753,11 +753,11 @@ \subsection{Run Script} \end{verbatim} \end{scriptsize} -The first line of \verb|&OBS_INPUT| lists multiple mesh grids as elements of the array \verb|dmesh| (three mesh grids in the above example). For the line specifying a data type, the 2nd last element of that line is to specify the choice of \verb|dthin|. This selects the mesh grid to be used for thinning. It can be seen that the data thinning option for NOAA-15 AMSU-A observations is 60 km because the value of \verb|dthin| is 2, corresponding to \verb|dmesh(2)|=60 km. For more information about radiance data thinning, please refer to the Advanced GSI User\textquotesingle s Guide.\\ +The first line of \verb|&OBS_INPUT| lists multiple mesh grids as elements of the array \verb|dmesh| (three mesh grids in the above example). For the line specifying data type, the 2\textsuperscript{nd} to last element of that line is used to specify the choice of \verb|dthin|. This selects the mesh grid to be used for thinning. The data thinning option for NOAA-15 AMSU-A observations is set to 60 km because the value of \verb|dthin| is two, corresponding to \verb|dmesh(2)|=60 km. For more information about radiance data thinning, please refer to the Advanced GSI User\textquotesingle s Guide.\\ \item Radiance data bias correction -The radiance data bias correction is very important for a successful radiance data analysis. In the sample run scripts, there are two files related to bias correction: +Radiance data bias correction is very important for successful radiance data assimilation. In the sample run scripts, there are two files related to bias correction: \begin{scriptsize} \begin{verbatim} @@ -767,9 +767,9 @@ \subsection{Run Script} \end{verbatim} \end{scriptsize} -For this case, the GDAS bias correction files were downloaded and saved in the fix directory as examples. 
For different cases, the run script should have those two lines to link the bias correction coefficient files. The first line sets the path to the bias coefficient file, and the second copies the bias correction coefficients for passive (monitored) channels into the working directory. Those two coefficient files are usually calculated from within GSI in the previous cycle. These two files are provided in ./fix as an example of the bias correction coefficients. For the best results, it will be necessary for the user to generate their own bias files. The details of the radiance data bias correction are discussed in the Advanced GSI User\textquotesingle s Guide. Please note the GSI release version before v3.5 has coefficients for mass bias correction and angle bias correction calculated separately. \\ +For this case, the GDAS bias correction files were downloaded and saved in the fix directory as examples. For other cases, the run script should link to corresponding bias correction coefficient files. The first line sets the path to the bias coefficient file, and the second copies the bias correction coefficients for passive (monitored) channels into the working directory. These two coefficient files are usually calculated from within GSI in the previous cycle. Two files are provided in ./fix as examples of the bias correction coefficients. For the best results, it is necessary for the user to generate his or her own bias files. The details of radiance data bias correction are discussed in the Advanced GSI User\textquotesingle s Guide. Please note that GSI releases prior to v3.5 have coefficients for mass bias correction and angle bias correction calculated separately. \\ -Once these links are set, we are ready to run the case. +Once these links are set, we are ready to run GSI. 
\end{itemize} @@ -779,7 +779,7 @@ \subsection{Run GSI and Check Run Status} \label{sec5.2.2} %------------------------------------------------------------------------------- -The process for running GSI is the same as described in section \ref{sec5.1.2}. Once \textit{run\_gsi\_regional.ksh} has been submitted, move into the run directory to check the GSI analysis results. For our current case, the run directory will look almost as it did for the conventional data case, the exception being the two links to the radiance BUFR files and new diag files for the radiance data types used. Following the same steps as in section \ref{sec5.1.2}, check the \textit{stdout} file to see if GSI has run through each part of the analysis process successfully. In addition to the information outlined for the conventional run, the radiance BUFR files should have been read in and distributed to each sub domain: +The process for running GSI is the same as described in section \ref{sec5.1.2}. Once \textit{run\_gsi\_regional.ksh} has been submitted, move into the run directory to check the GSI analysis results. For the current case, the run directory will look almost as it did for the conventional data case, the exception being the two links to the radiance BUFR files and new diag files for the radiance data types used. Following the same steps as in section \ref{sec5.1.2}, check the \textit{stdout} file to see if GSI has run through each part of the analysis process successfully. 
In addition to the information outlined for the conventional run, the radiance BUFR files should have been read in and distributed to each sub domain: \begin{scriptsize} \begin{verbatim} @@ -800,7 +800,7 @@ \subsection{Run GSI and Check Run Status} \end{scriptsize} -When comparing this output to the content in step 3 of section \ref{sec5.1.3}, it can be seen that there are 8 new radiance data types that have been read in: HIRS4 from METOP-A, METOP-B and NOAA-19, AMSU-A from NOAA-15, NOAA-18, NOAA-19, METOP-A and METOP-B. The table above shows that most of the radiance data read in this case are AMSU-A from NOAA satellite. +When comparing this output to the content in step three of section \ref{sec5.1.3}, it can be seen that there are eight new radiance data types that have been read in: HIRS4 from METOP-A, METOP-B and NOAA-19, AMSU-A from NOAA-15, NOAA-18, NOAA-19, METOP-A, and METOP-B. The table above shows that most of the radiance data read in for this case are AMSU-A from NOAA satellite information. %------------------------------------------------------------------------------- \subsection{Diagnose GSI Analysis Results} @@ -810,11 +810,11 @@ \subsection{Diagnose GSI Analysis Results} \subsubsection{Check File fort.207} %------------------------------------------------------------------------------- -The file \textit{fort.207} contains the statistics for the radiance data, similar to file fort.203 for temperature. This file contains important details about the radiance data analysis. Section \ref{sec4.5.2} explains this file in detail. Below are some values from the file \textit{fort.207} to give a quick look at the radiance assimilation for this case study. +The file \textit{fort.207} contains the statistics for the radiance data, similar to file \textit{fort.203} for temperature. This file contains important details about the radiance data analysis. Section \ref{sec4.5.2} explains this file in detail. 
Below are some values from the file \textit{fort.207} to provide a quick look at the radiance assimilation for this example. The \textit{fort.207} file contains the following lines: -\hspace{4ex} For O-B, the stage before the first outer loop: +\hspace{4ex} For O-B, before the first outer loop: \begin{scriptsize} \begin{verbatim} @@ -824,7 +824,7 @@ \subsubsection{Check File fort.207} \end{verbatim} \end{scriptsize} -\hspace{4ex} For O-A, the stage after the second outer loop: +\hspace{4ex} For O-A, after the second outer loop: \begin{scriptsize} \begin{verbatim} @@ -833,11 +833,11 @@ \subsubsection{Check File fort.207} \end{verbatim} \end{scriptsize} -From the above information, it can be seen that AMSU-A data from NOAA-15 have 83190 observations within the analysis time window and domain. After thinning, 58236 of this data type remained, and only 25226 passed quality check and were used in the analysis. The penalty for this data decreased from 10356 to 4672.4 after 2 outer loops. It is also very interesting to see that the number of AMSU-A observations assimilated in the O-A calculation increased to 30136 from 25226 because more data passed quality check in 2nd outer loop. +From the above information, it can be seen that AMSU-A data from NOAA-15 provides 83190 observations within the analysis time window and domain. After thinning, 58236 observations remained, and only 25226 passed the quality check and were used in the analysis. The penalty for this data decreased from 10356 to 4672.4 after two outer loops. It is important to note that the number of AMSU-A observations assimilated in the O-A calculation increased to 30136 from 25226 as more data passed the quality check in the 2\textsuperscript{nd} outer loop. -The statistics for each channel can be viewed in the \textit{fort.207} file as well. Below channels from AMSU-A NOAA-15 are listed as an example: +The statistics for each channel can be viewed in the \textit{fort.207} file as well. 
Here, channels from AMSU-A NOAA-15 are listed as an example: -\hspace{4ex} For O-B, the stage before the first outer loop: +\hspace{4ex} For O-B, before the first outer loop: \begin{scriptsize} \begin{verbatim} @@ -855,7 +855,7 @@ \subsubsection{Check File fort.207} \end{verbatim} \end{scriptsize} -\hspace{4ex} For O-A, the stage after the second outer loop: +\hspace{4ex} For O-A, after the second outer loop: \begin{scriptsize} \begin{verbatim} @@ -873,39 +873,38 @@ \subsubsection{Check File fort.207} \end{verbatim} \end{scriptsize} -The second column is channel number for AMSU-A and the last column is the standard deviation for each channel. It can be seen that most of the channels fit better to the observations after the second outer loop. +The second column is the channel number for AMSU-A and the last column is the standard deviation for each channel. It can be seen that most of the channels fit better to the observations after the second outer loop. %------------------------------------------------------------------------------- \subsubsection{Check the Analysis Increment} %------------------------------------------------------------------------------- -The same methods for checking the optimal minimization as demonstrated in section \ref{sec5.1.4.2} can be used for radiance assimilation. Similar features to the conventional assimilation should be seen with the minimization. The figures below show detailed information on how the radiance data impact the analysis results on top of the conventional data. Using the same NCL script as in section \ref{sec5.1.4.3}, analysis increment fields are plotted comparing the analysis results with radiance and conventional data to the analysis results with conventional data assimilation only. The Fig \ref{fig:increments_rad2} is for level 49 and the Fig.\ref{fig:increments_rad} is for level 6, which represent the maximum temperature increment level (49) and maximum moisture increment level (6). 
+The same methods for checking the optimal minimization as demonstrated in section \ref{sec5.1.4.2} can be used for radiance assimilation. Similar features to the conventional assimilation should be seen with the minimization. The figures below show detailed information on how the radiance data impact the analysis results on top of the conventional data. Using the same NCL script as in section \ref{sec5.1.4.3}, analysis increment fields are plotted comparing the analysis results with radiance and conventional data to the analysis results with conventional data assimilation only. Figure \ref{fig:increments_rad2} is for vertical level 49 and Figure \ref{fig:increments_rad} is for vertical level six, representing the maximum temperature increment level (49) and maximum moisture increment level (6), respectively. \begin{figure}[h!] \centering \includegraphics[width=0.9\textwidth]{images/increments_rad} - \caption{Analysis increment fields of PrepBUFR and Radiance data analysis comparing to the analysis with PREPBUFR only at level 6} + \caption{Analysis increment fields of the prepBUFR and radiance data analysis compared to the analysis with prepBUFR only at vertical level six} \label{fig:increments_rad} \end{figure} \begin{figure}[h!] \centering \includegraphics[width=0.9\textwidth]{images/increments_rad2} - \caption{Analysis increment fields PrepBUFR and Radiance data analysis comparing to the analysis with PREPBUFR only at level 49} + \caption{Analysis increment fields of the prepBUFR and radiance data analysis compared to the analysis with prepBUFR only at vertical level 49} \label{fig:increments_rad2} \end{figure} -In order to fully understand the analysis results, the following needs to be understood: +In order to fully understand the analysis results, the following topics should be reviewed: \begin{enumerate} -\item The weighting functions of each channel and the data coverage at this analysis time. 
There are several sources on the Internet to show the weighting function for the AMSU-A channels. Channel 1 is the moisture channel, while the others are mainly temperature channels (Channels 2, 3 and 15 also have large moisture signals). Because a model top of 20 mb was specified for this case study, the actual impact should come from channels with the peak of the weighting below 20hPa. +\item The weighting functions of each channel and the data coverage at the analysis time. There are several sources on the internet that show the weighting functions of the AMSU-A channels. Channel one is the moisture channel, while the others are mainly temperature channels (Channels two, three, and 15 also have large moisture signals). Because a model top of 20 mb was specified for this case study, the actual impact should come from channels with peak weighting below 20 hPa. -\item The usage of each channel is located in the file named \textit{'satinfo'} in the run directory. The first two columns show the observation type and platform of the channels and the third column tells us if this channel is used in the analysis. Because a lot of amsua\_n15 and amsua\_n18 data were used, they should be checked in detail. In this case, Channels 6, 11 and 14 from amsua\_n15 and channels 9 and 14 from amsua\_n18 were turned off. +\item The usage of each channel is located in the file named \textit{'satinfo'} in the run directory. The first two columns show the observation type and platform of the channels, and the third column indicates if the channel is used in the analysis. Because many amsua\_n15 and amsua\_n18 data were used, they should be checked in detail. In this case, Channels six, 11, and 14 from amsua\_n15 and channels nine and 14 from amsua\_n18 were turned off. -\item Thinning information: a quick look at the namelist in the run directory: gsiparm.anl shows that both amsua\_n15 and amsu\_n18 using thinning grid 2, which is 60 km. 
In this case, the grid spacing is 30 km, which indicates to use the satellite observations every four grid-spaces, which might be a little dense. +\item Thinning information, including a quick look at the namelist in the run directory. The file "gsiparm.anl" shows that both amsua\_n15 and amsu\_n18 use thinning grid two, which is 60 km. In this case, the grid spacing is 30 km, which indicates to use the satellite observations every four grid-spaces, which might be a little dense. - -\item Bias correction: radiance bias correction was previously discussed. It is very important for a successful radiance data analysis. The run script can only link to the GDAS bias correction coefficients that are provided as an example in \textit{./fix}: +\item Bias correction: Radiance bias correction was previously discussed. It is very important for a successful radiance data analysis. The run script can only link to the GDAS bias correction coefficients that are provided as an example in \textit{./fix}: \begin{scriptsize} \begin{verbatim} @@ -914,9 +913,9 @@ \subsubsection{Check the Analysis Increment} \end{verbatim} \end{scriptsize} -Users can download the operational bias correction coefficients during the experiment period as a starting point to calculate the coefficients suitable for their experiments. \\ +Users can download the operational bias correction coefficients during their experiment period as a starting point to calculate the coefficients suitable for their experiments. \\ -Radiance bias correction for regional analysis is a difficult issue because of the limited coverage of radiance data. This topic is out of the scope of this document, but this issue should be considered and understood when using GSI with radiance applications. +Radiance bias correction for regional analyses is a difficult issue because of the limited coverage of radiance data. 
This topic is out of the scope of this document, but this issue should be considered and understood when using GSI with radiance applications. \end{enumerate} %------------------------------------------------------------------------------- @@ -927,7 +926,7 @@ \section{Assimilating GPS Radio Occultation Data with Regional GSI} \subsection{Run Script} %------------------------------------------------------------------------------- -The addition of GPS Radio Occultation (RO) data into the GSI analysis is similar to that of adding radiance data. In the example below, the RO data is used as refractivity. There is also an option to use the data as bending angles. The same run scripts used in sections \ref{sec5.1.1} and \ref{sec5.2.1} can be used with the addition of the following link to the observations: +The addition of GPS Radio Occultation (RO) data into the GSI analysis is similar to that of adding radiance data. In the example below, the RO data is used as refractivity. There is also an option to use the data as bending angles. The same run scripts in sections \ref{sec5.1.1} and \ref{sec5.2.1} can be used with the addition of the following link to RO observations: \begin{scriptsize} \begin{verbatim} @@ -956,7 +955,7 @@ \subsection{Run Script} \subsection{Run GSI and Check the Run Status} %------------------------------------------------------------------------------- -The process of running GSI is the same as described in section \ref{sec5.1.2}. Once \textit{run\_gsi\_regional.ksh} has been submitted, move into the working directory, \textit{gsiprd\_2014061700\_gps\_prepbufr}, to check the GSI analysis results. The run directory will look exactly the same as with the conventional data, with the exception of the link to the GPS RO BUFR files used in this case. Following the same steps as in section \ref{sec5.1.3}, check the stdout file to see if GSI has run through each part of the analysis process successfully. 
In addition to the information outlined for the conventional run, the GPS RO BUFR files should have been read in and distributed to each sub domain: +The process of running GSI is the same as described in section \ref{sec5.1.2}. Once \textit{run\_gsi\_regional.ksh} has been submitted, move into the working directory, \textit{gsiprd\_2014061700\_gps\_prepbufr}, to check the GSI analysis results. The run directory will look exactly the same as with the conventional data, with the exception of the link to the GPS RO BUFR files used in this case. Following the same steps as in section \ref{sec5.1.3}, check the standard output file to see if GSI has run through each part of the analysis process successfully. In addition to the information outlined for the conventional run, the GPS RO BUFR files should have been read in and distributed to each sub domain: \begin{scriptsize} \begin{verbatim} @@ -979,7 +978,7 @@ \subsection{Diagnose GSI Analysis Results} \subsubsection{Check File \textit{fort.212}} %------------------------------------------------------------------------------- -The file \textit{fort.212} shows the fit of analysis/background to the GPS/RO data in fractional difference. It has the same structure as the fit files for conventional data. Below is a quick look to be sure the GPS RO data were used: +The file \textit{fort.212} shows the fit of the analysis/background to the GPS/RO data as fractional differences. It has the same structure as the fit files for conventional data. 
Below is a quick look to be sure the GPS RO data were used: \begin{tiny} \begin{verbatim} @@ -990,7 +989,7 @@ \subsubsection{Check File \textit{fort.212}} ---------------------------------------------------------------------------------------------------------------------------------------------- o-g 01 all count 0 13 58 223 355 342 232 261 326 440 729 3740 o-g 01 all bias 0.00 -0.76 -0.03 -0.06 -0.04 0.01 -0.03 0.04 -0.04 -0.16 -0.18 -0.14 - o-g 01 all rms 0.00 1.41 0.75 0.96 0.79 0.35 0.32 0.42 0.54 0.57 0.55 0.59 + o-g 01 all RMS error 0.00 1.41 0.75 0.96 0.79 0.35 0.32 0.42 0.54 0.57 0.55 0.59 Observation - Analysis (O-A) @@ -999,11 +998,11 @@ \subsubsection{Check File \textit{fort.212}} ---------------------------------------------------------------------------------------------------------------------------------------------- o-g 03 all count 1 18 65 229 355 342 231 266 330 440 731 3776 o-g 03 all bias -0.40 -0.43 0.03 0.02 -0.02 -0.01 -0.02 0.00 0.01 -0.01 -0.02 0.00 - o-g 03 all rms 0.40 1.03 0.54 0.59 0.70 0.26 0.14 0.20 0.24 0.28 0.39 0.41 + o-g 03 all RMS error 0.40 1.03 0.54 0.59 0.70 0.26 0.14 0.20 0.24 0.28 0.39 0.41 \end{verbatim} \end{tiny} -It can be seen that most of the GPS RO data are located in the upper levels, with a total of 3740 observations used in the analysis during the 1st outer loop, and 3776 used to calculate O-A. After the analysis, the data bias reduced from -0.14 to 0.00, and the rms was reduced from 0.59 to 0.41. It can be concluded that the analysis with GPS RO data looks reasonable from these statistics. +It can be seen that most of the GPS RO data are located in the upper levels, with a total of 3740 observations used in the analysis during the 1\textsuperscript{st} outer loop, and 3776 used to calculate O-A. After the analysis, the data bias reduced from -0.14 to 0.00, and the RMS error was reduced from 0.59 to 0.41. It can be concluded that the analysis with GPS RO data looks reasonable from these statistics. 
%------------------------------------------------------------------------------- \subsubsection{Check the Analysis Increment} @@ -1011,46 +1010,46 @@ \subsubsection{Check the Analysis Increment} The same methods for checking the minimization in section \ref{sec5.1.4.2} can be used for the GPS RO assimilation. -The following figures give detailed information of how the new data impacts the analysis result. Using the NCL script used in section \ref{sec5.1.4}, analysis increment fields are plotted comparing the analysis results with GPS RO and conventional data to the analysis results with conventional data assimilation only for level 48, which represents the maximum temperature increment. +The following figures provide detailed information about how the new data impacts the analysis. Using the NCL script from section \ref{sec5.1.4}, analysis increment fields are plotted comparing the analysis results with GPS RO and conventional data to the analysis results with conventional data assimilation only for vertical level 48, which represents the maximum temperature increment. \begin{figure}[h!] \centering \includegraphics[width=0.9\textwidth]{images/increments_bufr} - \caption{Analysis increment fields comparing the use of GPS RO and conventional observations to create the analysis to only PrepBUFR at level 48.} + \caption{Analysis increment fields comparing the use of GPS RO and conventional observations to only prepBUFR at vertical level 48.} \label{fig:increments_bufr} \end{figure} %------------------------------------------------------------------------------- -\section{Introduction to GSI 3D Hybrid EnVar Analysis} +\section{Introduction to GSI Hybrid 3DEnVar Analysis} %------------------------------------------------------------------------------- -The 3 dimensional hybrid ensemble-variational (3D hybrid EnVar) analysis is an important analysis option in the GSI system that has been used by operations. 
It provides the ability to bring the flow dependent background error covariance into the analysis based on ensemble forecasts. If ensemble forecasts have been generated, setting up GSI to do a hybrid analysis is straightforward and only requires two changes in the run script in addition to the current 3DVAR run script: +The three-dimensional hybrid ensemble-variational (hybrid 3DEnVar) analysis is an important option in the GSI system that has been used operationally. It provides the ability to bring the flow dependent background error covariance into the analysis based on ensemble forecasts. If ensemble forecasts have been generated, setting up GSI to do a hybrid analysis is straightforward and only requires two changes in the run script in addition to the current 3DVAR run script: \begin{itemize}[leftmargin=*] \item \textit{Change 1: Link the ensemble members to the GSI run directory}\\ -This change is to link the ensemble members to the GSI run directory and assign each ensemble member a name that GSI recognizes. The release version GSI can accept 4 kinds of ensemble forecasts, which is controlled by the namelist variable \textit{regional\_ensemble\_option}. Table \ref{tab51} gives a list of options for \textit{regional\_ensemble\_option} and the naming convention for linking the ensemble to GSI recognized names. \\ +This change is required to link the ensemble members to the GSI run directory and assign each ensemble member a name that GSI recognizes. GSI can accept four kinds of ensemble forecasts, controlled by the namelist variable \textit{regional\_ensemble\_option}. Table \ref{tab51} provides a list of options for \textit{regional\_ensemble\_option} and the naming convention for linking the ensemble files to GSI recognized names. 
\\ \begin{table}[htbp] \centering \begin{small} -\caption{the list of ensemble forecasts that can be read by GSI hybrid} +\caption{List of ensemble forecasts that can be read by GSI} \begin{tabular}{|p{1.7cm}|p{4cm}|p{4.6cm}|p{4cm}|} \hline \hline regional\_ ensemble\_ option & explanation & Function called & GSI recognized ensemble file names \\ \hline -1 & GFS ensemble internally interpolated to hybrid grid & get\_gefs\_for\_regional & \textit{filelist : a text file including path and name of ensemble files} \\ +1 & GFS ensemble internally interpolated to hybrid grid & get\_gefs\_for\_regional & \textit{filelist : a text file including the path and name of ensemble files} \\ \hline -2 & ensembles are WRF NMM (HWRF) format & get\_wrf\_nmm\_ensperts & +2 & Ensemble is in WRF-NMM (HWRF) format & get\_wrf\_nmm\_ensperts & \textit{d01\_en001},\newline \textit{d01\_en002},\newline ... \\ \hline -3 & ensembles are ARW netcdf format & get\_wrf\_mass\_ensperts\_netcdf & +3 & Ensemble is in ARW netcdf format & get\_wrf\_mass\_ensperts\_netcdf & \textit{wrf\_en001}, \newline \textit{wrf\_en002}, \newline ... \\ \hline -4 & ensembles are NEMS NMMB format & get\_nmmb\_ensperts & +4 & Ensemble is in NMMB format & get\_nmmb\_ensperts & \textit{nmmb\_ens\_mem001},\newline \textit{nmmb\_ens\_mem002},\newline ...\\ \hline @@ -1060,7 +1059,7 @@ \section{Introduction to GSI 3D Hybrid EnVar Analysis} \end{table} -Users have to change the GSI run script to add the links to the ensemble forecasts if they want to use the GSI hybrid function. Below is an example of using ensembles of ARW netcdf format, assuming that all the ensemble members are located in a directory defined by the parameter \textit{\${mempath}} and the ensemble members have a name such as: \textit{wrfout\_d01\_\${iiimem}}, where \textit{\${iiimem}} is an integer indicating the ensemble member ID. 
The following lines should be added to the run script with loop \textit{iiimem} from 1 to the total number of ensemble members: +Users have to change the GSI run script to add the links to the ensemble forecasts if they want to use the GSI hybrid function. Below is an example of using an ensemble in ARW netcdf format, assuming that all the ensemble members are located in a directory defined by the parameter \textit{\${mempath}} and the ensemble members have a name such as: \textit{wrfout\_d01\_\${iiimem}}, where \textit{\${iiimem}} is an integer indicating the ensemble member ID. The following lines should be added to the run script with loop \textit{iiimem} from one to the total number of ensemble members: \begin{scriptsize} \begin{verbatim} @@ -1074,23 +1073,23 @@ \section{Introduction to GSI 3D Hybrid EnVar Analysis} \item \textit{Change 2: Set up the namelist options in section HYBRID\_ENSEMBLE}\\ -Users need to set \verb|l_hyb_ens=.true.| to turn on hybrid ensemble analysis. Commonly used namelist options for the hybrid analysis are listed in table \ref{tab52}: +Users need to set \verb|l_hyb_ens=.true.| to turn on the hybrid ensemble analysis. Commonly used namelist options for the hybrid analysis are listed in table \ref{tab52}: \begin{table}[htbp] \centering -\caption{the list of namelist options for GSI hybrid} +\caption{The list of namelist options for GSI hybrid} \begin{tabular}{|p{3cm}|p{11cm}|} \hline -Options & explanation \\ +Options & explanation \\ \hline -l\_hyb\_ens & if true, turn on hybrid ensemble option; \\ +l\_hyb\_ens & if true, turn on hybrid ensemble option \\ \hline uv\_hyb\_ens & if true, ensemble perturbation wind variables are u and v; \newline -otherwise, ensemble perturbation wind variables are stream function and velocity potential functions. 
\\ +otherwise, ensemble perturbation wind variables are stream function and velocity potential \\ \hline -generate\_ens & if true, generate internal ensemble based on existing background error; recommended to be false. \\ +generate\_ens & if true, generate an internal ensemble based on the existing background error; recommended = false \\ \hline -n\_ens & number of ensemble members. \\ +n\_ens & number of ensemble members \\ \hline beta1\_inv& (1/beta1), the weight given to the static background error covariance. 0 <= beta1\_inv <= 1, should be tuned for optimal performance; beta2\_inv = 1 - beta1\_inv is the weight given to the ensemble derived covariance \newline =1, ensemble information turned off \newline @@ -1098,18 +1097,18 @@ \section{Introduction to GSI 3D Hybrid EnVar Analysis} \hline s\_ens\_h & homogeneous isotropic horizontal ensemble localization scale (km) \\ \hline -s\_ens\_v & vertical localization scale. \newline +s\_ens\_v & vertical localization scale \newline If positive, in grid units; \newline -if negative, in lnp unit. \\ +if negative, in lnp unit \\ \hline regional\_ensemble\newline -\_option & integer, used to select type of ensemble to read in for regional applications. Currently takes values from 1 to 4: \newline +\_option & integer, used to select the type of ensemble to read in for regional applications. Currently takes values from one to four: \newline =1: use GEFS internally interpolated to ensemble grid; \newline - =2: ensembles are in WRF NMM format; \newline + =2: ensembles are in WRF-NMM format; \newline =3: ensembles are in ARW netcdf format; \newline - =4: ensembles are in NEMS NMMB format. \\ + =4: ensembles are in NMMB format. \\ \hline -grid\_ratio\_ens & for regional runs, ratio of ensemble grid resolution to analysis grid resolution. If turned on and specified an appropriate value, could increase the computational efficiency. \\ +grid\_ratio\_ens & for regional runs, the ratio of ensemble to analysis grid resolution. 
If turned on and specified with an appropriate value, this could increase the computational efficiency. \\ \hline \end{tabular} \label{tab52} @@ -1117,14 +1116,53 @@ \section{Introduction to GSI 3D Hybrid EnVar Analysis} Please note: the parameters \verb|s_ens_h|, \verb|s_ens_v|, and \verb|beta1_inv| are tunable parameters. They should be tuned for best performance.\\ -After setup of the namelist parameters and the path and name of the ensemble members, GSI can be run following the other 3DVAR cases introduced in this chapter. And the same procedures could be followed as in the previous sections to check the run status and diagnose the GSI analysis. +After setting up the namelist parameters and the path/name of the ensemble members, GSI can be run in the same manner as the other 3DVAR cases introduced in this chapter. The same procedures could be followed as in the previous sections to check the run status and diagnose the GSI analysis. \end{itemize} +\newpage +%------------------------------------------------------------------------------- +\section{Introduction to GSI Hybrid 4DEnVar Analysis} +%------------------------------------------------------------------------------- + +The GSI hybrid 4DEnVar analysis is similar to the hybrid 3DEnVar except that the hybrid 4DEnVar will use multiple background files and GFS ensemble forecasts. As an example, the following shows how to conduct a hybrid 4DEnVar analysis using three time levels of background files and ensembles. + +Before creating a hybrid 4DEnVar analysis, be sure to read the previous section about how to run hybrid 3DEnVar first. The following steps are additional procedures beyond hybrid 3DEnVar and assume that all hybrid 3DEnVar settings have already been set. + +(1). Set \textit{if\_4DEnVar=Yes} in \textit{run\_gsi\_regional.ksh}. + +(2). Set the correct background files and ensemble files at different time levels in \textit{run\_gsi\_regional.ksh}. 
See the following example: +\begin{scriptsize} +\begin{verbatim} + if [ ${if_4DEnVar} = Yes ] ; then + BK_FILE_P1=${BK_ROOT}/wrfout_d01_2017-05-13_19:00:00 + BK_FILE_M1=${BK_ROOT}/wrfout_d01_2017-05-13_17:00:00 + + ENSEMBLE_FILE_mem_p1=${ENS_ROOT}/sfg_2017051312_fhr09s + ENSEMBLE_FILE_mem_m1=${ENS_ROOT}/sfg_2017051312_fhr03s + fi +\end{verbatim} +\end{scriptsize} + + Note that the background file at the analysis time (201705131800 for the above example) is set by \textit{BK\_FILE} and the ensemble files at the analysis time are set by \textit{ENSEMBLE\_FILE\_mem} as introduced in previous sections. See the following example: + +\begin{scriptsize} +\begin{verbatim} + BK_FILE=${BK_ROOT}/wrfout_d01_2017-05-13_18:00:00 + ... + if [ ${if_hybrid} = Yes ] ; then + ... + ENSEMBLE_FILE_mem=${ENS_ROOT}/sfg_2017051312_fhr06s + ... +\end{verbatim} +\end{scriptsize} + +Now GSI can be run following the hybrid 3DEnVar case introduced in the previous section. Similar procedures can be conducted to check the GSI run status and results. + %------------------------------------------------------------------------------- \section{Summary} %------------------------------------------------------------------------------- -This chapter applied the knowledge from the previous chapters to demonstrate how to set up, run, and analyze GSI for various regional applications. It is important to always check for successful GSI analysis, as running to completion does not always indicate a successful run. Using the tools and methods described in this chapter, a complete picture of the GSI analysis can be obtained. +This chapter applied previous information outlined in the user\textquotesingle s guide to demonstrate how to set up, run, and analyze GSI for various regional applications. It is important to always check for a successful GSI analysis, as running to completion does not always indicate a successful analysis was generated. 
Using the tools and methods described in this chapter, a complete picture of the GSI analysis can be obtained. -It is important to realize that GSI applications are not limited to regional analysis with WRF. Other GSI applications, including the global analysis for GFS, the chemical analysis, and others will be introduced in the next chapter . +It is important to realize that GSI applications are not limited to regional analyses with WRF. Other GSI applications, including global analyses for GFS, chemical analyses, and others will be introduced in the next chapter. diff --git a/doc/GSI_user_guide/gsi_ch6.tex b/doc/GSI_user_guide/gsi_ch6.tex index 98bab48e5..b6905ed27 100644 --- a/doc/GSI_user_guide/gsi_ch6.tex +++ b/doc/GSI_user_guide/gsi_ch6.tex @@ -1,4 +1,4 @@ -\chapter{Introduction to more GSI Applications} +\chapter{Introduction to more GSI Applications}\label{gsi_global} \setlength{\parskip}{12pt} %------------------------------------------------------------------------------- @@ -6,13 +6,13 @@ \section{Introduction to Global GSI analysis} %------------------------------------------------------------------------------- The \textit{Global Forecast System (GFS)} is a global numerical weather prediction system containing a global computer model and variational analysis -run by the U.S. National Weather Service (NWS). As of February 2015, the mathematical model is run four times a day, and produces forecasts for up -to 16 days in advance, with decreased spatial resolution after 10 days. The model is a spectral model with a resolution of T1534 from 0 to 240 hours +run by the U.S. National Weather Service (NWS). As of February 2015, the numerical model is run four times a day, and produces forecasts for up +to 16 days in advance, with decreased spatial resolution after 10 days. It is a spectral model with a resolution of T1534 from 0 to 240 hours (0-10 days) and T574 from 240 to 384 hours (10-16 days). 
In the vertical, the model is divided into 64 layers and temporally, it produces forecast output every hour for the first 12 hours, every 3 hours out to 10 days, and every 12 hours after that. Its data assimilation system runs 6-hourly continuous cycles using the GSI-hybrid. -GSI has many functions specially designed and tuned for GFS. Although the release version of the community GSI includes all the functions used by the +GSI has many functions specifically designed and tuned for GFS. Although the release version of the community GSI includes all the functions used by the operational systems, the DTC can only support the GSI regional applications because the DTC is not able to run GFS on community computers. Beginning with release version 3.2, the DTC began to introduce the use of GSI for global applications, assuming users can obtain the GFS background through the NCEP data hub or by running GFS themselves. @@ -22,21 +22,20 @@ \subsection{The Difference between Global and Regional GSI} %------------------------------------------------------------------------------- As mentioned above, all of the NCEP operational systems use GSI as their analysis system. The majority of the GSI code is common to these -operational systems. Very little of the source code is specific to a particular operational system. The main differences in GSI operational application +operational systems. Very little source code is specific to a particular operational system. The main differences in the GSI operational application come from the configuration the run scripts and namelist parameters. The different GSI applications need different backgrounds, observations, and fixed files. For the GFS system, GSI needs: \begin{itemize} -\item GFS Backgrounds: typically, GSI uses 6-h GFS forecasts as the background. GFS 3-h and 9-h forecasts are also needed for the FGAT function in +\item GFS Backgrounds: Typically, GSI uses 6-h GFS forecasts as the background. 
GFS 3-h and 9-h forecasts are also needed for the FGAT function in the GSI analysis. Both surface and atmosphere forecasts are needed. -\item Observations: NCEP has several sets of BUFR/PrepBUFR observations files with global coverage for global systems. The files that start with the +\item Observations: NCEP has several sets of BUFR/prepBUFR observation files with global coverage for global systems. The files that start with the prefix \textbf{GDAS} are for the 6-hourly global data assimilation system. These files have more data available for the analysis, but have a longer delay -for use in real-time. The files that start with \textbf{gfs} are for the 4 time daily GFS forecast. The different operational systems need different observation -data files because they require different kinds of observations with different coverage, cut-off times, and quality control processes. All these observation +for use in real-time. The files that start with \textbf{gfs} are for the GFS forecasts. Different operational systems need different observation data files because they require different kinds of observations with different coverage, cut-off times, and quality control processes. All these observation files are read in and processed in GSI by the same section of code. Therefore, there is no problem using GFS observation data files for regional GSI applications, as is described in the practice cases and the GSI User\textquotesingle s Guide. Using regional BUFR files for global applications will cause -scientific problems since the data only covers part of the analysis domain, but GSI can still read in the observations and perform the analysis. +problems since the data only cover part of the analysis domain, but GSI can still read in the observations and perform the analysis. \item Fixed files: Section 3.1 of the GSI User\textquotesingle s Guide introduced the notion that different operational systems have their own fixed files. 
These global fixed files can be downloaded as a separate tar ball from the GSI user\textquotesingle s website (\url{http://www.dtcenter.org/com-GSI/users/downloads/index.php}). For the GFS GSI application, the big difference is the background error covariance (BE). @@ -67,7 +66,7 @@ \subsection{Global GFS Scripts} \begin{table}[htbp] \centering -\caption{The grid dimensions for GFS} +\caption{The grid dimensions for GFS.} \begin{tabular}{|p{3cm}|p{4.5cm}|p{4.45cm}|} \hline &EULERIAN&SEMI-LAGRANGIAN\\ @@ -107,14 +106,14 @@ \subsection{Global GFS Scripts} \end{table} The first part of the global analysis run script, just as in the regional script, sets up the computer environment and case configuration. The primary -differences between the global and regional are the specification of the GFS case and the global application namelist. +differences between the global and regional scripts are the specification of the GFS case and the global application namelist. \begin{small} \begin{verbatim} GFSCASE=T126 GSI_NAMELIST=${GSI_ROOT}/run/comgsi_namelist_gfs.sh \end{verbatim} \end{small} -While the regional script simply specifies the background and BE files, the global script needs to know the background resolution setup by defining the +While the regional script simply specifies the background and BE files, the global script needs to know the background resolution by defining the following parameters: \begin{scriptsize} \begin{verbatim} @@ -144,7 +143,7 @@ \subsection{Global GFS Scripts} \end{verbatim} \end{scriptsize} -Just as with the regional analysis run script, the global script double checks the needed parameters, creates a run directory, copies the background, observations, and fixed files into the run directory. It generates the namelist, and places that in the run directory as well. 
+Just as with the regional analysis run script, the global script double checks the needed parameters, creates a run directory, and copies the background, observations, and fixed files into the run directory. It generates the namelist, and places it in the run directory as well. \begin{enumerate} \item Specify the values of \verb|LATA|, \verb|LONA|, \verb|DELTIME|, \verb|resol| based on the choice of \verb|JCAP|: \begin{scriptsize} @@ -257,9 +256,9 @@ \subsection{Global GFS Scripts} fi \end{verbatim} \end{scriptsize} -Both surface and atmosphere files at 03, 06, 09 hour forecasts are needed. \\ -\item More observations files are available\\ -In the sample run script, many more observations are listed for use: +Both surface and atmosphere files at 03, 06, and 09 hour forecasts are needed. \\ +\item More observations files are available\\ +In the sample run script, many more observations are listed for use: \begin{scriptsize} \begin{verbatim} # Link to the other observation data @@ -340,12 +339,10 @@ \subsection{Sample Results} \end{verbatim} \end{scriptsize} -The majority of these files were seen after running the GSI regional analysis examples in section \ref{sec3.2.3} of the Basic User\textquotesingle s +The majority of these files existed after running the GSI regional analysis examples in section \ref{sec3.2.3} of the Basic User\textquotesingle s Guide, and they provide the same information about the GSI run. Of note, the GSI global analysis run includes more radiance observations, resulting -in more radiance \verb|diag| files in this list. Instead of the single background file \verb|wrf_inout| as seen with the regional analysis, the global analysis -background is split between the two files \verb|siganl|, for atmosphere, and \verb|sfcanl.gsi| for the surface. 
A quick check of the standard output file -\verb|stdout| shows information similar to that generated by the regional runs for the namelist, data ingest, and minimization, but is quite different with -respect to information on the background IO. +in more radiance \verb|diag| files in this list. Instead of the single background file \verb|wrf_inout| as seen with the regional analysis, the global analysis background is split between the two files \verb|siganl|, for the atmosphere, and \verb|sfcanl.gsi| for the surface. A quick check of the standard output file +\verb|stdout| shows information similar to that generated by the regional runs for the namelist, data ingest, and minimization, but is quite different with respect to information on the background IO. Please visit our online tutorial for more details regarding how to conduct a global GSI run. @@ -353,7 +350,7 @@ \subsection{Sample Results} \section{Introduction to Chemical Analysis} %------------------------------------------------------------------------------- -The GSI has been developed to analyze chemical observations, such as MODIS AOD or PM2.5, to improve the pollution forecast with chemical models. +The GSI has also been developed to analyze chemical observations, such as MODIS AOD or PM2.5, to improve the pollution forecasts with chemical models. In this release, GSI can do the following chemical analyses: \begin{table}[htbp] @@ -375,7 +372,7 @@ \section{Introduction to Chemical Analysis} \label{tab62} \end{table} -The GSI run scripts for chemical analysis (\verb|./run/run_gsi_chem.ksh| ) and namelist (\verb|./run/comgsi_namelist_chem.sh|) are provided with this release. +The GSI run script for a chemical analysis (\verb|./run/run_gsi_chem.ksh| ) and associated namelist (\verb|./run/comgsi_namelist_chem.sh|) are provided with this release. Sample background and observation files are provided through the on-line tutorial. 
%------------------------------------------------------------------------------- @@ -383,10 +380,10 @@ \subsection{Setup GSI Run Scripts for Chemical Analysis} %------------------------------------------------------------------------------- The script \verb|run_gsi_chem.ksh| was built based on regional GSI run scripts and has a similar structure to the regional run script \verb|run_gsi_regional.ksh|, -but include a couple of different details. +but include a couple of differences. -The first part of the run script sets up the computer environment and case configuration. This is the similar to the regional analysis run scripts, except -for the specification of the chemical cases (\verb|bk_core| and \verb|obs_type|), and the namelist for the chemical application: +The first part of the run script sets up the computer environment and case configuration. This is similar to the regional analysis run scripts, except +for the specification of (\verb|bk_core| and \verb|obs_type|) for a given chemical case, and the namelist for the chemical application: \begin{scriptsize} \begin{verbatim} GSI_NAMELIST=${GSI_ROOT}/run/comgsi_namelist_chem.sh @@ -398,13 +395,12 @@ \subsection{Setup GSI Run Scripts for Chemical Analysis} obs_type=PM25 \end{verbatim} \end{scriptsize} -The choice of the chemical cases (\verb|bk_core| and \verb|obs_type|) need to match with the options \verb|PREPBUFR| and \verb|BK_FILE|, which -set background and observation files. Table \ref{tab63} shows how to setup these two options for each case: +The choices of (\verb|bk_core| and \verb|obs_type|) for a chemical case need to match with the options \verb|PREPBUFR| and \verb|BK_FILE|, which set background and observation files. 
Table \ref{tab63} shows how to set up these two options for each case: \begin{table}[htbp] \centering \begin{footnotesize} -\caption{List of GSI chemical analyses} +\caption{List of GSI chemical analyses.} \begin{tabular}{|p{0.7cm}|p{7cm}|p{7cm}|} \hline case&background (\textit{BK\_FILE} ; \textit{bk\_core} ) & Observation (\textit{PREPBUFR}; \textit{obs\_type})\\ @@ -425,10 +421,7 @@ \subsection{Setup GSI Run Scripts for Chemical Analysis} \label{tab63} \end{footnotesize} \end{table} -Similar to the regional run script, this chemical run script will also double check the needed parameters. Then it creates a run directory and generates -the namelist in the directory and copies the background, observations, and fixed files into the run directory. Users who run the cases listed in table -\ref{tab62} do not need to change the rest of the run scripts. But users who need to build new cases may need to know the differences between -chemical and regional applications, which is shown below. +Similar to the regional run script, this chemical run script will also double check the needed parameters. Then it creates a run directory, generates the namelist, and copies the background, observations, and fixed files into the run directory. Users who run the cases listed in table \ref{tab62} do not need to change the rest of the run script. But users who need to build new cases may need to know the differences between chemical and regional applications, which is shown below. \begin{enumerate} \item Specify the name of the background and observations: @@ -507,7 +500,7 @@ \subsection{Setup GSI Run Scripts for Chemical Analysis} \subsection{Sample Results} %------------------------------------------------------------------------------- -In this section, the case 1 in Table \ref{tab62} will be used as example. 
After a successful run of the GSI Chem analysis, the contents of the run directory, with the clean option turned on, will look something like this: +In this section, case one in Table \ref{tab62} will be used as an example. After a successful run of the GSI Chem analysis, the contents of the run directory, with the clean option turned on, will look something like this: \begin{scriptsize} \begin{verbatim} aeroinfo fit_w1.2012060318 fort.212 fort.226 pcpinfo @@ -525,10 +518,10 @@ \subsection{Sample Results} \end{verbatim} \end{scriptsize} -Following the instruction in Chapter 5, the following steps are conducted to check the results of this GSI chemical analysis: +Following instructions from Chapter 5, the following steps are conducted to check the results of this GSI chemical analysis: \begin{enumerate} -\item Check stdout file: \\ +\item Check the standard output file: \\ \begin{itemize} \item Read in chemical background fields: @@ -612,19 +605,12 @@ \subsection{Sample Results} \end{itemize} \item Analysis increments: -After successfully run through, the analysis increment should be check the see if the data impact are reasonable. +After successfully running GSI, the analysis increments should be checked to see if data impacts are reasonable. \begin{figure}[h!] \centering \includegraphics[width=0.8\textwidth]{./images/ch6_chem_inc_seas1_bc1.png} - \caption{The analysis increment in the lowest level for SEAS\_1 (left) and BC1 (right)} + \caption{Analysis increments in the lowest level for SEAS\_1 (left) and BC1 (right).} \label{fig:chem} \end{figure} - \end{enumerate} - - - - - - diff --git a/doc/GSI_user_guide/references.bib b/doc/GSI_user_guide/references.bib index be2b1c7e2..17de7ed32 100644 --- a/doc/GSI_user_guide/references.bib +++ b/doc/GSI_user_guide/references.bib @@ -19,7 +19,7 @@ @article{Gaspari1999 author={G. Gaspari and S. E. 
Cohn}, journal={Quarterly Journal of the Royal Meteorological Society}, volume={125}, - Issue={554} + Issue={554}, pages={723-757}, year={1999}, } diff --git a/doc/GSI_user_guide/title.tex b/doc/GSI_user_guide/title.tex index f1d41cd13..7741a66dd 100644 --- a/doc/GSI_user_guide/title.tex +++ b/doc/GSI_user_guide/title.tex @@ -8,19 +8,21 @@ \includegraphics[width=0.8\textwidth]{images/GSIbanner.png}\\[2em] {\color{darkcerulean} - \Huge{User's Guide Version 3.5 }\\[1em] - \normalsize{August 2016}\\[5em] + \Huge{User's Guide Version 3.7}\\[1em] + \normalsize{September 2017}\\[5em] } % \normalsize{by}\\[1em] - \normalsize{Ming Hu}\\ + \normalsize{Ming Hu, Guoqing Ge}\\ \textit{\small{National Oceanic and Atmospheric Administration (NOAA)/Earth System Research Laboratory}}\\ \textit{\small{Cooperative Institute for Research in Environmental Sciences (CIRES)}}\\[2em] \normalsize{Hui Shao, Don Stark, Kathryn Newman, Chunhua Zhou}\\ \textit{\small{National Center for Atmospheric Research (NCAR)}}\\[2em] + \normalsize{Jeff Beck}\\ + \textit{\small{NOAA/Earth System Research Laboratory and CIRA}}\\[2em] \normalsize{Xin Zhang}\\ - \textit{\small{NOAA/Earth System Research Laboratory and CIRES}}\\[4em] - + \textit{\small{University Corporation for Atmospheric Research (UCAR)}}\\[2em] + \includegraphics[width=0.5\textwidth]{images/DTClogo.png}\\ \vspace{1em} diff --git a/fix b/fix index f58850055..498693216 160000 --- a/fix +++ b/fix @@ -1 +1 @@ -Subproject commit f58850055f4ceb8b07f073ddbdc7aaad97258819 +Subproject commit 498693216334697195f99d385e6f0aa5be6e3645 diff --git a/libsrc b/libsrc index ad442980f..b80d19af7 160000 --- a/libsrc +++ b/libsrc @@ -1 +1 @@ -Subproject commit ad442980f4fe743e58976124538a460fb9b1c7f8 +Subproject commit b80d19af781e26590ed4cb9d9a0b0c13e812e663 diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index ef0748f0a..ba1858275 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -144,4 +144,4 @@ cmake_minimum_required(VERSION 2.8) 
target_link_libraries(${GSIEXEC} ${GSISHAREDLIB} ${GSILIB} ${GSISHAREDLIB} ${WRF_LIBRARIES} ${NETCDF_LIBRARIES_F90} ${NETCDF_LIBRARIES} ${HDF5_Fortran_HL_LIBRARIES} ${MPI_Fortran_LIBRARIES} ${LAPACK_LIBRARIES} -L./ ${EXTRA_LINKER_FLAGS} ${HDF5_LIBRARIES} ${CURL_LIBRARIES} ${CORE_LIBRARIES} ${CORE_BUILT} - ${GSI_LDFLAGS} ${NCDIAG_LIBRARIES} ${ZLIB_LIBRARIES} ${wrflib} ) + ${GSI_LDFLAGS} ${NCDIAG_LIBRARIES} ${ZLIB_LIBRARIES} ${wrflib} ${EXTRA_LINKER_FLAGS} ) diff --git a/src/control2state.f90 b/src/control2state.f90 index 9d9f87196..f53b2b7fa 100644 --- a/src/control2state.f90 +++ b/src/control2state.f90 @@ -124,13 +124,18 @@ subroutine control2state(xhat,sval,bval) 'qr ', 'qs ', 'qg ', 'qh ' /) logical :: ls_u,ls_v,ls_w,ls_prse,ls_q,ls_tsen,ls_ql,ls_qi logical :: ls_qr,ls_qs,ls_qg,ls_qh -real(r_kind),pointer,dimension(:,:) :: sv_ps,sv_sst -real(r_kind),pointer,dimension(:,:) :: sv_gust,sv_vis,sv_pblh,sv_wspd10m,sv_tcamt,sv_lcbas -real(r_kind),pointer,dimension(:,:) :: sv_td2m,sv_mxtm,sv_mitm,sv_pmsl,sv_howv,sv_cldch -real(r_kind),pointer,dimension(:,:) :: sv_uwnd10m,sv_vwnd10m -real(r_kind),pointer,dimension(:,:,:) :: sv_u,sv_v,sv_w,sv_prse,sv_q,sv_tsen,sv_tv,sv_oz -real(r_kind),pointer,dimension(:,:,:) :: sv_rank3 -real(r_kind),pointer,dimension(:,:) :: sv_rank2 +real(r_kind),pointer,dimension(:,:) :: sv_ps=>NULL(),sv_sst=>NULL() +real(r_kind),pointer,dimension(:,:) :: sv_gust=>NULL(),sv_vis=>NULL(),sv_pblh=>NULL() +real(r_kind),pointer,dimension(:,:) :: sv_wspd10m=>NULL(),sv_tcamt=>NULL(),sv_lcbas=>NULL() +real(r_kind),pointer,dimension(:,:) :: sv_td2m=>NULL(),sv_mxtm=>NULL(),sv_mitm=>NULL() +real(r_kind),pointer,dimension(:,:) :: sv_pmsl=>NULL(),sv_howv=>NULL(),sv_cldch=>NULL() +real(r_kind),pointer,dimension(:,:) :: sv_uwnd10m=>NULL(),sv_vwnd10m=>NULL() +real(r_kind),pointer,dimension(:,:,:) :: sv_u=>NULL(),sv_v=>NULL() +real(r_kind),pointer,dimension(:,:,:) :: sv_w=>NULL(),sv_prse=>NULL() +real(r_kind),pointer,dimension(:,:,:) :: sv_q=>NULL(),sv_tsen=>NULL() 
+real(r_kind),pointer,dimension(:,:,:) :: sv_tv=>NULL(),sv_oz=>NULL() +real(r_kind),pointer,dimension(:,:,:) :: sv_rank3=>NULL() +real(r_kind),pointer,dimension(:,:) :: sv_rank2=>NULL() real(r_kind),allocatable,dimension(:,:,:):: uland,vland,uwter,vwter diff --git a/src/control2state_ad.f90 b/src/control2state_ad.f90 index 6c28dd799..450e89b2a 100644 --- a/src/control2state_ad.f90 +++ b/src/control2state_ad.f90 @@ -115,13 +115,16 @@ subroutine control2state_ad(rval,bval,grad) 'qr ', 'qs ', 'qg ', 'qh ' /) logical :: ls_u,ls_v,ls_w,ls_prse,ls_q,ls_tsen,ls_ql,ls_qi logical :: ls_qr,ls_qs,ls_qg,ls_qh -real(r_kind),pointer,dimension(:,:) :: rv_ps,rv_sst -real(r_kind),pointer,dimension(:,:) :: rv_gust,rv_vis,rv_pblh,rv_wspd10m,rv_tcamt,rv_lcbas -real(r_kind),pointer,dimension(:,:) :: rv_td2m,rv_mxtm,rv_mitm,rv_pmsl,rv_howv,rv_cldch -real(r_kind),pointer,dimension(:,:) :: rv_uwnd10m,rv_vwnd10m -real(r_kind),pointer,dimension(:,:,:) :: rv_u,rv_v,rv_w,rv_prse,rv_q,rv_tsen,rv_tv,rv_oz -real(r_kind),pointer,dimension(:,:,:) :: rv_rank3 -real(r_kind),pointer,dimension(:,:) :: rv_rank2 +real(r_kind),pointer,dimension(:,:) :: rv_ps=>NULL(),rv_sst=>NULL() +real(r_kind),pointer,dimension(:,:) :: rv_gust=>NULL(),rv_vis=>NULL(),rv_pblh=>NULL() +real(r_kind),pointer,dimension(:,:) :: rv_wspd10m=>NULL(),rv_tcamt,rv_lcbas=>NULL() +real(r_kind),pointer,dimension(:,:) :: rv_td2m=>NULL(),rv_mxtm=>NULL(),rv_mitm=>NULL() +real(r_kind),pointer,dimension(:,:) :: rv_pmsl=>NULL(),rv_howv=>NULL(),rv_cldch=>NULL() +real(r_kind),pointer,dimension(:,:) :: rv_uwnd10m=>NULL(),rv_vwnd10m=>NULL() +real(r_kind),pointer,dimension(:,:,:) :: rv_u=>NULL(),rv_v=>NULL(),rv_w=>NULL(),rv_prse=>NULL() +real(r_kind),pointer,dimension(:,:,:) :: rv_q=>NULL(),rv_tsen=>NULL(),rv_tv=>NULL(),rv_oz=>NULL() +real(r_kind),pointer,dimension(:,:,:) :: rv_rank3=>NULL() +real(r_kind),pointer,dimension(:,:) :: rv_rank2=>NULL() real(r_kind),allocatable,dimension(:,:,:):: uland,vland,uwter,vwter diff --git 
a/src/crtm_interface.f90 b/src/crtm_interface.f90 index c30d39883..683b7ddc9 100644 --- a/src/crtm_interface.f90 +++ b/src/crtm_interface.f90 @@ -506,7 +506,8 @@ subroutine init_crtm(init_pass,mype_diaghdr,mype,nchanl,isis,obstype,radmod) ! Are there aerosols to affect CRTM? if (radmod%laerosol_fwd) then - allocate(aero(nsig,n_actual_aerosols),aero_conc(msig,n_actual_aerosols),auxrh(msig)) + if(.not.allocated(aero)) allocate(aero(nsig,n_actual_aerosols)) + if(.not.allocated(aero_conc)) allocate(aero_conc(msig,n_actual_aerosols),auxrh(msig)) n_actual_aerosols_wk=n_actual_aerosols n_aerosols_fwd_wk=n_aerosols_fwd n_aerosols_jac_wk=n_aerosols_jac @@ -644,7 +645,7 @@ subroutine init_crtm(init_pass,mype_diaghdr,mype,nchanl,isis,obstype,radmod) ! Allocate structures for radiative transfer - if (radmod%lcloud_fwd .and. (.not. mixed_use)) & + if (radmod%lcloud_fwd .and. (.not. mixed_use) .and. (.not. allocated(rtsolution0)) ) & allocate(rtsolution0(channelinfo(sensorindex)%n_channels,1)) allocate(& @@ -1464,7 +1465,7 @@ subroutine call_crtm(obstype,obstime,data_s,nchanl,nreal,ich, & if(obs_time(2) > 2) day_of_year = day_of_year + leap_day call ssu_input_setvalue( options%SSU, & - Time=float(obs_time(1)) + float(day_of_year)/(365.0_r_kind+leap_day)) + Time=dble(obs_time(1)) + dble(day_of_year)/(365.0_r_kind+leap_day)) endif diff --git a/src/enkf/CMakeLists.txt b/src/enkf/CMakeLists.txt index b16939462..be89b3a00 100644 --- a/src/enkf/CMakeLists.txt +++ b/src/enkf/CMakeLists.txt @@ -8,6 +8,9 @@ if(BUILD_ENKF) if(BUILD_PRODUCTION) set(ENKF_SUFFIX "gfs") set(ENKF_BASE "global_enkf") + elseif(BUILD_WRF) + set(ENKF_SUFFIX "wrf") + set(ENKF_BASE "enkf_wrf") elseif(BUILD_GFS) set(ENKF_SUFFIX "gfs") set(ENKF_BASE "enkf_gfs") @@ -70,5 +73,5 @@ if(BUILD_ENKF) add_executable(${ENKFEXEC} enkf_main.f90) target_link_libraries(${ENKFEXEC} enkflib enkfdeplib ${GSISHAREDLIB} ${GSILIB} ${CORE_LIBRARIES} ${MPI_Fortran_LIBRARIES} ${LAPACK_LIBRARIES} ${NETCDF_LIBRARIES_F90} ${NETCDF_LIBRARIES} 
${HDF5_Fortran_HL_LIBRARIES} - ${EXTRA_LINKER_FLAGS} ${HDF5_LIBRARIES} ${CURL_LIBRARIES} ${GSI_LDFLAGS} ${CORE_BUILT} ${CORE_LIBRARIES} ${CORE_BUILT} ${NCDIAG_LIBRARIES} ) + ${EXTRA_LINKER_FLAGS} ${HDF5_LIBRARIES} ${CURL_LIBRARIES} ${GSI_LDFLAGS} ${CORE_BUILT} ${CORE_LIBRARIES} ${CORE_BUILT} ${NCDIAG_LIBRARIES} ${EXTRA_LINKER_FLAGS}) endif() diff --git a/src/enkf/gridinfo_gfs.f90 b/src/enkf/gridinfo_gfs.f90 index f217f027f..2143e4c8b 100644 --- a/src/enkf/gridinfo_gfs.f90 +++ b/src/enkf/gridinfo_gfs.f90 @@ -132,12 +132,12 @@ subroutine getgridinfo(fileprefix, reducedgrid) ! define sighead on all tasks. call sigio_sropen(iunit,trim(filename),iret) if (iret /= 0) then - print *,'error reading file in gridinfo',trim(filename),' on task',nproc + print *,'error reading file in gridinfo ',trim(filename),' on task',nproc call stop2(24) end if call sigio_srhead(iunit,sighead,iret) if (iret /= 0) then - print *,'error reading file in gridinfo',trim(filename),' on task',nproc + print *,'error reading file in gridinfo ',trim(filename),' on task',nproc call stop2(24) end if call sigio_sclose(iunit,iret) diff --git a/src/enkf/gridinfo_nmmb.f90 b/src/enkf/gridinfo_nmmb.f90 index 28752420d..d5e951ecd 100644 --- a/src/enkf/gridinfo_nmmb.f90 +++ b/src/enkf/gridinfo_nmmb.f90 @@ -17,8 +17,8 @@ module gridinfo real(r_single),public, allocatable, dimension(:,:) :: logp integer,public :: npts ! 
supported variable names in anavinfo -character(len=max_varname_length),public, dimension(8) :: vars3d_supported = (/ 'u', 'v', 'tv', 'tsen', 'q', 'oz', 'cw', 'prse'/) -character(len=max_varname_length),public, dimension(2) :: vars2d_supported = (/ 'ps', 'sst' /) +character(len=max_varname_length),public, dimension(8) :: vars3d_supported = (/ 'u ', 'v ', 'tv ', 'tsen', 'q ', 'oz ', 'cw ', 'prse'/) +character(len=max_varname_length),public, dimension(2) :: vars2d_supported = (/ 'ps ', 'sst' /) contains subroutine getgridinfo(fileprefix, reducedgrid) diff --git a/src/enkf/gridinfo_wrf.f90 b/src/enkf/gridinfo_wrf.f90 index 19da8ada6..a2bb02bc5 100644 --- a/src/enkf/gridinfo_wrf.f90 +++ b/src/enkf/gridinfo_wrf.f90 @@ -73,8 +73,8 @@ module gridinfo public :: cross2dot public :: dot2cross ! supported variable names in anavinfo - character(len=max_varname_length),public, dimension(10) :: vars3d_supported = (/'u', 'v', 'tv', 'q', 'cw', 'w', 'ph', 'oz', 'tsen', 'prse' /) - character(len=max_varname_length),public, dimension(2) :: vars2d_supported = (/ 'ps', 'sst' /) + character(len=max_varname_length),public, dimension(10) :: vars3d_supported = (/'u ', 'v ', 'tv ', 'q ', 'cw ', 'w ', 'ph ', 'oz ', 'tsen', 'prse' /) + character(len=max_varname_length),public, dimension(2) :: vars2d_supported = (/ 'ps ', 'sst' /) contains diff --git a/src/enkf/gridio_nmmb.f90 b/src/enkf/gridio_nmmb.f90 index 67d8f0b7e..751ef1204 100644 --- a/src/enkf/gridio_nmmb.f90 +++ b/src/enkf/gridio_nmmb.f90 @@ -242,7 +242,7 @@ subroutine readgriddata(nanal,vars3d,vars2d,n3d,n2d,levels,ndim,ntimes,fileprefi end subroutine readgriddata -subroutine writegriddata(nanal,vars3d,vars2d,n3d,n2d,levels,ndim,grdin) +subroutine writegriddata(nanal,vars3d,vars2d,n3d,n2d,levels,ndim,grdin,no_inflate_flag) use params, only: nbackgrounds, anlfileprefixes,fgfileprefixes implicit none @@ -253,6 +253,8 @@ subroutine writegriddata(nanal,vars3d,vars2d,n3d,n2d,levels,ndim,grdin) integer, intent(in) :: n2d,n3d,ndim integer, 
dimension(0:n3d), intent(in) :: levels real(r_single), dimension(npts,ndim,nbackgrounds), intent(inout) :: grdin +logical, intent(in) :: no_inflate_flag + !Not used here, but added to make writegriddata(...) consistent with gridio_gfs.f90 character(len=500):: filename diff --git a/src/enkf/gridio_wrf.f90 b/src/enkf/gridio_wrf.f90 index aef9896ce..a6456636a 100644 --- a/src/enkf/gridio_wrf.f90 +++ b/src/enkf/gridio_wrf.f90 @@ -516,7 +516,7 @@ end subroutine readgriddata_nmm ! writegriddata.f90: write WRF-ARW or WRF-NMM analysis !------------------------------------------------------------------------- - subroutine writegriddata(nanal,vars3d,vars2d,n3d,n2d,levels,ndim,vargrid) + subroutine writegriddata(nanal,vars3d,vars2d,n3d,n2d,levels,ndim,vargrid,no_inflate_flag) use constants use params, only: nbackgrounds, anlfileprefixes, fgfileprefixes include 'netcdf.inc' @@ -528,6 +528,8 @@ subroutine writegriddata(nanal,vars3d,vars2d,n3d,n2d,levels,ndim,vargrid) character(len=max_varname_length), dimension(n3d), intent(in) :: vars3d integer, dimension(0:n3d), intent(in) :: levels real(r_single), dimension(npts,ndim,nbackgrounds), intent(in) :: vargrid + logical, intent(in) :: no_inflate_flag + !Not used here, but added to make writegriddata(...) consistent with gridio_gfs.f90 !---------------------------------------------------------------------- ! Define variables computed within subroutine @@ -781,6 +783,7 @@ subroutine writegriddata(nanal,vars3d,vars2d,n3d,n2d,levels,ndim,vargrid) read(datestring(9:10),'(i2)') ihour if (nmm .and. nmm_restart) then varstrname = 'NSTART_HOUR' + if(.not. 
allocated(vargrid_native)) allocate(vargrid_native(1,1,1)) vargrid_native(1,1,1) = ihour call writenetcdfdata(filename,vargrid_native,varstrname,1,1,1) end if diff --git a/src/enkf/observer_nmmb.f90 b/src/enkf/observer_nmmb.f90 deleted file mode 100644 index edf25a36e..000000000 --- a/src/enkf/observer_nmmb.f90 +++ /dev/null @@ -1,130 +0,0 @@ -module observer_enkf -use general_tll2xy_mod, only: llxy_cons - -private -public init_observer_enkf, calc_linhx - -type(llxy_cons) :: gt_data - - -contains - -subroutine init_observer_enkf - use kinds, only: r_kind, i_kind - use params, only: nlons, nlats - use gridinfo, only: latsgrd, lonsgrd - use general_tll2xy_mod, only: general_create_llxy_transform - implicit none - - integer(i_kind) :: i, j - real(r_kind), dimension(nlats, nlons) :: lats, lons - - - do i = 1,nlons - do j = 1,nlats - lats(j,i) = latsgrd((j-1)*nlons+i) - lons(j,i) = lonsgrd((j-1)*nlons+i) - enddo - enddo - call general_create_llxy_transform(lats, lons, nlats, nlons, gt_data) - -end subroutine init_observer_enkf - -subroutine calc_linhx(hx, dens, rlat, rlon, time, dhx_dx, hx_ens) -!$$$ subprogram documentation block -! . . . . -! subprogram: calc_linhx -! prgmmr: shlyaeva org: esrl/psd date: 2016-11-29 -! -! abstract: -! -! program history log: -! 2016-11-29 shlyaeva -! -! input argument list: -! -! output argument list: -! -! attributes: -! language: f95 -! -!$$$ - use kinds, only: r_kind,i_kind,r_single - use params, only: nstatefields, nlons, nlats, nlevs, nhr_state, fhr_assim - use gridinfo, only: npts, latsgrd, lonsgrd - use statevec, only: nsdim - use constants, only: zero,one,pi - use sparsearr, only: sparr - use general_tll2xy_mod, only: general_tll2xy - use mpisetup - implicit none - -! Declare passed variables - real(r_single) ,intent(in ) :: hx ! H(x_mean) - real(r_single),dimension(npts,nsdim,nstatefields),intent(in ) :: dens ! x_ens - x_mean, state vector space - real(r_single) ,intent(in ) :: rlat, rlon ! 
observation lat and lon in radians - real(r_single) ,intent(in ) :: time ! observation time relative to middle of window - type(sparr) ,intent(in ) :: dhx_dx ! dH(x)/dx |x_mean profiles - real(r_single) ,intent( out) :: hx_ens ! H (x_ens) - -! Declare local variables - integer(i_kind) :: ix, iy, it, ixp, iyp, itp - real(r_kind) :: dx, dy - integer(i_kind) :: i,j - real(r_kind) :: delx, dely, delxp, delyp, delt, deltp - logical :: outside - - call general_tll2xy(gt_data, real(rlon,r_kind), real(rlat,r_kind), dx, dy, outside) - - ix = max(1,min(int(dx),nlons)) - iy = max(1,min(int(dy),nlats)) - - delx = max(zero, min(dx - float(ix), one)) - dely = max(zero, min(dy - float(iy), one)) - - delxp = one - delx - delyp = one - dely - - ixp = min(ix + 1, nlons) - iyp = min(iy + 1, nlats) - - iy = iy - 1; iyp = iyp - 1 - - it = 1 - do while (time + fhr_assim > nhr_state(it) .and. it < nstatefields) - it = it + 1 - enddo - itp = it - it = max(1,itp-1) - if (it /= itp) then - delt = (time + fhr_assim - nhr_state(it)) / (nhr_state(itp) - nhr_state(it)) - else - delt = one - endif - - deltp = one - delt - delxp = one - delx - delyp = one - dely - - - ! interpolate state horizontally and in time and do dot product with dHx/dx profile - ! 
saves from calculating interpolated x_ens for each state variable - - hx_ens = hx - do i = 1, dhx_dx%nnz - j = dhx_dx%ind(i) - hx_ens = hx_ens + dhx_dx%val(i) * & - (( dens( iy*nlons + ix , j, it) *delyp*delxp & - + dens( iyp*nlons + ix , j, it) *dely *delxp & - + dens( iy*nlons + ixp, j, it) *delyp*delx & - + dens( iyp*nlons + ixp, j, it) *dely *delx )*deltp & - + ( dens( iy*nlons + ix , j, itp)*delyp*delxp & - + dens( iyp*nlons + ix , j, itp)*dely *delxp & - + dens( iy*nlons + ixp, j, itp)*delyp*delx & - + dens( iyp*nlons + ixp, j, itp)*dely *delx )*delt) - enddo - - return -end subroutine calc_linhx - -end module observer_enkf diff --git a/src/enkf/observer_nmmb.f90 b/src/enkf/observer_nmmb.f90 new file mode 120000 index 000000000..faa9da004 --- /dev/null +++ b/src/enkf/observer_nmmb.f90 @@ -0,0 +1 @@ +observer_reg.f90 \ No newline at end of file diff --git a/src/enkf/observer_reg.f90 b/src/enkf/observer_reg.f90 index edf25a36e..7e89ca540 100644 --- a/src/enkf/observer_reg.f90 +++ b/src/enkf/observer_reg.f90 @@ -1,8 +1,12 @@ module observer_enkf use general_tll2xy_mod, only: llxy_cons +use statevec, only: nsdim, ns3d, ns2d, slevels +use params, only: nlevs, neigv private -public init_observer_enkf, calc_linhx +public init_observer_enkf, setup_linhx, calc_linhx, calc_linhx_modens,& + destroy_observer_enkf +integer, allocatable, dimension(:) :: kindx type(llxy_cons) :: gt_data @@ -17,6 +21,7 @@ subroutine init_observer_enkf implicit none integer(i_kind) :: i, j + integer(i_kind) :: nn,n,k,nl real(r_kind), dimension(nlats, nlons) :: lats, lons @@ -28,9 +33,34 @@ subroutine init_observer_enkf enddo call general_create_llxy_transform(lats, lons, nlats, nlons, gt_data) + nn = 0 + do n=1,ns3d + if (n .eq. 1) then + nl = slevels(n) + else + nl = slevels(n)-slevels(n-1) + endif + !print *,'ns3d,levs',n,nl + do k=1,nl + nn = nn + 1 + kindx(nn) = k + ! 
FIXME - deal with state variables with nlevs+1 levels (like prse) + if (kindx(nn) > nlevs) kindx(nn)=nlevs + enddo + enddo + do n=1,ns2d ! 2d fields are treated as surface fields. + nn = nn + 1 + kindx(nn) = 1 + enddo + end subroutine init_observer_enkf -subroutine calc_linhx(hx, dens, rlat, rlon, time, dhx_dx, hx_ens) +subroutine destroy_observer_enkf + if (allocated(kindx)) deallocate(kindx) +end subroutine destroy_observer_enkf + +subroutine setup_linhx(rlat, rlon, time, ix, delx, ixp, delxp, iy, dely, & + iyp, delyp, it, delt, itp, deltp) !$$$ subprogram documentation block ! . . . . ! subprogram: calc_linhx @@ -39,7 +69,8 @@ subroutine calc_linhx(hx, dens, rlat, rlon, time, dhx_dx, hx_ens) ! abstract: ! ! program history log: -! 2016-11-29 shlyaeva +! 2018-09-05 Guoqing Ge -Added this fuction in observer_wrf to be +! consistent with observer_gfs.f90 ! ! input argument list: ! @@ -54,24 +85,16 @@ subroutine calc_linhx(hx, dens, rlat, rlon, time, dhx_dx, hx_ens) use gridinfo, only: npts, latsgrd, lonsgrd use statevec, only: nsdim use constants, only: zero,one,pi - use sparsearr, only: sparr use general_tll2xy_mod, only: general_tll2xy use mpisetup implicit none ! Declare passed variables - real(r_single) ,intent(in ) :: hx ! H(x_mean) - real(r_single),dimension(npts,nsdim,nstatefields),intent(in ) :: dens ! x_ens - x_mean, state vector space real(r_single) ,intent(in ) :: rlat, rlon ! observation lat and lon in radians real(r_single) ,intent(in ) :: time ! observation time relative to middle of window - type(sparr) ,intent(in ) :: dhx_dx ! dH(x)/dx |x_mean profiles - real(r_single) ,intent( out) :: hx_ens ! H (x_ens) - -! 
Declare local variables - integer(i_kind) :: ix, iy, it, ixp, iyp, itp + integer(i_kind), intent(out) :: ix, iy, it, ixp, iyp, itp + real(r_kind), intent(out) :: delx, dely, delxp, delyp, delt, deltp real(r_kind) :: dx, dy - integer(i_kind) :: i,j - real(r_kind) :: delx, dely, delxp, delyp, delt, deltp logical :: outside call general_tll2xy(gt_data, real(rlon,r_kind), real(rlat,r_kind), dx, dy, outside) @@ -82,9 +105,6 @@ subroutine calc_linhx(hx, dens, rlat, rlon, time, dhx_dx, hx_ens) delx = max(zero, min(dx - float(ix), one)) dely = max(zero, min(dy - float(iy), one)) - delxp = one - delx - delyp = one - dely - ixp = min(ix + 1, nlons) iyp = min(iy + 1, nlats) @@ -106,25 +126,125 @@ subroutine calc_linhx(hx, dens, rlat, rlon, time, dhx_dx, hx_ens) delxp = one - delx delyp = one - dely +end subroutine setup_linhx + +subroutine calc_linhx(hx, dens, dhx_dx, hx_ens, & + ix, delx, ixp, delxp, iy, dely, iyp, delyp, & + it, delt, itp, deltp) +!$$$ subprogram documentation block +! . . . . +! subprogram: calc_linhx +! prgmmr: shlyaeva org: esrl/psd date: 2016-11-29 +! +! abstract: +! +! program history log: +! 2016-11-29 shlyaeva +! +! input argument list: +! +! output argument list: +! +! attributes: +! language: f95 +! +!$$$ + use kinds, only: r_kind,i_kind,r_single + use params, only: nstatefields, nlons, nlats, nlevs, nhr_state, fhr_assim + use gridinfo, only: npts, latsgrd, lonsgrd + use statevec, only: nsdim + use constants, only: zero,one,pi + use sparsearr, only: sparr + use mpisetup + implicit none + +! Declare passed variables + real(r_single) ,intent(in ) :: hx ! H(x_mean) + real(r_single),dimension(npts,nsdim,nstatefields),intent(in ) :: dens ! x_ens - x_mean, state vector space + integer(i_kind), intent(in) :: ix, iy, it, ixp, iyp, itp + real(r_kind), intent(in) :: delx, dely, delxp, delyp, delt, deltp + type(sparr) ,intent(in ) :: dhx_dx ! dH(x)/dx |x_mean profiles + real(r_single) ,intent( out) :: hx_ens ! H (x_ens) + integer(i_kind) i,j,k ! 
interpolate state horizontally and in time and do dot product with dHx/dx profile ! saves from calculating interpolated x_ens for each state variable - hx_ens = hx do i = 1, dhx_dx%nnz j = dhx_dx%ind(i) - hx_ens = hx_ens + dhx_dx%val(i) * & - (( dens( iy*nlons + ix , j, it) *delyp*delxp & - + dens( iyp*nlons + ix , j, it) *dely *delxp & - + dens( iy*nlons + ixp, j, it) *delyp*delx & - + dens( iyp*nlons + ixp, j, it) *dely *delx )*deltp & - + ( dens( iy*nlons + ix , j, itp)*delyp*delxp & - + dens( iyp*nlons + ix , j, itp)*dely *delxp & - + dens( iy*nlons + ixp, j, itp)*delyp*delx & - + dens( iyp*nlons + ixp, j, itp)*dely *delx )*delt) + k = kindx(j) + hx_ens = hx_ens + dhx_dx%val(i) * & + (( dens( ix*nlons + iy , j, it) *delxp*delyp & + + dens( ixp*nlons + iy , j, it) *delx *delyp & + + dens( ix*nlons + iyp, j, it) *delxp*dely & + + dens( ixp*nlons + iyp, j, it) *delx *dely )*deltp & + + ( dens( ix*nlons + iy , j, itp)*delxp*delyp & + + dens( ixp*nlons + iy , j, itp)*delx *delyp & + + dens( ix*nlons + iyp, j, itp)*delxp*dely & + + dens( ixp*nlons + iyp, j, itp)*delx *dely )*delt) enddo return end subroutine calc_linhx +subroutine calc_linhx_modens(hx, dens, dhx_dx, hx_ens, & + ix, delx, ixp, delxp, iy, dely, iyp, delyp, & + it, delt, itp, deltp, vscale) +!$$$ subprogram documentation block +! . . . . +! subprogram: calc_linhx +! prgmmr: shlyaeva org: esrl/psd date: 2016-11-29 +! +! abstract: +! +! program history log: +! 2016-11-29 shlyaeva +! +! input argument list: +! +! output argument list: +! +! attributes: +! language: f95 +! +!$$$ + use kinds, only: r_kind,i_kind,r_single + use params, only: nstatefields, nlons, nlats, nlevs, nhr_state, fhr_assim + use gridinfo, only: npts, latsgrd, lonsgrd + use statevec, only: nsdim + use constants, only: zero,one,pi + use sparsearr, only: sparr + use mpisetup + implicit none + +! Declare passed variables + real(r_single) ,intent(in ) :: hx ! 
H(x_mean) + real(r_single),dimension(npts,nsdim,nstatefields),intent(in ) :: dens ! x_ens - x_mean, state vector space + integer(i_kind), intent(in) :: ix, iy, it, ixp, iyp, itp + real(r_kind), intent(in) :: delx, dely, delxp, delyp, delt, deltp + type(sparr) ,intent(in ) :: dhx_dx ! dH(x)/dx |x_mean profiles + real(r_single) ,intent( out) :: hx_ens(neigv)! H (x_ens) + real(r_double),dimension(neigv,nlevs+1) ,intent(in ) :: vscale ! vertical scaling (for modulated ens) + integer(i_kind) i,j,k + + ! interpolate state horizontally and in time and do dot product with dHx/dx profile + ! saves from calculating interpolated x_ens for each state variable + hx_ens = hx + do i = 1, dhx_dx%nnz + j = dhx_dx%ind(i) + k = kindx(j) + hx_ens(:) = hx_ens(:) + dhx_dx%val(i) * & + (( dens( ix*nlons + iy , j, it) *vscale(:,k)*delxp*delyp & + + dens( ixp*nlons + iy , j, it) *vscale(:,k)*delx *delyp & + + dens( ix*nlons + iyp, j, it) *vscale(:,k)*delxp*dely & + + dens( ixp*nlons + iyp, j, it) *vscale(:,k)*delx *dely )*deltp & + + ( dens( ix*nlons + iy , j, itp)*vscale(:,k)*delxp*delyp & + + dens( ixp*nlons + iy , j, itp)*vscale(:,k)*delx *delyp & + + dens( ix*nlons + iyp, j, itp)*vscale(:,k)*delxp*dely & + + dens( ixp*nlons + iyp, j, itp)*vscale(:,k)*delx *dely )*delt) + enddo + + return +end subroutine calc_linhx_modens + end module observer_enkf diff --git a/src/enkf/observer_wrf.f90 b/src/enkf/observer_wrf.f90 deleted file mode 100644 index edf25a36e..000000000 --- a/src/enkf/observer_wrf.f90 +++ /dev/null @@ -1,130 +0,0 @@ -module observer_enkf -use general_tll2xy_mod, only: llxy_cons - -private -public init_observer_enkf, calc_linhx - -type(llxy_cons) :: gt_data - - -contains - -subroutine init_observer_enkf - use kinds, only: r_kind, i_kind - use params, only: nlons, nlats - use gridinfo, only: latsgrd, lonsgrd - use general_tll2xy_mod, only: general_create_llxy_transform - implicit none - - integer(i_kind) :: i, j - real(r_kind), dimension(nlats, nlons) :: lats, lons - - - do i 
= 1,nlons - do j = 1,nlats - lats(j,i) = latsgrd((j-1)*nlons+i) - lons(j,i) = lonsgrd((j-1)*nlons+i) - enddo - enddo - call general_create_llxy_transform(lats, lons, nlats, nlons, gt_data) - -end subroutine init_observer_enkf - -subroutine calc_linhx(hx, dens, rlat, rlon, time, dhx_dx, hx_ens) -!$$$ subprogram documentation block -! . . . . -! subprogram: calc_linhx -! prgmmr: shlyaeva org: esrl/psd date: 2016-11-29 -! -! abstract: -! -! program history log: -! 2016-11-29 shlyaeva -! -! input argument list: -! -! output argument list: -! -! attributes: -! language: f95 -! -!$$$ - use kinds, only: r_kind,i_kind,r_single - use params, only: nstatefields, nlons, nlats, nlevs, nhr_state, fhr_assim - use gridinfo, only: npts, latsgrd, lonsgrd - use statevec, only: nsdim - use constants, only: zero,one,pi - use sparsearr, only: sparr - use general_tll2xy_mod, only: general_tll2xy - use mpisetup - implicit none - -! Declare passed variables - real(r_single) ,intent(in ) :: hx ! H(x_mean) - real(r_single),dimension(npts,nsdim,nstatefields),intent(in ) :: dens ! x_ens - x_mean, state vector space - real(r_single) ,intent(in ) :: rlat, rlon ! observation lat and lon in radians - real(r_single) ,intent(in ) :: time ! observation time relative to middle of window - type(sparr) ,intent(in ) :: dhx_dx ! dH(x)/dx |x_mean profiles - real(r_single) ,intent( out) :: hx_ens ! H (x_ens) - -! 
Declare local variables - integer(i_kind) :: ix, iy, it, ixp, iyp, itp - real(r_kind) :: dx, dy - integer(i_kind) :: i,j - real(r_kind) :: delx, dely, delxp, delyp, delt, deltp - logical :: outside - - call general_tll2xy(gt_data, real(rlon,r_kind), real(rlat,r_kind), dx, dy, outside) - - ix = max(1,min(int(dx),nlons)) - iy = max(1,min(int(dy),nlats)) - - delx = max(zero, min(dx - float(ix), one)) - dely = max(zero, min(dy - float(iy), one)) - - delxp = one - delx - delyp = one - dely - - ixp = min(ix + 1, nlons) - iyp = min(iy + 1, nlats) - - iy = iy - 1; iyp = iyp - 1 - - it = 1 - do while (time + fhr_assim > nhr_state(it) .and. it < nstatefields) - it = it + 1 - enddo - itp = it - it = max(1,itp-1) - if (it /= itp) then - delt = (time + fhr_assim - nhr_state(it)) / (nhr_state(itp) - nhr_state(it)) - else - delt = one - endif - - deltp = one - delt - delxp = one - delx - delyp = one - dely - - - ! interpolate state horizontally and in time and do dot product with dHx/dx profile - ! saves from calculating interpolated x_ens for each state variable - - hx_ens = hx - do i = 1, dhx_dx%nnz - j = dhx_dx%ind(i) - hx_ens = hx_ens + dhx_dx%val(i) * & - (( dens( iy*nlons + ix , j, it) *delyp*delxp & - + dens( iyp*nlons + ix , j, it) *dely *delxp & - + dens( iy*nlons + ixp, j, it) *delyp*delx & - + dens( iyp*nlons + ixp, j, it) *dely *delx )*deltp & - + ( dens( iy*nlons + ix , j, itp)*delyp*delxp & - + dens( iyp*nlons + ix , j, itp)*dely *delxp & - + dens( iy*nlons + ixp, j, itp)*delyp*delx & - + dens( iyp*nlons + ixp, j, itp)*dely *delx )*delt) - enddo - - return -end subroutine calc_linhx - -end module observer_enkf diff --git a/src/enkf/observer_wrf.f90 b/src/enkf/observer_wrf.f90 new file mode 120000 index 000000000..faa9da004 --- /dev/null +++ b/src/enkf/observer_wrf.f90 @@ -0,0 +1 @@ +observer_reg.f90 \ No newline at end of file diff --git a/src/enkf/smooth_gfs.f90 b/src/enkf/smooth_gfs.f90 index 2bfa96c19..2defe5bfd 100644 --- a/src/enkf/smooth_gfs.f90 +++ 
b/src/enkf/smooth_gfs.f90 @@ -43,6 +43,7 @@ subroutine smooth(grids) if (n2(np) > ncdim) n2(np) = ncdim if (n1(np) > ncdim .and. npmax == 0) npmax = np-1 enddo +if (npmax == 0) npmax = numproc-1 ! spectrally smooth the grids ! bcast out to all procs. if (nproc <= npmax) then diff --git a/src/get_gefs_for_regional.f90 b/src/get_gefs_for_regional.f90 index c754a2536..b10f9cce3 100644 --- a/src/get_gefs_for_regional.f90 +++ b/src/get_gefs_for_regional.f90 @@ -275,6 +275,7 @@ subroutine get_gefs_for_regional nsig_gfs=levs jcap_gfs=njcap + if(allocated(nems_vcoord)) deallocate(nems_vcoord) allocate(nems_vcoord(levs+1,3,2)) call nemsio_getfilehead(gfile,iret=iret,vcoord=nems_vcoord) if ( iret /= 0 ) call error_msg(trim(my_name),trim(filename),' ', & @@ -297,6 +298,7 @@ subroutine get_gefs_for_regional call stop2(85) endif + if(allocated(vcoord)) deallocate(vcoord) allocate(vcoord(levs+1,nvcoord)) vcoord(:,1:nvcoord) = nems_vcoord(:,1:nvcoord,1) deallocate(nems_vcoord) diff --git a/src/m_obsLList.F90 b/src/m_obsLList.F90 index 44d8939af..e83046cd9 100644 --- a/src/m_obsLList.F90 +++ b/src/m_obsLList.F90 @@ -15,6 +15,8 @@ module m_obsLList ! latlonRange from (elat,elon) values of observations. ! 2016-07-25 j.guo - added getTLDdotprod, to accumulate obsNode TLD-dot_produst ! 2016-09-19 j.guo - added function lincr_() to extend []_lsize(). +! 2017-08-26 G.Ge - change allocate(headLL%mold,mold=mold) +! to allocate(headLL%mold,source=mold) ! ! input argument list: see Fortran 90 style document below ! 
@@ -263,7 +265,7 @@ subroutine lreset_(headLL,mold,stat) return endif endif - allocate(headLL%mold,mold=mold) + allocate(headLL%mold,source=mold) _EXIT_(myname_) return end subroutine lreset_ diff --git a/src/mod_fv3_lola.f90 b/src/mod_fv3_lola.f90 index 27714205e..c59526952 100644 --- a/src/mod_fv3_lola.f90 +++ b/src/mod_fv3_lola.f90 @@ -105,7 +105,7 @@ subroutine generate_anl_grid(nx,ny,grid_lon,grid_lont,grid_lat,grid_latt) !$$$ end documentation block use kinds, only: r_kind,i_kind - use constants, only: quarter,one,two,half,zero,deg2rad,rearth + use constants, only: quarter,one,two,half,zero,deg2rad,rearth,rad2deg use gridmod, only:grid_ratio_fv3_regional, region_lat,region_lon,nlat,nlon use gridmod, only: region_dy,region_dx,region_dyi,region_dxi,coeffy,coeffx use gridmod, only:init_general_transform,region_dy,region_dx @@ -160,9 +160,9 @@ subroutine generate_anl_grid(nx,ny,grid_lon,grid_lont,grid_lat,grid_latt) allocate(gcrlon(nx,ny)) do j=1,ny do i=1,nx - xc(i,j)=cosd(grid_latt(i,j))*cosd(grid_lont(i,j)) - yc(i,j)=cosd(grid_latt(i,j))*sind(grid_lont(i,j)) - zc(i,j)=sind(grid_latt(i,j)) + xc(i,j)=cos(grid_latt(i,j)*deg2rad)*cos(grid_lont(i,j)*deg2rad) + yc(i,j)=cos(grid_latt(i,j)*deg2rad)*sin(grid_lont(i,j)*deg2rad) + zc(i,j)=sin(grid_latt(i,j)*deg2rad) enddo enddo @@ -176,8 +176,8 @@ subroutine generate_anl_grid(nx,ny,grid_lon,grid_lont,grid_lat,grid_latt) xcent=rnorm*xcent ycent=rnorm*ycent zcent=rnorm*zcent - centlat=asind(zcent) - centlon=atan2d(ycent,xcent) + centlat=asin(zcent)*rad2deg + centlon=atan2(ycent,xcent)*rad2deg !! 
compute new lats, lons call rotate2deg(grid_lont,grid_latt,gcrlon,gcrlat, & @@ -255,7 +255,7 @@ subroutine generate_anl_grid(nx,ny,grid_lon,grid_lont,grid_lat,grid_latt) enddo do i=1,nlat - dxx=rearth*cosd(rlat_in(i,1))*adlon*deg2rad + dxx=rearth*cos(rlat_in(i,1)*deg2rad)*adlon*deg2rad dxxi=one/dxx dxxh=half/dxx do j=1,nlon @@ -503,9 +503,9 @@ subroutine generate_anl_grid(nx,ny,grid_lon,grid_lont,grid_lat,grid_latt) do j=1,ny+1 do i=1,nx+1 - x(i,j)=cosd(grid_lat(i,j))*cosd(grid_lon(i,j)) - y(i,j)=cosd(grid_lat(i,j))*sind(grid_lon(i,j)) - z(i,j)=sind(grid_lat(i,j)) + x(i,j)=cos(grid_lat(i,j)*deg2rad)*cos(grid_lon(i,j)*deg2rad) + y(i,j)=cos(grid_lat(i,j)*deg2rad)*sin(grid_lon(i,j)*deg2rad) + z(i,j)=sin(grid_lat(i,j)*deg2rad) enddo enddo @@ -517,9 +517,9 @@ subroutine generate_anl_grid(nx,ny,grid_lon,grid_lont,grid_lat,grid_latt) rlat=half*(grid_lat(i,j)+grid_lat(i+1,j)) rlon=half*(grid_lon(i,j)+grid_lon(i+1,j)) ! vector to center of the edge - xr=cosd(rlat)*cosd(rlon) - yr=cosd(rlat)*sind(rlon) - zr=sind(rlat) + xr=cos(rlat*deg2rad)*cos(rlon*deg2rad) + yr=cos(rlat*deg2rad)*sin(rlon*deg2rad) + zr=sin(rlat*deg2rad) ! vector of the edge xu= x(i+1,j)-x(i,j) yu= y(i+1,j)-y(i,j) @@ -538,9 +538,9 @@ subroutine generate_anl_grid(nx,ny,grid_lon,grid_lont,grid_lat,grid_latt) do i=1,nx+1 rlat=half*(grid_lat(i,j)+grid_lat(i,j+1)) rlon=half*(grid_lon(i,j)+grid_lon(i,j+1)) - xr=cosd(rlat)*cosd(rlon) - yr=cosd(rlat)*sind(rlon) - zr=sind(rlat) + xr=cos(rlat*deg2rad)*cos(rlon*deg2rad) + yr=cos(rlat*deg2rad)*sin(rlon*deg2rad) + zr=sin(rlat*deg2rad) xv= x(i,j+1)-x(i,j) yv= y(i,j+1)-y(i,j) zv= z(i,j+1)-z(i,j) @@ -833,6 +833,7 @@ subroutine rotate2deg(rlon_in,rlat_in,rlon_out,rlat_out,rlon0,rlat0,nx,ny) use kinds, only: r_kind,i_kind + use constants, only: deg2rad,rearth,rad2deg implicit none integer(i_kind), intent(in ) :: nx,ny ! fv3 tile x- and y-dimensions @@ -846,26 +847,26 @@ subroutine rotate2deg(rlon_in,rlat_in,rlon_out,rlat_out,rlon0,rlat0,nx,ny) do i=1,nx ! 1. 
compute x,y,z from rlon_in, rlat_in - x=cosd(rlat_in(i,j))*cosd(rlon_in(i,j)) - y=cosd(rlat_in(i,j))*sind(rlon_in(i,j)) - z=sind(rlat_in(i,j)) + x=cos(rlat_in(i,j)*deg2rad)*cos(rlon_in(i,j)*deg2rad) + y=cos(rlat_in(i,j)*deg2rad)*sin(rlon_in(i,j)*deg2rad) + z=sin(rlat_in(i,j)*deg2rad) ! 2. rotate (x,y,z) about z axis by amount rlon0 -- (x,y,z) --> (xt,yt,zt) - xt= x*cosd(rlon0)+y*sind(rlon0) - yt=-x*sind(rlon0)+y*cosd(rlon0) + xt= x*cos(rlon0*deg2rad)+y*sin(rlon0*deg2rad) + yt=-x*sin(rlon0*deg2rad)+y*cos(rlon0*deg2rad) zt=z ! 3. rotate (xt,yt,zt) about yt axis by amount rlat0 --- (xt,yt,zt) --> (xtt,ytt,ztt) - xtt= xt*cosd(rlat0)+zt*sind(rlat0) + xtt= xt*cos(rlat0*deg2rad)+zt*sin(rlat0*deg2rad) ytt= yt - ztt=-xt*sind(rlat0)+zt*cosd(rlat0) + ztt=-xt*sin(rlat0*deg2rad)+zt*cos(rlat0*deg2rad) ! 4. compute rlon_out, rlat_out from xtt,ytt,ztt - rlat_out(i,j)=asind(ztt) - rlon_out(i,j)=atan2d(ytt,xtt) + rlat_out(i,j)=asin(ztt)*rad2deg + rlon_out(i,j)=atan2(ytt,xtt)*rad2deg enddo enddo end subroutine rotate2deg @@ -889,6 +890,7 @@ subroutine unrotate2deg(rlon_in,rlat_in,rlon_out,rlat_out,rlon0,rlat0,nx,ny) !$$$ end documentation block use kinds, only: r_kind,i_kind + use constants, only: deg2rad,rearth,rad2deg implicit none real(r_kind),intent(in ) :: rlon_out(nx,ny),rlat_out(nx,ny),rlon0,rlat0 @@ -899,20 +901,20 @@ subroutine unrotate2deg(rlon_in,rlat_in,rlon_out,rlat_out,rlon0,rlat0,nx,ny) integer(i_kind) i,j do j=1,ny do i=1,nx - xtt=cosd(rlat_out(i,j))*cosd(rlon_out(i,j)) - ytt=cosd(rlat_out(i,j))*sind(rlon_out(i,j)) - ztt=sind(rlat_out(i,j)) + xtt=cos(rlat_out(i,j)*deg2rad)*cos(rlon_out(i,j)*deg2rad) + ytt=cos(rlat_out(i,j)*deg2rad)*sin(rlon_out(i,j)*deg2rad) + ztt=sin(rlat_out(i,j)*deg2rad) - xt= xtt*cosd(rlat0)-ztt*sind(rlat0) + xt= xtt*cos(rlat0*deg2rad)-ztt*sin(rlat0*deg2rad) yt= ytt - zt= xtt*sind(rlat0)+ztt*cosd(rlat0) + zt= xtt*sin(rlat0*deg2rad)+ztt*cos(rlat0*deg2rad) - x= xt*cosd(rlon0)-yt*sind(rlon0) - y= xt*sind(rlon0)+yt*cosd(rlon0) + x= 
xt*cos(rlon0*deg2rad)-yt*sin(rlon0*deg2rad) + y= xt*sin(rlon0*deg2rad)+yt*cos(rlon0*deg2rad) z= zt - rlat_in(i,j)=asind(z) - rlon_in(i,j)=atan2d(y,x) + rlat_in(i,j)=asin(z)*rad2deg + rlon_in(i,j)=atan2(y,x)*rad2deg enddo enddo diff --git a/src/radiance_mod.f90 b/src/radiance_mod.f90 index 15136a7fa..b4a5b9ae8 100644 --- a/src/radiance_mod.f90 +++ b/src/radiance_mod.f90 @@ -98,8 +98,8 @@ module radiance_mod integer(i_kind),pointer,dimension(:) :: lcloud4crtm=> NULL() ! -1 clear-sky; 0 forwad operator only; 1 iallsky logical :: laerosol_fwd,laerosol integer(i_kind),pointer,dimension(:) :: laerosol4crtm => NULL() ! -1 no aero used; 0 forwad operator only; 1 iaerosol - real(r_kind),pointer,dimension(:) :: cclr - real(r_kind),pointer,dimension(:) :: ccld + real(r_kind),pointer,dimension(:) :: cclr => NULL() + real(r_kind),pointer,dimension(:) :: ccld => NULL() end type rad_obs_type type(rad_obs_type),save,dimension(:),allocatable :: rad_type_info diff --git a/src/read_files.f90 b/src/read_files.f90 index 73dee4789..39e3f742a 100644 --- a/src/read_files.f90 +++ b/src/read_files.f90 @@ -255,7 +255,7 @@ subroutine read_files(mype) idate5(3)=idateg(3); idate5(4)=idateg(1); idate5(5)=0 call w3fs21(idate5,nmings) nming2=nmings+60*hourg - write(6,*)'READ_FILES: atm guess file',filename,hourg,idateg,nming2 + write(6,*)'READ_FILES: atm guess file ',filename,hourg,idateg,nming2 t4dv=real((nming2-iwinbgn),r_kind)*r60inv if (l4dvar.or.l4densvar) then if (t4dvwinlen) cycle diff --git a/src/setupaod.f90 b/src/setupaod.f90 index c7d482633..340eeafec 100644 --- a/src/setupaod.f90 +++ b/src/setupaod.f90 @@ -418,7 +418,7 @@ subroutine setupaod(lunin,mype,nchanl,nreal,nobs,& if(luse_obsdiag)allocate (my_head%diags(icc)) my_head%nlaero = icc ! 
profile observation count - call get_ij(mm1,slats,slons,my_head%ij(1),my_head%wij(1)) + call get_ij(mm1,slats,slons,my_head%ij,my_head%wij) my_head%time=dtime my_head%luse=luse(n) diff --git a/src/setupoz.f90 b/src/setupoz.f90 index 34317c5d6..d928819d1 100644 --- a/src/setupoz.f90 +++ b/src/setupoz.f90 @@ -1445,8 +1445,7 @@ subroutine setupozlev(lunin,mype,stats_oz,nlevs,nreal,nobs,& ! Set (i,j,k) indices of guess gridpoint that bound obs location my_head%dlev = dpres - call get_ijk(mm1,dlat,dlon,dpres,& - my_head%ij(1),my_head%wij(1)) + call get_ijk(mm1,dlat,dlon,dpres,my_head%ij,my_head%wij) do k=1,8 my_head%wij(k)=my_head%wij(k)*constoz diff --git a/ush/comenkf_namelist.sh b/ush/comenkf_namelist.sh new file mode 100755 index 000000000..405b0a0ab --- /dev/null +++ b/ush/comenkf_namelist.sh @@ -0,0 +1,131 @@ + +cat < enkf.nml + + &nam_enkf + datestring = '$ANAL_TIME', + datapath = './', + analpertwtnh = 0.9, + analpertwtsh = 0.9, + analpertwttr = 0.9, + lupd_satbiasc = .false., + zhuberleft = 1.e10, + zhuberright = 1.e10, + huber = .false., + varqc = .false., + covinflatemax = 1.e2, + covinflatemin = 1.0, + pseudo_rh = .true., + corrlengthnh = 500, + corrlengthsh = 500, + corrlengthtr = 500, + obtimelnh = 1.e30, + obtimelsh = 1.e30, + obtimeltr = 1.e30, + iassim_order = 0, + lnsigcutoffnh = 0.4, + lnsigcutoffsh = 0.4, + lnsigcutofftr = 0.4, + lnsigcutoffsatnh = 0.4, + lnsigcutoffsatsh = 0.4, + lnsigcutoffsattr = 0.4, + lnsigcutoffpsnh = 0.4, + lnsigcutoffpssh = 0.4, + lnsigcutoffpstr = 0.4, + simple_partition = .true., + nlons = $NLONS, + nlats = $NLATS, + smoothparm = -1, + readin_localization = .false., + saterrfact = 1.0, + numiter = 6, + sprd_tol = 1.e30, + paoverpb_thresh = 0.99, + reducedgrid = .false., + nlevs = $NLEVS, + nanals = $NMEM_ENKF, + nvars = 5, + deterministic = .true., + sortinc = .true., + univaroz = .true., + regional = .true., + adp_anglebc = .true., + angord = 4, + use_edges = .false., + emiss_bc = .true., + biasvar = -500 +/ + &satobs_enkf 
+ sattypes_rad(1) = 'amsua_n15', dsis(1) = 'amsua_n15', + sattypes_rad(2) = 'amsua_n18', dsis(2) = 'amsua_n18', + sattypes_rad(3) = 'amsua_n19', dsis(3) = 'amsua_n19', + sattypes_rad(4) = 'amsub_n16', dsis(4) = 'amsub_n16', + sattypes_rad(5) = 'amsub_n17', dsis(5) = 'amsub_n17', + sattypes_rad(6) = 'amsua_aqua', dsis(6) = 'amsua_aqua', + sattypes_rad(7) = 'amsua_metop-a', dsis(7) = 'amsua_metop-a', + sattypes_rad(8) = 'airs_aqua', dsis(8) = 'airs281SUBSET_aqua', + sattypes_rad(9) = 'hirs3_n17', dsis(9) = 'hirs3_n17', + sattypes_rad(10) = 'hirs4_n19', dsis(10)= 'hirs4_n19', + sattypes_rad(11) = 'hirs4_metop-a', dsis(11)= 'hirs4_metop-a', + sattypes_rad(12) = 'mhs_n18', dsis(12)= 'mhs_n18', + sattypes_rad(13) = 'mhs_n19', dsis(13)= 'mhs_n19', + sattypes_rad(14) = 'mhs_metop-a', dsis(14)= 'mhs_metop-a', + sattypes_rad(15) = 'goes_img_g11', dsis(15)= 'imgr_g11', + sattypes_rad(16) = 'goes_img_g12', dsis(16)= 'imgr_g12', + sattypes_rad(17) = 'goes_img_g13', dsis(17)= 'imgr_g13', + sattypes_rad(18) = 'goes_img_g14', dsis(18)= 'imgr_g14', + sattypes_rad(19) = 'goes_img_g15', dsis(19)= 'imgr_g15', + sattypes_rad(20) = 'avhrr3_n18', dsis(20)= 'avhrr3_n18', + sattypes_rad(21) = 'avhrr3_metop-a',dsis(21)= 'avhrr3_metop-a', + sattypes_rad(22) = 'avhrr3_n19', dsis(22)= 'avhrr3_n19', + sattypes_rad(23) = 'amsre_aqua', dsis(23)= 'amsre_aqua', + sattypes_rad(24) = 'ssmis_f16', dsis(24)= 'ssmis_f16', + sattypes_rad(25) = 'ssmis_f17', dsis(25)= 'ssmis_f17', + sattypes_rad(26) = 'ssmis_f18', dsis(26)= 'ssmis_f18', + sattypes_rad(27) = 'ssmis_f19', dsis(27)= 'ssmis_f19', + sattypes_rad(28) = 'ssmis_f20', dsis(28)= 'ssmis_f20', + sattypes_rad(29) = 'sndrd1_g11', dsis(29)= 'sndrD1_g11', + sattypes_rad(30) = 'sndrd2_g11', dsis(30)= 'sndrD2_g11', + sattypes_rad(31) = 'sndrd3_g11', dsis(31)= 'sndrD3_g11', + sattypes_rad(32) = 'sndrd4_g11', dsis(32)= 'sndrD4_g11', + sattypes_rad(33) = 'sndrd1_g12', dsis(33)= 'sndrD1_g12', + sattypes_rad(34) = 'sndrd2_g12', dsis(34)= 'sndrD2_g12', + 
sattypes_rad(35) = 'sndrd3_g12', dsis(35)= 'sndrD3_g12', + sattypes_rad(36) = 'sndrd4_g12', dsis(36)= 'sndrD4_g12', + sattypes_rad(37) = 'sndrd1_g13', dsis(37)= 'sndrD1_g13', + sattypes_rad(38) = 'sndrd2_g13', dsis(38)= 'sndrD2_g13', + sattypes_rad(39) = 'sndrd3_g13', dsis(39)= 'sndrD3_g13', + sattypes_rad(40) = 'sndrd4_g13', dsis(40)= 'sndrD4_g13', + sattypes_rad(41) = 'sndrd1_g14', dsis(41)= 'sndrD1_g14', + sattypes_rad(42) = 'sndrd2_g14', dsis(42)= 'sndrD2_g14', + sattypes_rad(43) = 'sndrd3_g14', dsis(43)= 'sndrD3_g14', + sattypes_rad(44) = 'sndrd4_g14', dsis(44)= 'sndrD4_g14', + sattypes_rad(45) = 'sndrd1_g15', dsis(45)= 'sndrD1_g15', + sattypes_rad(46) = 'sndrd2_g15', dsis(46)= 'sndrD2_g15', + sattypes_rad(47) = 'sndrd3_g15', dsis(47)= 'sndrD3_g15', + sattypes_rad(48) = 'sndrd4_g15', dsis(48)= 'sndrD4_g15', + sattypes_rad(49) = 'iasi_metop-a', dsis(49)= 'iasi616_metop-a', + sattypes_rad(50) = 'seviri_m08', dsis(50)= 'seviri_m08', + sattypes_rad(51) = 'seviri_m09', dsis(51)= 'seviri_m09', + sattypes_rad(52) = 'seviri_m10', dsis(52)= 'seviri_m10', + sattypes_rad(53) = 'amsua_metop-b', dsis(53)= 'amsua_metop-b', + sattypes_rad(54) = 'hirs4_metop-b', dsis(54)= 'hirs4_metop-b', + sattypes_rad(55) = 'mhs_metop-b', dsis(15)= 'mhs_metop-b', + sattypes_rad(56) = 'iasi_metop-b', dsis(56)= 'iasi616_metop-b', + sattypes_rad(57) = 'avhrr3_metop-b',dsis(56)= 'avhrr3_metop-b', + sattypes_rad(58) = 'atms_npp', dsis(58)= 'atms_npp', + sattypes_rad(59) = 'cris_npp', dsis(59)= 'cris_npp', + / + &ozobs_enkf + sattypes_oz(1) = 'sbuv2_n16', + sattypes_oz(2) = 'sbuv2_n17', + sattypes_oz(3) = 'sbuv2_n18', + sattypes_oz(4) = 'sbuv2_n19', + sattypes_oz(5) = 'omi_aura', + sattypes_oz(6) = 'gome_metop-a', + sattypes_oz(7) = 'gome_metop-b', + / +&nam_wrf + arw = $IF_ARW, + nmm = $IF_NMM, + / +EOF diff --git a/ush/comenkf_namelist_gfs.sh b/ush/comenkf_namelist_gfs.sh new file mode 100755 index 000000000..9d83c2ead --- /dev/null +++ b/ush/comenkf_namelist_gfs.sh @@ -0,0 +1,127 @@ + +cat < 
enkf.nml + + &nam_enkf + datestring = '$ANAL_TIME', + datapath = './', + analpertwtnh = 0.85, + analpertwtsh = 0.85, + analpertwttr = 0.85, + lupd_satbiasc = .false., + zhuberleft = 1.e10, + zhuberright = 1.e10, + huber = .false., + varqc = .false., + covinflatemax = 1.e2, + covinflatemin = 1.0, + pseudo_rh = .true., + corrlengthnh = 2000, + corrlengthsh = 2000, + corrlengthtr = 2000, + obtimelnh = 1.e30, + obtimelsh = 1.e30, + obtimeltr = 1.e30, + iassim_order = 0, + lnsigcutoffnh = 2.0, + lnsigcutoffsh = 2.0, + lnsigcutofftr = 2.0, + lnsigcutoffsatnh = 2.0, + lnsigcutoffsatsh = 2.0, + lnsigcutoffsattr = 2.0, + lnsigcutoffpsnh = 2.0, + lnsigcutoffpssh = 2.0, + lnsigcutoffpstr = 2.0, + nlons = $LONA, + nlats = $LATA, + smoothparm = -1, + readin_localization = .true., + saterrfact = 1.0, + numiter = 3, + sprd_tol = 1.e30, + paoverpb_thresh = 0.98, + reducedgrid = .false., + nlevs = $LEVS, + nanals = $NMEM_ENKF, + nvars = $NVARS, + deterministic = .true., + sortinc = .true., + univaroz = .true., + regional = .false., + adp_anglebc = .true., + angord = 4, + nmmb = .false., + use_edges = .false., + emiss_bc = .true., + biasvar = -500 +/ + &satobs_enkf + sattypes_rad(1) = 'amsua_n15', dsis(1) = 'amsua_n15', + sattypes_rad(2) = 'amsua_n18', dsis(2) = 'amsua_n18', + sattypes_rad(3) = 'amsua_n19', dsis(3) = 'amsua_n19', + sattypes_rad(4) = 'amsub_n16', dsis(4) = 'amsub_n16', + sattypes_rad(5) = 'amsub_n17', dsis(5) = 'amsub_n17', + sattypes_rad(6) = 'amsua_aqua', dsis(6) = 'amsua_aqua', + sattypes_rad(7) = 'amsua_metop-a', dsis(7) = 'amsua_metop-a', + sattypes_rad(8) = 'airs_aqua', dsis(8) = 'airs281SUBSET_aqua', + sattypes_rad(9) = 'hirs3_n17', dsis(9) = 'hirs3_n17', + sattypes_rad(10) = 'hirs4_n19', dsis(10)= 'hirs4_n19', + sattypes_rad(11) = 'hirs4_metop-a', dsis(11)= 'hirs4_metop-a', + sattypes_rad(12) = 'mhs_n18', dsis(12)= 'mhs_n18', + sattypes_rad(13) = 'mhs_n19', dsis(13)= 'mhs_n19', + sattypes_rad(14) = 'mhs_metop-a', dsis(14)= 'mhs_metop-a', + sattypes_rad(15) = 
'goes_img_g11', dsis(15)= 'imgr_g11', + sattypes_rad(16) = 'goes_img_g12', dsis(16)= 'imgr_g12', + sattypes_rad(17) = 'goes_img_g13', dsis(17)= 'imgr_g13', + sattypes_rad(18) = 'goes_img_g14', dsis(18)= 'imgr_g14', + sattypes_rad(19) = 'goes_img_g15', dsis(19)= 'imgr_g15', + sattypes_rad(20) = 'avhrr3_n18', dsis(20)= 'avhrr3_n18', + sattypes_rad(21) = 'avhrr3_metop-a',dsis(21)= 'avhrr3_metop-a', + sattypes_rad(22) = 'avhrr3_n19', dsis(22)= 'avhrr3_n19', + sattypes_rad(23) = 'amsre_aqua', dsis(23)= 'amsre_aqua', + sattypes_rad(24) = 'ssmis_f16', dsis(24)= 'ssmis_f16', + sattypes_rad(25) = 'ssmis_f17', dsis(25)= 'ssmis_f17', + sattypes_rad(26) = 'ssmis_f18', dsis(26)= 'ssmis_f18', + sattypes_rad(27) = 'ssmis_f19', dsis(27)= 'ssmis_f19', + sattypes_rad(28) = 'ssmis_f20', dsis(28)= 'ssmis_f20', + sattypes_rad(29) = 'sndrd1_g11', dsis(29)= 'sndrD1_g11', + sattypes_rad(30) = 'sndrd2_g11', dsis(30)= 'sndrD2_g11', + sattypes_rad(31) = 'sndrd3_g11', dsis(31)= 'sndrD3_g11', + sattypes_rad(32) = 'sndrd4_g11', dsis(32)= 'sndrD4_g11', + sattypes_rad(33) = 'sndrd1_g12', dsis(33)= 'sndrD1_g12', + sattypes_rad(34) = 'sndrd2_g12', dsis(34)= 'sndrD2_g12', + sattypes_rad(35) = 'sndrd3_g12', dsis(35)= 'sndrD3_g12', + sattypes_rad(36) = 'sndrd4_g12', dsis(36)= 'sndrD4_g12', + sattypes_rad(37) = 'sndrd1_g13', dsis(37)= 'sndrD1_g13', + sattypes_rad(38) = 'sndrd2_g13', dsis(38)= 'sndrD2_g13', + sattypes_rad(39) = 'sndrd3_g13', dsis(39)= 'sndrD3_g13', + sattypes_rad(40) = 'sndrd4_g13', dsis(40)= 'sndrD4_g13', + sattypes_rad(41) = 'sndrd1_g14', dsis(41)= 'sndrD1_g14', + sattypes_rad(42) = 'sndrd2_g14', dsis(42)= 'sndrD2_g14', + sattypes_rad(43) = 'sndrd3_g14', dsis(43)= 'sndrD3_g14', + sattypes_rad(44) = 'sndrd4_g14', dsis(44)= 'sndrD4_g14', + sattypes_rad(45) = 'sndrd1_g15', dsis(45)= 'sndrD1_g15', + sattypes_rad(46) = 'sndrd2_g15', dsis(46)= 'sndrD2_g15', + sattypes_rad(47) = 'sndrd3_g15', dsis(47)= 'sndrD3_g15', + sattypes_rad(48) = 'sndrd4_g15', dsis(48)= 'sndrD4_g15', + 
sattypes_rad(49) = 'iasi_metop-a', dsis(49)= 'iasi616_metop-a', + sattypes_rad(50) = 'seviri_m08', dsis(50)= 'seviri_m08', + sattypes_rad(51) = 'seviri_m09', dsis(51)= 'seviri_m09', + sattypes_rad(52) = 'seviri_m10', dsis(52)= 'seviri_m10', + sattypes_rad(53) = 'amsua_metop-b', dsis(53)= 'amsua_metop-b', + sattypes_rad(54) = 'hirs4_metop-b', dsis(54)= 'hirs4_metop-b', + sattypes_rad(55) = 'mhs_metop-b', dsis(15)= 'mhs_metop-b', + sattypes_rad(56) = 'iasi_metop-b', dsis(56)= 'iasi616_metop-b', + sattypes_rad(57) = 'avhrr3_metop-b',dsis(56)= 'avhrr3_metop-b', + sattypes_rad(58) = 'atms_npp', dsis(58)= 'atms_npp', + sattypes_rad(59) = 'cris_npp', dsis(59)= 'cris_npp', + / + &ozobs_enkf + sattypes_oz(1) = 'sbuv2_n16', + sattypes_oz(2) = 'sbuv2_n17', + sattypes_oz(3) = 'sbuv2_n18', + sattypes_oz(4) = 'sbuv2_n19', + sattypes_oz(5) = 'omi_aura', + sattypes_oz(6) = 'gome_metop-a', + sattypes_oz(7) = 'gome_metop-b', + / +EOF diff --git a/ush/comenkf_run_gfs.ksh b/ush/comenkf_run_gfs.ksh new file mode 100755 index 000000000..3f4e105a2 --- /dev/null +++ b/ush/comenkf_run_gfs.ksh @@ -0,0 +1,223 @@ +#!/bin/ksh +##################################################### +# machine set up (users should change this part) +##################################################### + +set -x + +#-------------------------------------------------- + GSIPROC=32 + ARCH='LINUX_LSF' + +##################################################### +##case set up (users should change this part) +##################################################### +# +# GFSCASE = cases used for DTC test +# T574, T254, T126, T62, enkf_glb_t254 +# ANAL_TIME= analysis time (YYYYMMDDHH) +# WORK_ROOT= working directory, where GSI runs +# PREPBURF = path of PreBUFR conventional obs +# BK_ROOT = path of background files +# OBS_ROOT = path of observations files +# FIX_ROOT = path of fix files +# ENKF_EXE = path and name of the EnKF executable + ANAL_TIME=2014092918 + GFSCASE=enkf_glb_t254 + JOB_DIR=the_job_directory + #normally you 
put run scripts here and submit jobs form here, require a copy of enkf_gfs.x at this directory + RUN_NAME=a_descriptive_run_name_such_as_case05_3denvar_etc + OBS_ROOT=the_directory_where_observation_files_are_located + BK_ROOT=the_directory_where_background_files_are_located + GSI_ROOT=the_comgsi_main directory where src/ ush/ fix/ etc are located + CRTM_ROOT=the_CRTM_directory + diag_ROOT=the_observer_directory_where_diag_files_exist + WORK_ROOT=${JOB_DIR}/${RUN_NAME} + FIX_ROOT=${GSI_ROOT}/fix + ENKF_EXE=${JOB_DIR}/enkf_gfs.x + ENKF_NAMELIST=${GSI_ROOT}/ush/comenkf_namelist_gfs.sh + +# Note: number of pe >= NMEM_ENKF +NMEM_ENKF=10 +LEVS=64 +NVARS=5 + +# Set the JCAP resolution which you want. +# All resolutions use LEVS=64 +if [[ "$GFSCASE" = "T62" ]]; then + JCAP=62 + JCAP_B=62 +elif [[ "$GFSCASE" = "T126" ]]; then + JCAP=126 + JCAP_B=126 +elif [[ "$GFSCASE" = "enkf_glb_t254" ]]; then + JCAP=254 + JCAP_B=254 +elif [[ "$GFSCASE" = "T254" ]]; then + JCAP=254 + JCAP_B=574 +elif [[ "$GFSCASE" = "T574" ]]; then + JCAP=574 + JCAP_B=1534 +else + echo "INVALID case = $GFSCASE" + exit +fi + +# Given the requested resolution, set dependent resolution parameters +if [[ "$JCAP" = "382" ]]; then + LONA=768 + LATA=384 + DELTIM=180 + resol=1 +elif [[ "$JCAP" = "574" ]]; then + LONA=1152 + LATA=576 + DELTIM=1200 + resol=2 +elif [[ "$JCAP" = "254" ]]; then + LONA=512 + LATA=256 + DELTIM=1200 + resol=2 +elif [[ "$JCAP" = "126" ]]; then + LONA=256 + LATA=128 + DELTIM=1200 + resol=2 +elif [[ "$JCAP" = "62" ]]; then + LONA=192 + LATA=94 + DELTIM=1200 + resol=2 +else + echo "INVALID JCAP = $JCAP" + exit +fi +NLAT=` expr $LATA + 2 ` + +ncp=/bin/cp +##################################################### +# Users should NOT change script after this point +##################################################### +# +case $ARCH in + 'IBM_LSF') + ###### IBM LSF (Load Sharing Facility) + RUN_COMMAND="mpirun.lsf " ;; + + 'LINUX') + if [ $GSIPROC = 1 ]; then + #### Linux workstation - single 
processor + RUN_COMMAND="" + else + ###### Linux workstation - mpi run + RUN_COMMAND="mpirun -np ${GSIPROC} -machinefile ~/mach " + fi ;; + + 'LINUX_LSF') + ###### LINUX LSF (Load Sharing Facility) + RUN_COMMAND="mpirun.lsf " ;; + + 'LINUX_PBS') + #### Linux cluster PBS (Portable Batch System) + RUN_COMMAND="mpirun -np ${GSIPROC} " ;; + + 'DARWIN_PGI') + ### Mac - mpi run + if [ $GSIPROC = 1 ]; then + #### Mac workstation - single processor + RUN_COMMAND="" + else + ###### Mac workstation - mpi run + RUN_COMMAND="mpirun -np ${GSIPROC} -machinefile ~/mach " + fi ;; + + * ) + print "error: $ARCH is not a supported platform configuration." + exit 1 ;; +esac + +# Given the analysis date, compute the date from which the +# first guess comes. Extract cycle and set prefix and suffix +# for guess and observation data files +PDYa=`echo $ANAL_TIME | cut -c1-8` +cyca=`echo $ANAL_TIME | cut -c9-10` +gdate=`date -u -d "$PDYa $cyca -6 hour" +%Y%m%d%H` #6hr ago + +PDYg=`echo $gdate | cut -c1-8` +cycg=`echo $gdate | cut -c9-10` +prefix_tbc=gdas1.t${cycg}z + +# Directories for test case +dirtbc=$BK_ROOT + +# Fixed files +ANAVINFO=${diag_ROOT}/anavinfo +CONVINFO=${diag_ROOT}/convinfo +SATINFO=${diag_ROOT}/satinfo +SCANINFO=${diag_ROOT}/scaninfo +OZINFO=${diag_ROOT}/ozinfo +LOCINFO=${diag_ROOT}/hybens_info + +# Set up workdir +rm -rf $WORK_ROOT +mkdir -p $WORK_ROOT +cd $WORK_ROOT + +#Build EnKF namelist on-the-fly +. 
$ENKF_NAMELIST + +#$ncp $ENKF_EXE ./enkf.x +cp $ENKF_EXE enkf.x + +cp $ANAVINFO ./anavinfo +cp $CONVINFO ./convinfo +cp $SATINFO ./satinfo +cp $SCANINFO ./scaninfo +cp $OZINFO ./ozinfo +cp $LOCINFO ./hybens_info + +cp $diag_ROOT/satbias_in ./satbias_in +cp $diag_ROOT/satbias_pc ./satbias_pc +cp $diag_ROOT/satbias_angle ./satbias_angle + +# get mean +#ln -s $BK_ROOT/sfg_${gdate}_fhr06_ensmean ./sfg_${gdate}_fhr06_ensmean +ln -s $BK_ROOT/sfg_${gdate}_fhr06_ensmean ./sfg_${ANAL_TIME}_fhr06_ensmean #enkf_gfs requires sfg_ValidTime.fhr06_ensmean +list="conv amsua_metop-a amsua_n18 amsua_n15" +for type in $list; do + ln -s $diag_ROOT/diag_${type}_ges.ensmean . +done + +# get each member +imem=1 +while [[ $imem -le $NMEM_ENKF ]]; do + member="mem"`printf %03i $imem` + #ln -s $BK_ROOT/sfg_${gdate}_fhr06_${member} ./sfg_${gdate}_fhr06_${member} + ln -s $BK_ROOT/sfg_${gdate}_fhr06_${member} ./sfg_${ANAL_TIME}_fhr06_${member} ##enkf_gfs requires sfg_ValidTime.fhr06_MEMBER + list="conv amsua_metop-a amsua_n18 amsua_n15" + for type in $list; do + ln -s $diag_ROOT/diag_${type}_ges.${member} . + done + (( imem = $imem + 1 )) +done +# +################################################### +# run EnKF +################################################### +echo ' Run EnKF' + +${RUN_COMMAND} ./enkf.x < enkf.nml > stdout 2>&1 + +################################################################## +# run time error check +################################################################## +error=$? 
+ +if [ ${error} -ne 0 ]; then + echo "ERROR: ${ENKF_EXE} crashed Exit status=${error}" + exit ${error} +fi + +exit diff --git a/ush/comenkf_run_regional.ksh b/ush/comenkf_run_regional.ksh new file mode 100755 index 000000000..04eab8e79 --- /dev/null +++ b/ush/comenkf_run_regional.ksh @@ -0,0 +1,183 @@ +#!/bin/ksh +##################################################### +# machine set up (users should change this part) +##################################################### +# + +set -x + +# +# GSIPROC = processor number used for GSI analysis +#------------------------------------------------ + GSIPROC=32 + ARCH='LINUX_LSF' + +# Supported configurations: + # IBM_LSF, + # LINUX, LINUX_LSF, LINUX_PBS, + # DARWIN_PGI +# +##################################################### +# case set up (users should change this part) +##################################################### +# +# ANAL_TIME= analysis time (YYYYMMDDHH) +# WORK_ROOT= working directory, where GSI runs +# PREPBURF = path of PreBUFR conventional obs +# OBS_ROOT = path of observations files +# FIX_ROOT = path of fix files +# ENKF_EXE = path and name of the EnKF executable + ANAL_TIME=2014021300 #used by comenkf_namelist.sh + JOB_DIR=the_job_directory + #normally you put run scripts here and submit jobs form here, require a copy of enkf_wrf.x at this directory + RUN_NAME=a_descriptive_run_name_such_as_case05_3denvar_etc + OBS_ROOT=the_directory_where_observation_files_are_located + BK_ROOT=the_directory_where_background_files_are_located + GSI_ROOT=the_comgsi_main directory where src/ ush/ fix/ etc are located + CRTM_ROOT=the_CRTM_directory + diag_ROOT=the_observer_directory_where_diag_files_exist + ENKF_EXE=${JOB_DIR}/enkf_wrf.x + WORK_ROOT=${JOB_DIR}/${RUN_NAME} + FIX_ROOT=${GSI_ROOT}/fix + ENKF_NAMELIST=${GSI_ROOT}/ush/comenkf_namelist.sh + +# ensemble parameters +# + NMEM_ENKF=20 + BK_FILE_mem=${BK_ROOT}/wrfarw + NLONS=129 + NLATS=70 + NLEVS=50 + IF_ARW=.true. + IF_NMM=.false. 
+ list="conv" +# list="conv amsua_n18 mhs_n18 hirs4_n19" +# +##################################################### +# Users should NOT change script after this point +##################################################### +# + +case $ARCH in + 'IBM_LSF') + ###### IBM LSF (Load Sharing Facility) + RUN_COMMAND="mpirun.lsf " ;; + + 'LINUX') + if [ $GSIPROC = 1 ]; then + #### Linux workstation - single processor + RUN_COMMAND="" + else + ###### Linux workstation - mpi run + RUN_COMMAND="mpirun -np ${GSIPROC} -machinefile ~/mach " + fi ;; + + 'LINUX_LSF') + ###### LINUX LSF (Load Sharing Facility) + RUN_COMMAND="mpirun.lsf " ;; + + 'LINUX_PBS') + #### Linux cluster PBS (Portable Batch System) + RUN_COMMAND="mpirun -np ${GSIPROC} " ;; + + 'DARWIN_PGI') + ### Mac - mpi run + if [ $GSIPROC = 1 ]; then + #### Mac workstation - single processor + RUN_COMMAND="" + else + ###### Mac workstation - mpi run + RUN_COMMAND="mpirun -np ${GSIPROC} -machinefile ~/mach " + fi ;; + + * ) + print "error: $ARCH is not a supported platform configuration." + exit 1 ;; +esac + +# Given the analysis date, compute the date from which the +# first guess comes. 
Extract cycle and set prefix and suffix +# for guess and observation data files +# gdate=`$ndate -06 $adate` +#gdate=$ANAL_TIME +#YYYYMMDD=`echo $adate | cut -c1-8` +#HH=`echo $adate | cut -c9-10` + +# Fixed files +# CONVINFO=${FIX_ROOT}/global_convinfo.txt +# SATINFO=${FIX_ROOT}/global_satinfo.txt +# SCANINFO=${FIX_ROOT}/global_scaninfo.txt +# OZINFO=${FIX_ROOT}/global_ozinfo.txt +ANAVINFO=${diag_ROOT}/anavinfo +CONVINFO=${diag_ROOT}/convinfo +SATINFO=${diag_ROOT}/satinfo +SCANINFO=${diag_ROOT}/scaninfo +OZINFO=${diag_ROOT}/ozinfo +# LOCINFO=${FIX_ROOT}/global_hybens_locinfo.l64.txt + +# Set up workdir +rm -rf $WORK_ROOT +mkdir -p $WORK_ROOT +cd $WORK_ROOT + +cp $ENKF_EXE enkf.x + +cp $ANAVINFO ./anavinfo +cp $CONVINFO ./convinfo +cp $SATINFO ./satinfo +cp $SCANINFO ./scaninfo +cp $OZINFO ./ozinfo +# cp $LOCINFO ./hybens_locinfo + +cp $diag_ROOT/satbias_in ./satbias_in +cp $diag_ROOT/satbias_pc ./satbias_pc + +# get mean +ln -s ${BK_FILE_mem}.ensmean ./firstguess.ensmean +for type in $list; do + ln -s $diag_ROOT/diag_${type}_ges.ensmean . +done + +# get each member +imem=1 +while [[ $imem -le $NMEM_ENKF ]]; do + member="mem"`printf %03i $imem` + ln -s ${BK_FILE_mem}.${member} ./firstguess.${member} + for type in $list; do + ln -s $diag_ROOT/diag_${type}_ges.${member} . + done + (( imem = $imem + 1 )) +done + +# Build the GSI namelist on-the-fly +. $ENKF_NAMELIST + +# make analysis files +cp firstguess.ensmean analysis.ensmean +# get each member +imem=1 +while [[ $imem -le $NMEM_ENKF ]]; do + member="mem"`printf %03i $imem` + cp firstguess.${member} analysis.${member} + (( imem = $imem + 1 )) +done + +# +################################################### +# run EnKF +################################################### +echo ' Run EnKF' + +${RUN_COMMAND} ./enkf.x < enkf.nml > stdout 2>&1 + +################################################################## +# run time error check +################################################################## +error=$? 
+ +if [ ${error} -ne 0 ]; then + echo "ERROR: ${ENKF_EXE} crashed Exit status=${error}" + exit ${error} +fi + +exit diff --git a/ush/comgsi_namelist.sh b/ush/comgsi_namelist.sh new file mode 100755 index 000000000..d9dcc9b23 --- /dev/null +++ b/ush/comgsi_namelist.sh @@ -0,0 +1,156 @@ + +cat < gsiparm.anl + + &SETUP + miter=${nummiter},niter(1)=10,niter(2)=10, + write_diag(1)=.true.,write_diag(2)=.false.,write_diag(3)=.true., + gencode=78,qoption=2, + factqmin=0.0,factqmax=0.0, + iguess=-1, + oneobtest=${if_oneobtest},retrieval=.false., + nhr_assimilation=2,l_foto=.false., + use_pbl=.false., + lread_obs_save=${if_read_obs_save},lread_obs_skip=${if_read_obs_skip}, + newpc4pred=.true.,adp_anglebc=.true.,angord=4, + passive_bc=.true.,use_edges=.false.,emiss_bc=.true., + diag_precon=.true.,step_start=1.e-3, + l4densvar=${if4d},nhr_obsbin=1,min_offset=60, + use_gfs_nemsio=${if_gfs_nemsio}, + / + &GRIDOPTS + JCAP=62,JCAP_B=62,NLAT=60,NLON=60,nsig=60,regional=.true., + wrf_nmm_regional=${bk_core_nmm},wrf_mass_regional=${bk_core_arw}, + nems_nmmb_regional=${bk_core_nmmb},nmmb_reference_grid='H',diagnostic_reg=.false., + filled_grid=.false.,half_grid=.true.,netcdf=${bk_if_netcdf}, + / + &BKGERR + vs=${vs_op} + hzscl=${hzscl_op} + bw=0.,fstat=.true., + / + &ANBKGERR + / + &JCOPTS + / + &STRONGOPTS + / + &OBSQC + dfact=0.75,dfact1=3.0,noiqc=.false.,c_varqc=0.02,vadfile='prepbufr', + / + &OBS_INPUT + dmesh(1)=120.0,dmesh(2)=60.0,dmesh(3)=30,time_window_max=1.5,ext_sonde=.true., + / +OBS_INPUT:: +! 
dfile dtype dplat dsis dval dthin dsfcalc + prepbufr ps null ps 1.0 0 0 + prepbufr t null t 1.0 0 0 + prepbufr q null q 1.0 0 0 + prepbufr pw null pw 1.0 0 0 + satwndbufr uv null uv 1.0 0 0 + prepbufr uv null uv 1.0 0 0 + prepbufr spd null spd 1.0 0 0 + prepbufr dw null dw 1.0 0 0 + l2rwbufr rw null rw 1.0 0 0 + prepbufr sst null sst 1.0 0 0 + gpsrobufr gps_ref null gps 1.0 0 0 + ssmirrbufr pcp_ssmi dmsp pcp_ssmi 1.0 -1 0 + tmirrbufr pcp_tmi trmm pcp_tmi 1.0 -1 0 + sbuvbufr sbuv2 n16 sbuv8_n16 1.0 0 0 + sbuvbufr sbuv2 n17 sbuv8_n17 1.0 0 0 + sbuvbufr sbuv2 n18 sbuv8_n18 1.0 0 0 + hirs3bufr hirs3 n16 hirs3_n16 0.0 1 0 + hirs3bufr hirs3 n17 hirs3_n17 6.0 1 0 + hirs4bufr hirs4 metop-a hirs4_metop-a 6.0 2 0 + hirs4bufr hirs4 n18 hirs4_n18 0.0 1 0 + hirs4bufr hirs4 n19 hirs4_n19 1.0 2 0 + hirs4bufr hirs4 metop-b hirs4_metop-b 1.0 1 0 + gimgrbufr goes_img g11 imgr_g11 0.0 1 0 + gimgrbufr goes_img g12 imgr_g12 0.0 1 0 + airsbufr airs aqua airs281SUBSET_aqua 20.0 2 0 + amsuabufr amsua n15 amsua_n15 10.0 2 0 + amsuabufr amsua n18 amsua_n18 10.0 2 0 + amsuabufr amsua n19 amsua_n19 10.0 2 0 + amsuabufr amsua metop-a amsua_metop-a 10.0 2 0 + amsuabufr amsua metop-b amsua_metop-b 10.0 2 0 + airsbufr amsua aqua amsua_aqua 5.0 2 0 + amsubbufr amsub n17 amsub_n17 1.0 1 0 + mhsbufr mhs n18 mhs_n18 3.0 2 0 + mhsbufr mhs n19 mhs_n19 3.0 2 0 + mhsbufr mhs metop-a mhs_metop-a 3.0 2 0 + mhsbufr mhs metop-b mhs_metop-b 3.0 2 0 + ssmitbufr ssmi f13 ssmi_f13 0.0 2 0 + ssmitbufr ssmi f14 ssmi_f14 0.0 2 0 + ssmitbufr ssmi f15 ssmi_f15 0.0 2 0 + amsrebufr amsre_low aqua amsre_aqua 0.0 2 0 + amsrebufr amsre_mid aqua amsre_aqua 0.0 2 0 + amsrebufr amsre_hig aqua amsre_aqua 0.0 2 0 + ssmisbufr ssmis_las f16 ssmis_f16 0.0 2 0 + ssmisbufr ssmis_uas f16 ssmis_f16 0.0 2 0 + ssmisbufr ssmis_img f16 ssmis_f16 0.0 2 0 + ssmisbufr ssmis_env f16 ssmis_f16 0.0 2 0 + gsnd1bufr sndrd1 g12 sndrD1_g12 1.5 1 0 + gsnd1bufr sndrd2 g12 sndrD2_g12 1.5 1 0 + gsnd1bufr sndrd3 g12 sndrD3_g12 1.5 1 0 + gsnd1bufr 
sndrd4 g12 sndrD4_g12 1.5 1 0 + gsnd1bufr sndrd1 g11 sndrD1_g11 1.5 1 0 + gsnd1bufr sndrd2 g11 sndrD2_g11 1.5 1 0 + gsnd1bufr sndrd3 g11 sndrD3_g11 1.5 1 0 + gsnd1bufr sndrd4 g11 sndrD4_g11 1.5 1 0 + gsnd1bufr sndrd1 g13 sndrD1_g13 1.5 1 0 + gsnd1bufr sndrd2 g13 sndrD2_g13 1.5 1 0 + gsnd1bufr sndrd3 g13 sndrD3_g13 1.5 1 0 + gsnd1bufr sndrd4 g13 sndrD4_g13 1.5 1 0 + gsnd1bufr sndrd1 g15 sndrD1_g15 1.5 2 0 + gsnd1bufr sndrd2 g15 sndrD2_g15 1.5 2 0 + gsnd1bufr sndrd3 g15 sndrD3_g15 1.5 2 0 + gsnd1bufr sndrd4 g15 sndrD4_g15 1.5 2 0 + iasibufr iasi metop-a iasi616_metop-a 20.0 1 0 + gomebufr gome metop-a gome_metop-a 1.0 2 0 + omibufr omi aura omi_aura 1.0 2 0 + sbuvbufr sbuv2 n19 sbuv8_n19 1.0 0 0 + tcvitl tcp null tcp 1.0 0 0 + seviribufr seviri m08 seviri_m08 1.0 1 0 + seviribufr seviri m09 seviri_m09 1.0 1 0 + seviribufr seviri m10 seviri_m10 1.0 1 0 + iasibufr iasi metop-b iasi616_metop-b 0.0 1 0 + gomebufr gome metop-b gome_metop-b 0.0 2 0 + atmsbufr atms npp atms_npp 0.0 1 0 + crisbufr cris npp cris_npp 0.0 1 0 + mlsbufr mls30 aura mls30_aura 0.0 0 0 + oscatbufr uv null uv 0.0 0 0 + prepbufr mta_cld null mta_cld 1.0 0 0 + prepbufr gos_ctp null gos_ctp 1.0 0 0 + refInGSI rad_ref null rad_ref 1.0 0 0 + lghtInGSI lghtn null lghtn 1.0 0 0 + larcglb larcglb null larcglb 1.0 0 0 +:: + &SUPEROB_RADAR + del_azimuth=5.,del_elev=.25,del_range=5000.,del_time=.5,elev_angle_max=5.,minnum=50,range_max=100000., + l2superob_only=.false., + / + &LAG_DATA + / + &HYBRID_ENSEMBLE + l_hyb_ens=${ifhyb}, + uv_hyb_ens=.true., + aniso_a_en=.false.,generate_ens=.false., + n_ens=${nummem}, + beta_s0=0.5,s_ens_h=110,s_ens_v=3, + regional_ensemble_option=1, + pseudo_hybens = .false., + grid_ratio_ens = 1, + l_ens_in_diff_time=.true., + ensemble_path='', + / + &RAPIDREFRESH_CLDSURF + / + &CHEM + / + &NST + / + &SINGLEOB_TEST + maginnov=1.0,magoberr=0.8,oneob_type='t', + oblat=38.,oblon=279.,obpres=500.,obdattim=${ANAL_TIME}, + obhourset=0., + / +EOF diff --git a/ush/comgsi_namelist_chem.sh 
b/ush/comgsi_namelist_chem.sh new file mode 100755 index 000000000..46b59a3db --- /dev/null +++ b/ush/comgsi_namelist_chem.sh @@ -0,0 +1,79 @@ + +cat < gsiparm.anl + + &SETUP + miter=2,niter(1)=50,niter(2)=50, + write_diag(1)=.true.,write_diag(2)=.false.,write_diag(3)=.true., + gencode=78,qoption=1, + factqmin=0.0,factqmax=0.0, + iguess=-1, + oneobtest=.false.,retrieval=.false., + nhr_assimilation=3,l_foto=.false., + use_pbl=.false., + offtime_data=.true.,diag_aero=.false., + newpc4pred=.true.,adp_anglebc=.true.,angord=4,passive_bc=.true., + use_edges=.false.,diag_precon=.false., + / + &GRIDOPTS + JCAP=62,JCAP_B=62,NLAT=60,NLON=60,nsig=60,regional=.true., + wrf_nmm_regional=.false.,wrf_mass_regional=${bk_core_arw}, + nems_nmmb_regional=.false.,nmmb_reference_grid='H',diagnostic_reg=.false., + cmaq_regional=${bk_core_cmaq}, + filled_grid=.false.,half_grid=.true.,netcdf=${bk_if_netcdf}, + / + &BKGERR + vs=1.0, + hzscl=.373,.746,1.5, + bw=0.,fstat=.true., + / + &ANBKGERR + / + &JCOPTS + / + &STRONGOPTS + / + &OBSQC + dfact=0.75,dfact1=3.0,noiqc=.false.,c_varqc=0.02,vadfile='prepbufr', + / + &OBS_INPUT + dmesh(1)=120.0,dmesh(2)=60.0,dmesh(3)=30,time_window_max=240.0,ext_sonde=.true., + / +OBS_INPUT:: +! 
dfile dtype dplat dsis dval dthin dsfcalc + modisbufr modis_aod terra v.modis_terra 1.0 2 0 + modisbufr modis_aod aqua v.modis_aqua 1.0 2 0 + pm25bufr pm2_5 null TEOM 1.0 0 0 +:: + &SUPEROB_RADAR + del_azimuth=5.,del_elev=.25,del_range=5000.,del_time=.5,elev_angle_max=5.,minnum=50,range_max=100000., + l2superob_only=.false., + / + &LAG_DATA + / + &HYBRID_ENSEMBLE + l_hyb_ens=.false., + / + &RAPIDREFRESH_CLDSURF + / + &CHEM + laeroana_gocart=${bk_laeroana_gocart}, + l_aoderr_table = .false., + aod_qa_limit = 3, + luse_deepblue = .false., + aero_ratios = .false., + tunable_error=0.5, + berror_chem=.true., + wrf_pm2_5=${bk_wrf_pm2_5}, + diag_incr=.true., + in_fname="cmaq_in.bin", + out_fname="cmaq_out.bin", + incr_fname="cmaq_increment.bin", + / + &NST + / + &SINGLEOB_TEST + maginnov=1.0,magoberr=0.8,oneob_type='t', + oblat=38.,oblon=279.,obpres=500.,obdattim=${ANAL_TIME}, + obhourset=0., + / +EOF diff --git a/ush/comgsi_namelist_gfs.sh b/ush/comgsi_namelist_gfs.sh new file mode 100755 index 000000000..d8a1d88be --- /dev/null +++ b/ush/comgsi_namelist_gfs.sh @@ -0,0 +1,136 @@ + +cat < gsiparm.anl + + &SETUP + miter=${nummiter},niter(1)=10,niter(2)=10, + niter_no_qc(1)=50,niter_no_qc(2)=0, + write_diag(1)=.true.,write_diag(2)=.false.,write_diag(3)=.true., + gencode=82,qoption=2,cwoption=3, + factqmin=5.0,factqmax=5.0,deltim=1200, + iguess=-1, + oneobtest=.false.,retrieval=.false.,l_foto=.false., + use_pbl=.false.,use_compress=.true.,nsig_ext=12,gpstop=50., + use_gfs_nemsio=.false.,lrun_subdirs=.false., + newpc4pred=.true.,adp_anglebc=.true.,angord=4,passive_bc=.true.,use_edges=.false.,diag_precon=.true.,step_start=1.0e-3,emiss_bc=.true.,cwoption=3, + deltim=$DELTIM, + lread_obs_save=${if_read_obs_save},lread_obs_skip=${if_read_obs_skip}, + / + &GRIDOPTS + JCAP=$JCAP,JCAP_B=$JCAP_B,NLAT=$NLAT,NLON=$LONA,nsig=$LEVS, + regional=.false.,nlayers(63)=3,nlayers(64)=6, + / + &BKGERR + vs=${vs_op} + hzscl=${hzscl_op} + hswgt=0.45,0.3,0.25, + bw=0.0,norsp=4, + 
bkgv_flowdep=.true.,bkgv_rewgtfct=1.5, + / + &ANBKGERR + / + &JCOPTS + / + &STRONGOPTS + / + &OBSQC + dfact=0.75,dfact1=3.0,noiqc=.false.,c_varqc=0.02,vadfile='prepbufr', + / + &OBS_INPUT + dmesh(1)=1450.0,dmesh(2)=1500.0,time_window_max=0.5,ext_sonde=.true., + / +OBS_INPUT:: +! dfile dtype dplat dsis dval dthin dsfcalc + prepbufr ps null ps 0.0 0 0 + prepbufr t null t 0.0 0 0 + prepbufr q null q 0.0 0 0 + prepbufr pw null pw 0.0 0 0 + satwndbufr uv null uv 0.0 0 0 + prepbufr uv null uv 0.0 0 0 + prepbufr spd null spd 0.0 0 0 + prepbufr dw null dw 0.0 0 0 + radarbufr rw null l3rw 0.0 0 0 + l2rwbufr rw null l2rw 0.0 0 0 + prepbufr sst null sst 0.0 0 0 + gpsrobufr gps_bnd null gps 0.0 0 0 + ssmirrbufr pcp_ssmi dmsp pcp_ssmi 0.0 -1 0 + tmirrbufr pcp_tmi trmm pcp_tmi 0.0 -1 0 + sbuvbufr sbuv2 n16 sbuv8_n16 0.0 0 0 + sbuvbufr sbuv2 n17 sbuv8_n17 0.0 0 0 + sbuvbufr sbuv2 n18 sbuv8_n18 0.0 0 0 + hirs3bufr hirs3 n17 hirs3_n17 0.0 1 1 + hirs4bufr hirs4 metop-a hirs4_metop-a 0.0 1 1 + gimgrbufr goes_img g11 imgr_g11 0.0 1 0 + gimgrbufr goes_img g12 imgr_g12 0.0 1 0 + airsbufr airs aqua airs_aqua 0.0 1 1 + amsuabufr amsua n15 amsua_n15 0.0 1 1 + amsuabufr amsua n18 amsua_n18 0.0 1 1 + amsuabufr amsua metop-a amsua_metop-a 0.0 1 1 + airsbufr amsua aqua amsua_aqua 0.0 1 1 + amsubbufr amsub n17 amsub_n17 0.0 1 1 + mhsbufr mhs n18 mhs_n18 0.0 1 1 + mhsbufr mhs metop-a mhs_metop-a 0.0 1 1 + ssmitbufr ssmi f15 ssmi_f15 0.0 1 0 + amsrebufr amsre_low aqua amsre_aqua 0.0 1 0 + amsrebufr amsre_mid aqua amsre_aqua 0.0 1 0 + amsrebufr amsre_hig aqua amsre_aqua 0.0 1 0 + ssmisbufr ssmis_las f16 ssmis_f16 0.0 1 0 + ssmisbufr ssmis_uas f16 ssmis_f16 0.0 1 0 + ssmisbufr ssmis_img f16 ssmis_f16 0.0 1 0 + ssmisbufr ssmis_env f16 ssmis_f16 0.0 1 0 + gsnd1bufr sndrd1 g12 sndrD1_g12 0.0 1 0 + gsnd1bufr sndrd2 g12 sndrD2_g12 0.0 1 0 + gsnd1bufr sndrd3 g12 sndrD3_g12 0.0 1 0 + gsnd1bufr sndrd4 g12 sndrD4_g12 0.0 1 0 + gsnd1bufr sndrd1 g11 sndrD1_g11 0.0 1 0 + gsnd1bufr sndrd2 g11 sndrD2_g11 0.0 1 0 
+ gsnd1bufr sndrd3 g11 sndrD3_g11 0.0 1 0 + gsnd1bufr sndrd4 g11 sndrD4_g11 0.0 1 0 + gsnd1bufr sndrd1 g13 sndrD1_g13 0.0 1 0 + gsnd1bufr sndrd2 g13 sndrD2_g13 0.0 1 0 + gsnd1bufr sndrd3 g13 sndrD3_g13 0.0 1 0 + gsnd1bufr sndrd4 g13 sndrD4_g13 0.0 1 0 + iasibufr iasi metop-a iasi_metop-a 0.0 1 1 + gomebufr gome metop-a gome_metop-a 0.0 2 0 + omibufr omi aura omi_aura 0.0 2 0 + sbuvbufr sbuv2 n19 sbuv8_n19 0.0 0 0 + hirs4bufr hirs4 n19 hirs4_n19 0.0 1 1 + amsuabufr amsua n19 amsua_n19 0.0 1 1 + mhsbufr mhs n19 mhs_n19 0.0 1 1 + tcvitl tcp null tcp 0.0 0 0 + seviribufr seviri m08 seviri_m08 0.0 1 0 + seviribufr seviri m09 seviri_m09 0.0 1 0 + seviribufr seviri m10 seviri_m10 0.0 1 0 + hirs4bufr hirs4 metop-b hirs4_metop-b 0.0 1 0 + amsuabufr amsua metop-b amsua_metop-b 0.0 1 0 + mhsbufr mhs metop-b mhs_metop-b 0.0 1 0 + iasibufr iasi metop-b iasi_metop-b 0.0 1 0 + gomebufr gome metop-b gome_metop-b 0.0 2 0 + atmsbufr atms npp atms_npp 0.0 1 0 + atmsbufr atms n20 atms_n20 0.0 1 0 + crisbufr cris npp cris_npp 0.0 1 0 + crisfsbufr cris-fsr npp cris-fsr_npp 0.0 1 0 + crisfsbufr cris-fsr n20 cris-fsr_n20 0.0 1 0 + mlsbufr mls30 aura mls30_aura 0.0 0 0 + oscatbufr uv null uv 0.0 0 0 +:: + &SUPEROB_RADAR + del_azimuth=5.,del_elev=.25,del_range=5000.,del_time=.5,elev_angle_max=5.,minnum=50,range_max=100000., + l2superob_only=.false., + / + &LAG_DATA + / + &HYBRID_ENSEMBLE + l_hyb_ens=.false., + / + &RAPIDREFRESH_CLDSURF + / + &CHEM + / + &NST + / + &SINGLEOB_TEST + maginnov=1.0,magoberr=0.8,oneob_type='t', + oblat=38.,oblon=279.,obpres=500.,obdattim=${ANAL_TIME}, + obhourset=0., + / +EOF diff --git a/ush/comgsi_run_chem.ksh b/ush/comgsi_run_chem.ksh new file mode 100755 index 000000000..7a1a3c4c0 --- /dev/null +++ b/ush/comgsi_run_chem.ksh @@ -0,0 +1,448 @@ +#!/bin/ksh +##################################################### +# machine set up (users should change this part) +##################################################### + +set -x +# +# GSIPROC = processor number used 
for GSI analysis +#------------------------------------------------ + GSIPROC=1 + ARCH='LINUX_LSF' + +# Supported configurations: + # IBM_LSF, + # LINUX, LINUX_LSF, LINUX_PBS, + # DARWIN_PGI +# this script can run 4 GSI chem cases +# 1. WRF-Chem GOCART with MODIS AOD observation +# bk_core=WRFCHEM_GOCART and obs_type=MODISAOD +# background= wrfinput_enkf_d01_2012-06-03_18:00:00 +# observations=Aqua_Terra_AOD_BUFR:2012-06-03_00:00:00 +# 2. WRF-Chem GOCART with PM25 observation +# bk_core=WRFCHEM_GOCART and obs_type=PM25 +# background= wrfinput_enkf_d01_2012-06-03_18:00:00 +# observations=anow.2012060318.bufr +# 3. WRF-Chem PM25 with MP25 observation +# bk_core=WRFCHEM_PM25 and obs_type=PM25 +# background= wrfinput_enkf_d01_2012-06-03_18:00:00 +# observations=anow.2012060318.bufr +# 4. CMAQ with MP25 observation +# bk_core=CMAQ and obs_type=PM25 +# background= cmaq2gsi_4.7_20130621_120000.bin +# observations=anow.2013062112.bufr +# +##################################################### +# case set up (users should change this part) +##################################################### +# +# ANAL_TIME= analysis time (YYYYMMDDHH) +# WORK_ROOT= working directory, where GSI runs +# PREPBURF = path of PreBUFR conventional obs +# BK_FILE = path and name of background file +# OBS_ROOT = path of observations files +# FIX_ROOT = path of fix files +# GSI_EXE = path and name of the gsi executable + ANAL_TIME=2012060318 + JOB_DIR=the_job_directory + #normally you put run scripts here and submit jobs form here, require a copy of gsi.x at this directory + RUN_NAME=a_descriptive_run_name_such_as_case05_3denvar_etc + OBS_ROOT=the_directory_where_observation_files_are_located + BK_ROOT=the_directory_where_background_files_are_located + GSI_ROOT=the_comgsi_main directory where src/ ush/ fix/ etc are located + CRTM_ROOT=the_CRTM_directory + GSI_EXE=${JOB_DIR}/gsi.x #assume you have a copy of gsi.x here + WORK_ROOT=${JOB_DIR}/${RUN_NAME} + FIX_ROOT=${GSI_ROOT}/fix + 
GSI_NAMELIST=${GSI_ROOT}/ush/comgsi_namelist_chem.sh + PREPBUFR=${OBS_ROOT}/anow.2012060318.bufr + BK_FILE=${BK_ROOT}/wrfinput_d01_2012-06-03_18:00:00 +# +#------------------------------------------------ +# bk_core= set background (WRFCHEM_GOCART WRFCHEM_PM25 or CMAQ) +# obs_type= set observation type (MODISAOD or PM25) +# if_clean = clean : delete temperal files in working directory (default) +# no : leave running directory as is (this is for debug only) + bk_core=WRFCHEM_GOCART + obs_type=PM25 + if_clean=clean +# +# +##################################################### +# Users should NOT make changes after this point +##################################################### +# +BYTE_ORDER=Big_Endian +# BYTE_ORDER=Little_Endian + +case $ARCH in + 'IBM_LSF') + ###### IBM LSF (Load Sharing Facility) + RUN_COMMAND="mpirun.lsf " ;; + + 'LINUX') + if [ $GSIPROC = 1 ]; then + #### Linux workstation - single processor + RUN_COMMAND="" + else + ###### Linux workstation - mpi run + RUN_COMMAND="mpirun -np ${GSIPROC} -machinefile ~/mach " + fi ;; + + 'LINUX_LSF') + ###### LINUX LSF (Load Sharing Facility) + RUN_COMMAND="mpirun.lsf " ;; + + 'LINUX_PBS') + #### Linux cluster PBS (Portable Batch System) + RUN_COMMAND="mpirun -np ${GSIPROC} " ;; + + 'DARWIN_PGI') + ### Mac - mpi run + if [ $GSIPROC = 1 ]; then + #### Mac workstation - single processor + RUN_COMMAND="" + else + ###### Mac workstation - mpi run + RUN_COMMAND="mpirun -np ${GSIPROC} -machinefile ~/mach " + fi ;; + + * ) + print "error: $ARCH is not a supported platform configuration." + exit 1 ;; +esac + + +################################################################################## +# Check GSI needed environment variables are defined and exist +# + +# Make sure ANAL_TIME is defined and in the correct format +if [ ! "${ANAL_TIME}" ]; then + echo "ERROR: \$ANAL_TIME is not defined!" + exit 1 +fi + +# Make sure WORK_ROOT is defined and exists +if [ ! 
"${WORK_ROOT}" ]; then + echo "ERROR: \$WORK_ROOT is not defined!" + exit 1 +fi + +# Make sure the background file exists +if [ ! -r "${BK_FILE}" ]; then + echo "ERROR: ${BK_FILE} does not exist!" + exit 1 +fi + +# Make sure OBS_ROOT is defined and exists +if [ ! "${OBS_ROOT}" ]; then + echo "ERROR: \$OBS_ROOT is not defined!" + exit 1 +fi +if [ ! -d "${OBS_ROOT}" ]; then + echo "ERROR: OBS_ROOT directory '${OBS_ROOT}' does not exist!" + exit 1 +fi + +# Set the path to the GSI static files +if [ ! "${FIX_ROOT}" ]; then + echo "ERROR: \$FIX_ROOT is not defined!" + exit 1 +fi +if [ ! -d "${FIX_ROOT}" ]; then + echo "ERROR: fix directory '${FIX_ROOT}' does not exist!" + exit 1 +fi + +# Set the path to the CRTM coefficients +if [ ! "${CRTM_ROOT}" ]; then + echo "ERROR: \$CRTM_ROOT is not defined!" + exit 1 +fi +if [ ! -d "${CRTM_ROOT}" ]; then + echo "ERROR: fix directory '${CRTM_ROOT}' does not exist!" + exit 1 +fi + + +# Make sure the GSI executable exists +if [ ! -x "${GSI_EXE}" ]; then + echo "ERROR: ${GSI_EXE} does not exist!" 
+ exit 1 +fi + +# Check to make sure the number of processors for running GSI was specified +if [ -z "${GSIPROC}" ]; then + echo "ERROR: The variable $GSIPROC must be set to contain the number of processors to run GSI" + exit 1 +fi + +# +################################################################################## +# Create the ram work directory and cd into it + +workdir=${WORK_ROOT} +echo " Create working directory:" ${workdir} + +if [ -d "${workdir}" ]; then + rm -rf ${workdir} +fi +mkdir -p ${workdir} +cd ${workdir} + +# +################################################################################## + +echo " Copy GSI executable, background file, and link observation bufr to working directory" + +# Save a copy of the GSI executable in the workdir +cp ${GSI_EXE} gsi.x + +# Bring over background field (it's modified by GSI so we can't link to it) + +if [ ${bk_core} = WRFCHEM_GOCART ] ; then + cp ${BK_FILE} ./wrf_inout +fi +if [ ${bk_core} = WRFCHEM_PM25 ] ; then + cp ${BK_FILE} ./wrf_inout +fi +if [ ${bk_core} = CMAQ ] ; then + cp ${BK_FILE} ./cmaq_in.bin +fi + +# Link to the observation data +if [ ${obs_type} = MODISAOD ] ; then + ln -s ${PREPBUFR} ./modisbufr +fi +if [ ${obs_type} = PM25 ] ; then + ln -s ${PREPBUFR} ./pm25bufr +fi +# +################################################################################## + +echo " Copy fixed files and link CRTM coefficient files to working directory" + +# Set fixed files +# berror = forecast model background error statistics +# specoef = CRTM spectral coefficients +# trncoef = CRTM transmittance coefficients +# emiscoef = CRTM coefficients for IR sea surface emissivity model +# aerocoef = CRTM coefficients for aerosol effects +# cldcoef = CRTM coefficients for cloud effects +# satinfo = text file with information about assimilation of brightness temperatures +# satangl = angle dependent bias correction file (fixed in time) +# pcpinfo = text file with information about assimilation of prepcipitation rates +# 
ozinfo = text file with information about assimilation of ozone data +# errtable = text file with obs error for conventional data (regional only) +# convinfo = text file with information about assimilation of conventional data +# bufrtable= text file ONLY needed for single obs test (oneobstest=.true.) +# bftab_sst= bufr table for sst ONLY needed for sst retrieval (retrieval=.true.) + +if [ ${bk_core} = WRFCHEM_GOCART ] ; then + BERROR=${FIX_ROOT}/${BYTE_ORDER}/wrf_chem_berror_big_endian + BERROR_CHEM=${FIX_ROOT}/${BYTE_ORDER}/wrf_chem_berror_big_endian + ANAVINFO=${FIX_ROOT}/anavinfo_wrfchem_gocart +fi +if [ ${bk_core} = WRFCHEM_PM25 ] ; then + BERROR=${FIX_ROOT}/${BYTE_ORDER}/wrf_chem_berror_big_endian + BERROR_CHEM=${FIX_ROOT}/${BYTE_ORDER}/wrf_chem_berror_big_endian + ANAVINFO=${FIX_ROOT}/anavinfo_wrfchem_pm25 +fi +if [ ${bk_core} = CMAQ ] ; then + BERROR=${FIX_ROOT}/${BYTE_ORDER}/cmaq_berror_big_endian + BERROR_CHEM=${FIX_ROOT}/${BYTE_ORDER}/cmaq_berror_big_endian + ANAVINFO=${FIX_ROOT}/anavinfo_cmaq_pm25 +fi + +AEROINFO=${FIX_ROOT}/aeroinfo_aod.txt +OBERROR=${FIX_ROOT}/nam_errtable.r3dv +SATANGL=${FIX_ROOT}/global_satangbias.txt +SATINFO=${FIX_ROOT}/global_satinfo.txt +CONVINFO=${FIX_ROOT}/global_convinfo.txt +OZINFO=${FIX_ROOT}/global_ozinfo.txt +PCPINFO=${FIX_ROOT}/global_pcpinfo.txt + +# copy Fixed fields to working directory + cp $ANAVINFO anavinfo + cp $BERROR berror_stats + cp $BERROR_CHEM berror_stats_chem + cp $SATANGL satbias_angle + cp $SATINFO satinfo + cp $CONVINFO convinfo + cp $OZINFO ozinfo + cp $PCPINFO pcpinfo + cp $OBERROR errtable + cp $AEROINFO aeroinfo +# +# # CRTM Spectral and Transmittance coefficients +CRTM_ROOT_ORDER=${CRTM_ROOT}/${BYTE_ORDER} +emiscoef_IRwater=${CRTM_ROOT_ORDER}/Nalli.IRwater.EmisCoeff.bin +emiscoef_IRice=${CRTM_ROOT_ORDER}/NPOESS.IRice.EmisCoeff.bin +emiscoef_IRland=${CRTM_ROOT_ORDER}/NPOESS.IRland.EmisCoeff.bin +emiscoef_IRsnow=${CRTM_ROOT_ORDER}/NPOESS.IRsnow.EmisCoeff.bin 
+emiscoef_VISice=${CRTM_ROOT_ORDER}/NPOESS.VISice.EmisCoeff.bin +emiscoef_VISland=${CRTM_ROOT_ORDER}/NPOESS.VISland.EmisCoeff.bin +emiscoef_VISsnow=${CRTM_ROOT_ORDER}/NPOESS.VISsnow.EmisCoeff.bin +emiscoef_VISwater=${CRTM_ROOT_ORDER}/NPOESS.VISwater.EmisCoeff.bin +emiscoef_MWwater=${CRTM_ROOT_ORDER}/FASTEM6.MWwater.EmisCoeff.bin +aercoef=${CRTM_ROOT_ORDER}/AerosolCoeff.bin +cldcoef=${CRTM_ROOT_ORDER}/CloudCoeff.bin + +ln -s $emiscoef_IRwater ./Nalli.IRwater.EmisCoeff.bin +ln -s $emiscoef_IRice ./NPOESS.IRice.EmisCoeff.bin +ln -s $emiscoef_IRsnow ./NPOESS.IRsnow.EmisCoeff.bin +ln -s $emiscoef_IRland ./NPOESS.IRland.EmisCoeff.bin +ln -s $emiscoef_VISice ./NPOESS.VISice.EmisCoeff.bin +ln -s $emiscoef_VISland ./NPOESS.VISland.EmisCoeff.bin +ln -s $emiscoef_VISsnow ./NPOESS.VISsnow.EmisCoeff.bin +ln -s $emiscoef_VISwater ./NPOESS.VISwater.EmisCoeff.bin +ln -s $emiscoef_MWwater ./FASTEM6.MWwater.EmisCoeff.bin +ln -s $aercoef ./AerosolCoeff.bin +ln -s $cldcoef ./CloudCoeff.bin +# Copy CRTM coefficient files based on entries in satinfo file +for file in `awk '{if($1!~"!"){print $1}}' ./satinfo | sort | uniq` ;do + ln -s ${CRTM_ROOT_ORDER}/${file}.SpcCoeff.bin ./ + ln -s ${CRTM_ROOT_ORDER}/${file}.TauCoeff.bin ./ +done + +for file in `awk '{if($1!~"!"){print $1}}' ./aeroinfo | sort | uniq` ;do + ln -s ${CRTM_ROOT_ORDER}/${file}.SpcCoeff.bin ./ + ln -s ${CRTM_ROOT_ORDER}/${file}.TauCoeff.bin ./ +done + +# Only need this file for single obs test + bufrtable=${FIX_ROOT}/prepobs_prep.bufrtable + cp $bufrtable ./prepobs_prep.bufrtable + +# for satellite bias correction +# Users may need to use their own satbias files for correct bias correction +cp ${GSI_ROOT}/fix/comgsi_satbias_in ./satbias_in +cp ${GSI_ROOT}/fix/comgsi_satbias_pc_in ./satbias_pc_in + +# +################################################################################## +# Set some parameters for use by the GSI executable and to build the namelist +echo " Build the namelist " + +if [ ${bk_core} = WRFCHEM_GOCART 
] ; then + bk_core_arw='.true.' + bk_if_netcdf='.true.' + bk_core_cmaq='.false.' + bk_wrf_pm2_5='.false.' + bk_laeroana_gocart='.true.' +fi +if [ ${bk_core} = WRFCHEM_PM25 ] ; then + bk_core_arw='.true.' + bk_if_netcdf='.true.' + bk_core_cmaq='.false.' + bk_wrf_pm2_5='.true.' + bk_laeroana_gocart='.false.' +fi +if [ ${bk_core} = CMAQ ] ; then + bk_core_arw='.false.' + bk_if_netcdf='.false.' + bk_core_cmaq='.true.' + bk_wrf_pm2_5='.false.' + bk_laeroana_gocart='.false.' +fi + +# Build the GSI namelist on-the-fly +. $GSI_NAMELIST + +# +################################################### +# run GSI +################################################### +echo ' Run GSI with' ${bk_core} 'background' + +case $ARCH in + 'IBM_LSF') + ${RUN_COMMAND} ./gsi.x < gsiparm.anl > stdout 2>&1 ;; + + * ) + ${RUN_COMMAND} ./gsi.x > stdout 2>&1 ;; +esac + +################################################################## +# run time error check +################################################################## +error=$? + +if [ ${error} -ne 0 ]; then + echo "ERROR: ${GSI} crashed Exit status=${error}" + exit ${error} +fi + +# +################################################################## +# +# GSI updating satbias_in +# +# GSI updating satbias_in (only for cycling assimilation) + +# Copy the output to more understandable names +ln -s stdout stdout.anl.${ANAL_TIME} +ln -s wrf_inout wrfanl.${ANAL_TIME} +ln -s fort.201 fit_p1.${ANAL_TIME} +ln -s fort.202 fit_w1.${ANAL_TIME} +ln -s fort.203 fit_t1.${ANAL_TIME} +ln -s fort.204 fit_q1.${ANAL_TIME} +ln -s fort.207 fit_rad1.${ANAL_TIME} + +# Loop over first and last outer loops to generate innovation +# diagnostic files for indicated observation types (groups) +# +# NOTE: Since we set miter=2 in GSI namelist SETUP, outer +# loop 03 will contain innovations with respect to +# the analysis. Creation of o-a innovation files +# is triggered by write_diag(3)=.true. The setting +# write_diag(1)=.true. 
turns on creation of o-g +# innovation files. +# + +loops="01 03" +for loop in $loops; do + +case $loop in + 01) string=ges;; + 03) string=anl;; + *) string=$loop;; +esac + +# Collect diagnostic files for obs types (groups) below +# listall="conv amsua_metop-a mhs_metop-a hirs4_metop-a hirs2_n14 msu_n14 \ +# sndr_g08 sndr_g10 sndr_g12 sndr_g08_prep sndr_g10_prep sndr_g12_prep \ +# sndrd1_g08 sndrd2_g08 sndrd3_g08 sndrd4_g08 sndrd1_g10 sndrd2_g10 \ +# sndrd3_g10 sndrd4_g10 sndrd1_g12 sndrd2_g12 sndrd3_g12 sndrd4_g12 \ +# hirs3_n15 hirs3_n16 hirs3_n17 amsua_n15 amsua_n16 amsua_n17 \ +# amsub_n15 amsub_n16 amsub_n17 hsb_aqua airs_aqua amsua_aqua \ +# goes_img_g08 goes_img_g10 goes_img_g11 goes_img_g12 \ +# pcp_ssmi_dmsp pcp_tmi_trmm sbuv2_n16 sbuv2_n17 sbuv2_n18 \ +# omi_aura ssmi_f13 ssmi_f14 ssmi_f15 hirs4_n18 amsua_n18 mhs_n18 \ +# amsre_low_aqua amsre_mid_aqua amsre_hig_aqua ssmis_las_f16 \ +# ssmis_uas_f16 ssmis_img_f16 ssmis_env_f16 mhs_metop_b \ +# hirs4_metop_b hirs4_n19 amusa_n19 mhs_n19" + listall=`ls pe* | cut -f2 -d"." | awk '{print substr($0, 0, length($0)-3)}' | sort | uniq ` + + for type in $listall; do + count=`ls pe*${type}_${loop}* | wc -l` + if [[ $count -gt 0 ]]; then + cat pe*${type}_${loop}* > diag_${type}_${string}.${ANAL_TIME} + fi + done +done + +# Clean working directory to save only important files +ls -l * > list_run_directory +if [[ ${if_clean} = clean ]]; then + echo ' Clean working directory after GSI run' + rm -f *Coeff.bin # all CRTM coefficient files + rm -f pe0* # diag files on each processor + rm -f obs_input.* # observation middle files + rm -f siganl sigf0? 
# background middle files + rm -f fsize_* # delete temperal file for bufr size +fi +# +# +exit 0 diff --git a/ush/comgsi_run_gfs.ksh b/ush/comgsi_run_gfs.ksh new file mode 100755 index 000000000..4d5e9c133 --- /dev/null +++ b/ush/comgsi_run_gfs.ksh @@ -0,0 +1,734 @@ +#!/bin/ksh +##################################################### +# machine set up (users should change this part) +##################################################### + + +set -x +# GSIPROC = processor number used for GSI analysis +#------------------------------------------------ + GSIPROC=1 + ARCH='LINUX_LSF' +# Supported configurations: + # IBM_LSF, + # LINUX, LINUX_LSF, LINUX_PBS, + # DARWIN_PGI +# +##################################################### +# case set up (users should change this part) +##################################################### +# +# GFSCASE = cases used for DTC test +# T574, T254, T126, T62, enkf_glb_t62 +# ANAL_TIME= analysis time (YYYYMMDDHH) +# WORK_ROOT= working directory, where GSI runs +# PREPBURF = path of PreBUFR conventional obs +# BK_ROOT = path of background files +# OBS_ROOT = path of observations files +# FIX_ROOT = path of fix files +# GSI_EXE = path and name of the gsi executable + ANAL_TIME=2014080400 + GFSCASE=T62 + JOB_DIR=the_job_directory + #normally you put run scripts here and submit jobs form here, require a copy of gsi.x at this directory + RUN_NAME=a_descriptive_run_name_such_as_case05_3denvar_etc + OBS_ROOT=the_directory_where_observation_files_are_located + BK_ROOT=the_directory_where_background_files_are_located + GSI_ROOT=the_comgsi_main directory where src/ ush/ fix/ etc are located + CRTM_ROOT=the_CRTM_directory + GSI_EXE=${JOB_DIR}/gsi.x #assume you have a copy of gsi.x here + WORK_ROOT=${JOB_DIR}/${RUN_NAME} + FIX_ROOT=${GSI_ROOT}/fix + GSI_NAMELIST=${GSI_ROOT}/ush/comgsi_namelist_gfs.sh + PREPBUFR=${OBS_ROOT}/prepbufr + FIX_ROOT=${GSI_ROOT}/fix +# +# ENS_ROOT=the_directory_where_ensemble_backgrounds_are_located 
+#------------------------------------------------ +# if_clean = clean : delete temperal files in working directory (default) +# no : leave running directory as is (this is for debug only) + if_clean=clean + +# if_observer = Yes : only used as observation operater for enkf +# no_member number of ensemble members +# BK_FILE_mem path and base for ensemble members + if_observer=No # Yes ,or, No -- case sensitive!!! + no_member=10 + PDYa=`echo $ANAL_TIME | cut -c1-8` + cyca=`echo $ANAL_TIME | cut -c9-10` + gdate=`date -u -d "$PDYa $cyca -6 hour" +%Y%m%d%H` #guess date is 6hr ago + BK_FILE_mem=${BK_ROOT}/sfg_${gdate} + +# +# Set the JCAP resolution which you want. +# All resolutions use LEVS=64 +if [[ "$GFSCASE" = "T62" ]]; then + JCAP=62 + JCAP_B=62 +elif [[ "$GFSCASE" = "T126" ]]; then + JCAP=126 + JCAP_B=126 +elif [[ "$GFSCASE" = "enkf_glb_t62" ]]; then + JCAP=62 + JCAP_B=62 +elif [[ "$GFSCASE" = "T254" ]]; then + JCAP=254 + JCAP_B=574 +elif [[ "$GFSCASE" = "T574" ]]; then + JCAP=574 + JCAP_B=1534 +else + echo "INVALID case = $GFSCASE" + exit +fi + LEVS=64 +# +# + BYTE_ORDER=Big_Endian +# +##################################################### +# Users should NOT change script after this point +##################################################### +# + +case $ARCH in + 'IBM_LSF') + ###### IBM LSF (Load Sharing Facility) + RUN_COMMAND="mpirun.lsf " ;; + + 'LINUX') + if [ $GSIPROC = 1 ]; then + #### Linux workstation - single processor + RUN_COMMAND="" + else + ###### Linux workstation - mpi run + RUN_COMMAND="mpirun -np ${GSIPROC} -machinefile ~/mach " + fi ;; + + 'LINUX_LSF') + ###### LINUX LSF (Load Sharing Facility) + RUN_COMMAND="mpirun.lsf " ;; + + 'LINUX_PBS') + #### Linux cluster PBS (Portable Batch System) + RUN_COMMAND="mpirun -np ${GSIPROC} " ;; + + 'DARWIN_PGI') + ### Mac - mpi run + if [ $GSIPROC = 1 ]; then + #### Mac workstation - single processor + RUN_COMMAND="" + else + ###### Mac workstation - mpi run + RUN_COMMAND="mpirun -np ${GSIPROC} -machinefile 
~/mach " + fi ;; + + * ) + print "error: $ARCH is not a supported platform configuration." + exit 1 ;; +esac + + +################################################################################## +# Check GSI needed environment variables are defined and exist +# + +# Make sure ANAL_TIME is defined and in the correct format +if [ ! "${ANAL_TIME}" ]; then + echo "ERROR: \$ANAL_TIME is not defined!" + exit 1 +fi + +# Make sure WORK_ROOT is defined and exists +if [ ! "${WORK_ROOT}" ]; then + echo "ERROR: \$WORK_ROOT is not defined!" + exit 1 +fi + +# Make sure the background file exists +if [ ! -r "${BK_ROOT}" ]; then + echo "ERROR: ${BK_ROOT} does not exist!" + exit 1 +fi + +# Make sure OBS_ROOT is defined and exists +if [ ! "${OBS_ROOT}" ]; then + echo "ERROR: \$OBS_ROOT is not defined!" + exit 1 +fi +if [ ! -d "${OBS_ROOT}" ]; then + echo "ERROR: OBS_ROOT directory '${OBS_ROOT}' does not exist!" + exit 1 +fi + +# Set the path to the GSI static files +if [ ! "${FIX_ROOT}" ]; then + echo "ERROR: \$FIX_ROOT is not defined!" + exit 1 +fi +if [ ! -d "${FIX_ROOT}" ]; then + echo "ERROR: fix directory '${FIX_ROOT}' does not exist!" + exit 1 +fi + +# Set the path to the CRTM coefficients +if [ ! "${CRTM_ROOT}" ]; then + echo "ERROR: \$CRTM_ROOT is not defined!" + exit 1 +fi +if [ ! -d "${CRTM_ROOT}" ]; then + echo "ERROR: fix directory '${CRTM_ROOT}' does not exist!" + exit 1 +fi + + +# Make sure the GSI executable exists +if [ ! -x "${GSI_EXE}" ]; then + echo "ERROR: ${GSI_EXE} does not exist!" + exit 1 +fi + +# Check to make sure the number of processors for running GSI was specified +if [ -z "${GSIPROC}" ]; then + echo "ERROR: The variable $GSIPROC must be set to contain the number of processors to run GSI" + exit 1 +fi + +################################################################################ +## Given the analysis date, compute the date from which the +# first guess comes. 
Extract cycle and set prefix +# for guess and observation data files +#hha=`echo $ANAL_TIME | cut -c9-10` +#hhg=`echo $GUESS_TIME | cut -c9-10` + +# +################################################################################## +# Create the ram work directory and cd into it + +workdir=${WORK_ROOT} +echo " Create working directory:" ${workdir} + +if [ -d "${workdir}" ]; then + rm -rf ${workdir} +fi +mkdir -p ${workdir} +cd ${workdir} + +# +################################################################################## +# Set some parameters for use by the GSI executable and to build the namelist +echo " Build the namelist " + +# Given the requested resolution, set dependent resolution parameters +if [[ "$JCAP" = "382" ]]; then + LONA=768 + LATA=384 + DELTIM=180 + resol=1 +elif [[ "$JCAP" = "574" ]]; then + LONA=1152 + LATA=576 + DELTIM=1200 + resol=2 +elif [[ "$JCAP" = "254" ]]; then + LONA=512 + LATA=256 + DELTIM=1200 + resol=2 +elif [[ "$JCAP" = "126" ]]; then + LONA=256 + LATA=128 + DELTIM=1200 + resol=2 +elif [[ "$JCAP" = "62" ]]; then + LONA=192 + LATA=94 + DELTIM=1200 + resol=2 +else + echo "INVALID JCAP = $JCAP" + exit +fi +NLAT=` expr $LATA + 2 ` + +# CO2 namelist and file decisions +ICO2=${ICO2:-0} +if [ $ICO2 -gt 0 ] ; then + # Copy co2 files to $workdir + co2dir=${FIX_ROOT} + yyyy=`echo $ANAL_TIME | cut -c1-4` + rm ./global_co2_data.txt + co2=$co2dir/global_co2.gcmscl_$yyyy.txt + while [ ! -s $co2 ] ; do + ((yyyy-=1)) + co2=$co2dir/global_co2.gcmscl_$yyyy.txt + done + if [ -s $co2 ] ; then + cp $co2 ./global_co2_data.txt + fi + if [ ! -s ./global_co2_data.txt ] ; then + echo "\./global_co2_data.txt" not created + exit 1 + fi +fi +#CH4 file decision +ICH4=${ICH4:-0} +if [ $ICH4 -gt 0 ] ; then +# # Copy ch4 files to $workdir + ch4dir=${FIX_ROOT} + yyyy=`echo $ANAL_TIME | cut -c1-4` + rm ./ch4globaldata.txt + ch4=$ch4dir/global_ch4_esrlctm_$yyyy.txt + while [ ! 
-s $ch4 ] ; do + ((yyyy-=1)) + ch4=$ch4dir/global_ch4_esrlctm_$yyyy.txt + done + if [ -s $ch4 ] ; then + cp $ch4 ./ch4globaldata.txt + fi + if [ ! -s ./ch4globaldata.txt ] ; then + echo "\./ch4globaldata.txt" not created + exit 1 + fi +fi +IN2O=${IN2O:-0} +if [ $IN2O -gt 0 ] ; then +# # Copy ch4 files to $workdir + n2odir=${FIX_ROOT} + yyyy=`echo $ANAL_TIME | cut -c1-4` + rm ./n2oglobaldata.txt + n2o=$n2odir/global_n2o_esrlctm_$yyyy.txt + while [ ! -s $n2o ] ; do + ((yyyy-=1)) + n2o=$n2odir/global_n2o_esrlctm_$yyyy.txt + done + if [ -s $n2o ] ; then + cp $n2o ./n2oglobaldata.txt + fi + if [ ! -s ./n2oglobaldata.txt ] ; then + echo "\./n2oglobaldata.txt" not created + exit 1 + fi +fi +ICO=${ICO:-0} +if [ $ICO -gt 0 ] ; then +# # Copy CO files to $workdir + codir=${FIX_ROOT} + yyyy=`echo $ANAL_TIME | cut -c1-4` + rm ./coglobaldata.txt + co=$codir/global_co_esrlctm_$yyyy.txt + while [ ! -s $co ] ; do + ((yyyy-=1)) + co=$codir/global_co_esrlctm_$yyyy.txt + done + if [ -s $co ] ; then + cp $co ./coglobaldata.txt + fi + if [ ! -s ./coglobaldata.txt ] ; then + echo "\./coglobaldata.txt" not created + exit 1 + fi +fi + +################################################################################## +# Set some parameters for use by the GSI executable and to build the namelist +echo " Build the namelist " + +vs_op='0.7,' +hzscl_op='1.7,0.8,0.5,' + +if [ ${if_observer} = Yes ] ; then + nummiter=0 + if_read_obs_save='.true.' + if_read_obs_skip='.false.' +else + nummiter=2 + if_read_obs_save='.false.' + if_read_obs_skip='.false.' +fi + +# Build the GSI namelist on-the-fly +. 
$GSI_NAMELIST + +################################################################################## + +echo " Copy GSI executable, background file, and link observation bufr to working directory" + +# Save a copy of the GSI executable in the workdir +cp ${GSI_EXE} gsi.x + +# Bring over background field (it's modified by GSI so we can't link to it) +# Copy bias correction, atmospheric and surface files +if [[ "$GFSCASE" = "enkf_glb_t62" ]]; then + cp $BK_ROOT/bfg_${gdate}_fhr03_ensmean ./sfcf03 + cp $BK_ROOT/bfg_${gdate}_fhr06_ensmean ./sfcf06 + cp $BK_ROOT/bfg_${gdate}_fhr09_ensmean ./sfcf09 + + cp $BK_ROOT/sfg_${gdate}_fhr03_mem001 ./sigf03 + cp $BK_ROOT/sfg_${gdate}_fhr06_mem001 ./sigf06 + cp $BK_ROOT/sfg_${gdate}_fhr09_mem001 ./sigf09 +else + + cp $BK_ROOT/sfcf03 ./sfcf03 + cp $BK_ROOT/sfcf06 ./sfcf06 + cp $BK_ROOT/sfcf09 ./sfcf09 + + cp $BK_ROOT/sigf03 ./sigf03 + cp $BK_ROOT/sigf06 ./sigf06 + cp $BK_ROOT/sigf09 ./sigf09 +fi + +cp ${GSI_ROOT}/fix/comgsi_satbias_in ./satbias_in +cp ${GSI_ROOT}/fix/comgsi_satbias_pc_in ./satbias_pc_in + +# link GFS ensemble files +# ln -s $ENS_ROOT/sigf06_ens_mem* . +# link the localization file +# ln -s ${ENS_ROOT}/hybens_locinfo . +# Link to the prepbufr data +ln -s ${PREPBUFR} ./prepbufr + +# Link to the other observation data +if [ -r "${OBS_ROOT}/satwnd" ]; then + ln -s ${OBS_ROOT}/satwnd . +fi +if [ -r "${OBS_ROOT}/gpsrobufr" ]; then + ln -s ${OBS_ROOT}/gpsrobufr . +fi +if [ -r "${OBS_ROOT}/ssmirrbufr" ]; then + ln -s ${OBS_ROOT}/ssmirrbufr . +fi +if [ -r "${OBS_ROOT}/tmirrbufr" ]; then + ln -s ${OBS_ROOT}/tmirrbufr . +fi +if [ -r "${OBS_ROOT}/sbuvbufr" ]; then + ln -s ${OBS_ROOT}/sbuvbufr . +fi +if [ -r "${OBS_ROOT}/gsnd1bufr" ]; then + ln -s ${OBS_ROOT}/gsnd1bufr . +fi +if [ -r "${OBS_ROOT}/amsuabufr" ]; then + ln -s ${OBS_ROOT}/amsuabufr amsuabufr +fi +if [ -r "${OBS_ROOT}/amsubbufr" ]; then + ln -s ${OBS_ROOT}/amsubbufr amsubbufr +fi +if [ -r "${OBS_ROOT}/hirs2bufr" ]; then + ln -s ${OBS_ROOT}/hirs2bufr . 
+fi +if [ -r "${OBS_ROOT}/hirs3bufr" ]; then + ln -s ${OBS_ROOT}/hirs3bufr . +fi +if [ -r "${OBS_ROOT}/hirs4bufr" ]; then + ln -s ${OBS_ROOT}/hirs4bufr . +fi +if [ -r "${OBS_ROOT}/mhsbufr" ]; then + ln -s ${OBS_ROOT}/mhsbufr . +fi +if [ -r "${OBS_ROOT}//msubufr" ]; then + ln -s ${OBS_ROOT}/msubufr . +fi +if [ -r "${OBS_ROOT}//airsbufr" ]; then + ln -s ${OBS_ROOT}/airsbufr . +fi +if [ -r "${OBS_ROOT}//atmsbufr" ]; then + ln -s ${OBS_ROOT}/atmsbufr . +fi +if [ -r "${OBS_ROOT}//crisbufr" ]; then + ln -s ${OBS_ROOT}/crisbufr . +fi +if [ -r "${OBS_ROOT}//seviribufr" ]; then + ln -s ${OBS_ROOT}/seviribufr . +fi +if [ -r "${OBS_ROOT}//iasibufr" ]; then + ln -s ${OBS_ROOT}/iasibufr . +fi +if [ -r "${OBS_ROOT}//ssmitbufr" ]; then + ln -s ${OBS_ROOT}/ssmitbufr . +fi +if [ -r "${OBS_ROOT}//amsrebufr" ]; then + ln -s ${OBS_ROOT}/amsrebufr . +fi +if [ -r "${OBS_ROOT}//ssmisbufr" ]; then + ln -s ${OBS_ROOT}/ssmisbufr . +fi +if [ -r "${OBS_ROOT}//gomebufr" ]; then + ln -s ${OBS_ROOT}/gomebufr . +fi +if [ -r "${OBS_ROOT}//omibufr" ]; then + ln -s ${OBS_ROOT}/omibufr . +fi +if [ -r "${OBS_ROOT}/mlsbufr" ]; then + ln -s ${OBS_ROOT}/mlsbufr . +fi +if [ -r "${OBS_ROOT}/hirs3bufrears" ]; then + ln -s ${OBS_ROOT}/hirs3bufrears . +fi +if [ -r "${OBS_ROOT}/amsuabufrears" ]; then + ln -s ${OBS_ROOT}/amsuabufrears . +fi +if [ -r "${OBS_ROOT}/amsubbufrears" ]; then + ln -s ${OBS_ROOT}/amsubbufrears . +fi +if [ -r "${OBS_ROOT}/tcvitl" ]; then + ln -s ${OBS_ROOT}/tcvitl . +fi +if [ -r "${OBS_ROOT}/satwndbufr" ]; then + ln -s ${OBS_ROOT}/satwndbufr . 
+fi + + +# +################################################################################## + +echo " Copy fixed files and link CRTM coefficient files to working directory" + +# Set fixed files +# berror = forecast model background error statistics +# specoef = CRTM spectral coefficients +# trncoef = CRTM transmittance coefficients +# emiscoef = CRTM coefficients for IR sea surface emissivity model +# aerocoef = CRTM coefficients for aerosol effects +# cldcoef = CRTM coefficients for cloud effects +# satinfo = text file with information about assimilation of brightness temperatures +# satangl = angle dependent bias correction file (fixed in time) +# pcpinfo = text file with information about assimilation of precipitation rates +# ozinfo = text file with information about assimilation of ozone data +# errtable = text file with obs error for conventional data (regional only) +# convinfo = text file with information about assimilation of conventional data +# bufrtable= text file ONLY needed for single obs test (oneobstest=.true.) +# bftab_sst= bufr table for sst ONLY needed for sst retrieval (retrieval=.true.)
+ +ANAVINFO=${FIX_ROOT}/global_anavinfo.l64.txt +BERROR=${FIX_ROOT}/${BYTE_ORDER}/global_berror.l${LEVS}y${NLAT}.f77 +SATINFO=${FIX_ROOT}/global_satinfo.txt +scaninfo=${FIX_ROOT}/global_scaninfo.txt +SATANGL=${FIX_ROOT}/global_satangbias.txt +atmsbeamdat=${FIX_ROOT}/atms_beamwidth.txt +CONVINFO=${FIX_ROOT}/global_convinfo_reg_test.txt +OZINFO=${FIX_ROOT}/global_ozinfo.txt +PCPINFO=${FIX_ROOT}/global_pcpinfo.txt +OBERROR=${FIX_ROOT}/prepobs_errtable.global +CLOUDYRADINFO=${FIX_ROOT}/cloudy_radiance_info.txt +HYBENSINFO=${FIX_ROOT}/global_hybens_info.l64.txt + +# Only need this file for single obs test +bufrtable=${FIX_ROOT}/prepobs_prep.bufrtable + +# Only need this file for sst retrieval +bftab_sst=${FIX_ROOT}/bufrtab.012 + +# copy Fixed fields to working directory + cp $ANAVINFO anavinfo + cp $BERROR berror_stats + cp $SATANGL satbias_angle + cp $atmsbeamdat atms_beamwidth.txt + cp $SATINFO satinfo + cp $scaninfo scaninfo + cp $CONVINFO convinfo + cp $OZINFO ozinfo + cp $PCPINFO pcpinfo + cp $OBERROR errtable + cp $CLOUDYRADINFO cloudy_radiance_info.txt + cp $HYBENSINFO hybens_info + + cp $bufrtable ./prepobs_prep.bufrtable + cp $bftab_sst ./bftab_sstphr + +# +# CRTM Spectral and Transmittance coefficients +RTMFIX=${CRTM_ROOT}/${BYTE_ORDER} +emiscoef_IRwater=${RTMFIX}/Nalli.IRwater.EmisCoeff.bin +emiscoef_IRice=${RTMFIX}/NPOESS.IRice.EmisCoeff.bin +emiscoef_IRland=${RTMFIX}/NPOESS.IRland.EmisCoeff.bin +emiscoef_IRsnow=${RTMFIX}/NPOESS.IRsnow.EmisCoeff.bin +emiscoef_VISice=${RTMFIX}/NPOESS.VISice.EmisCoeff.bin +emiscoef_VISland=${RTMFIX}/NPOESS.VISland.EmisCoeff.bin +emiscoef_VISsnow=${RTMFIX}/NPOESS.VISsnow.EmisCoeff.bin +emiscoef_VISwater=${RTMFIX}/NPOESS.VISwater.EmisCoeff.bin +emiscoef_MWwater=${RTMFIX}/FASTEM6.MWwater.EmisCoeff.bin +aercoef=${RTMFIX}/AerosolCoeff.bin +cldcoef=${RTMFIX}/CloudCoeff.bin + +ln -s $emiscoef_IRwater ./Nalli.IRwater.EmisCoeff.bin +ln -s $emiscoef_IRice ./NPOESS.IRice.EmisCoeff.bin +ln -s $emiscoef_IRsnow ./NPOESS.IRsnow.EmisCoeff.bin 
+ln -s $emiscoef_IRland ./NPOESS.IRland.EmisCoeff.bin +ln -s $emiscoef_VISice ./NPOESS.VISice.EmisCoeff.bin +ln -s $emiscoef_VISland ./NPOESS.VISland.EmisCoeff.bin +ln -s $emiscoef_VISsnow ./NPOESS.VISsnow.EmisCoeff.bin +ln -s $emiscoef_VISwater ./NPOESS.VISwater.EmisCoeff.bin +ln -s $emiscoef_MWwater ./FASTEM6.MWwater.EmisCoeff.bin +ln -s $aercoef ./AerosolCoeff.bin +ln -s $cldcoef ./CloudCoeff.bin +# Copy CRTM coefficient files based on entries in satinfo file +for file in `awk '{if($1!~"!"){print $1}}' ./satinfo | sort | uniq` ;do + ln -s ${RTMFIX}/${file}.SpcCoeff.bin ./ + ln -s ${RTMFIX}/${file}.TauCoeff.bin ./ +done + +# +################################################### +# run GSI +################################################### +echo ' Run GSI with' ${bk_core} 'background' + +case $ARCH in + 'IBM_LSF') + ${RUN_COMMAND} ./gsi.x < gsiparm.anl > stdout 2>&1 ;; + + * ) + ${RUN_COMMAND} ./gsi.x > stdout 2>&1 ;; +esac + +################################################################## +# run time error check +################################################################## +error=$? + +if [ ${error} -ne 0 ]; then + echo "ERROR: ${GSI} crashed Exit status=${error}" + exit ${error} +fi +# +################################################################## +# +# Copy the output to more understandable names +ln -s stdout stdout.anl.${ANAL_TIME} +ln -s fort.201 fit_p1.${ANAL_TIME} +ln -s fort.202 fit_w1.${ANAL_TIME} +ln -s fort.203 fit_t1.${ANAL_TIME} +ln -s fort.204 fit_q1.${ANAL_TIME} +ln -s fort.207 fit_rad1.${ANAL_TIME} + +# Loop over first and last outer loops to generate innovation +# diagnostic files for indicated observation types (groups) +# +# NOTE: Since we set miter=2 in GSI namelist SETUP, outer +# loop 03 will contain innovations with respect to +# the analysis. Creation of o-a innovation files +# is triggered by write_diag(3)=.true. The setting +# write_diag(1)=.true. turns on creation of o-g +# innovation files. 
+# + +echo "Time before diagnostic loop is `date` " +loops="01 03" +for loop in $loops; do + +case $loop in + 01) string=ges;; + 03) string=anl;; + *) string=$loop;; +esac + +# Collect diagnostic files for obs types (groups) below + listall=`ls pe* | cut -f2 -d"." | awk '{print substr($0, 0, length($0)-3)}' | sort | uniq ` + + for type in $listall; do + count=`ls pe*${type}_${loop}* | wc -l` + if [[ $count -gt 0 ]]; then + cat pe*${type}_${loop}* > diag_${type}_${string}.${ANAL_TIME} + fi + done +done + +# Clean working directory to save only important files +ls -l * > list_run_directory +if [[ ${if_clean} = clean && ${if_observer} != Yes ]]; then + echo ' Clean working directory after GSI run' + rm -f *Coeff.bin # all CRTM coefficient files + rm -fr pe0* # diag files on each processor + rm -f obs_input.* # observation middle files + rm -f sigf* sfcf* # background files + rm -f fsize_* # delete temperal file for bufr size +fi + +################################################# +# start to calculate diag files for each member +################################################# +# +if [ ${if_observer} = Yes ] ; then + string=ges + for type in $listall; do + count=0 + if [[ -f diag_${type}_${string}.${ANAL_TIME} ]]; then + mv diag_${type}_${string}.${ANAL_TIME} diag_${type}_${string}.ensmean + fi + done + +# Build the GSI namelist on-the-fly for each member + nummiter=0 + if_read_obs_save='.false.' + if_read_obs_skip='.true.' +. 
$GSI_NAMELIST + +# Loop through each member + loop="01" + ensmem=1 + while [[ $ensmem -le $no_member ]];do + + rm pe0* + + print "\$ensmem is $ensmem" + ensmemid=`printf %3.3i $ensmem` + +# get new background for each member + if [[ -f sigf03 ]]; then + rm sigf03 + fi + if [[ -f sigf06 ]]; then + rm sigf06 + fi + if [[ -f sigf09 ]]; then + rm sigf09 + fi + + BK_FILE03=${BK_FILE_mem}_fhr03_mem${ensmemid} + BK_FILE06=${BK_FILE_mem}_fhr06_mem${ensmemid} + BK_FILE09=${BK_FILE_mem}_fhr09_mem${ensmemid} + echo $BK_FILE06 + ln -s $BK_FILE03 ./sigf03 + ln -s $BK_FILE06 ./sigf06 + ln -s $BK_FILE09 ./sigf09 + +# run GSI + echo ' Run GSI with' ${bk_core} 'for member ', ${ensmemid} + + case $ARCH in + 'IBM_LSF') + ${RUN_COMMAND} ./gsi.x < gsiparm.anl > stdout_mem${ensmemid} 2>&1 ;; + + * ) + ${RUN_COMMAND} ./gsi.x > stdout_mem${ensmemid} 2>&1 ;; + esac + +# run time error check and save run time file status + error=$? + + if [ ${error} -ne 0 ]; then + echo "ERROR: ${GSI} crashed for member ${ensmemid} Exit status=${error}" + exit ${error} + fi + + ls -l * > list_run_directory_mem${ensmemid} + +# generate diag files + + for type in $listall; do + count=`ls pe*${type}_${loop}* | wc -l` + if [[ $count -gt 0 ]]; then + cat pe*${type}_${loop}* > diag_${type}_${string}.mem${ensmemid} + fi + done + +# next member + (( ensmem += 1 )) + + done + +fi + +exit 0 + +exit 0 diff --git a/ush/comgsi_run_regional.ksh b/ush/comgsi_run_regional.ksh new file mode 100755 index 000000000..424237520 --- /dev/null +++ b/ush/comgsi_run_regional.ksh @@ -0,0 +1,666 @@ +#!/bin/ksh +##################################################### +# machine set up (users should change this part) +##################################################### + +set -x +# +# GSIPROC = processor number used for GSI analysis +#------------------------------------------------ + GSIPROC=1 + ARCH='LINUX_LSF' + +# Supported configurations: + # IBM_LSF, + # LINUX, LINUX_LSF, LINUX_PBS, + # DARWIN_PGI +# 
+
##################################################### +# case set up (users should change this part) +##################################################### +# +# ANAL_TIME= analysis time (YYYYMMDDHH) +# WORK_ROOT= working directory, where GSI runs +# PREPBUFR = path of PrepBUFR conventional obs +# BK_FILE = path and name of background file +# OBS_ROOT = path of observations files +# FIX_ROOT = path of fix files +# GSI_EXE = path and name of the gsi executable +# ENS_ROOT = path where ensemble background files exist + ANAL_TIME=2017051318 + JOB_DIR=the_job_directory + #normally you put run scripts here and submit jobs from here, require a copy of gsi.x at this directory + RUN_NAME=a_descriptive_run_name_such_as_case05_3denvar_etc + OBS_ROOT=the_directory_where_observation_files_are_located + BK_ROOT=the_directory_where_background_files_are_located + GSI_ROOT=the_comgsi_main directory where src/ ush/ fix/ etc are located + CRTM_ROOT=the_CRTM_directory + ENS_ROOT=the_directory_where_ensemble_backgrounds_are_located + #ENS_ROOT is not required if not running hybrid EnVAR + HH=`echo $ANAL_TIME | cut -c9-10` + GSI_EXE=${JOB_DIR}/gsi.x #assume you have a copy of gsi.x here + WORK_ROOT=${JOB_DIR}/${RUN_NAME} + FIX_ROOT=${GSI_ROOT}/fix + GSI_NAMELIST=${GSI_ROOT}/ush/comgsi_namelist.sh + PREPBUFR=${OBS_ROOT}/nam.t${HH}z.prepbufr.tm00 + BK_FILE=${BK_ROOT}/wrfinput_d01.${ANAL_TIME} +# +#------------------------------------------------ +# bk_core= which WRF core is used as background (NMM or ARW or NMMB) +# bkcv_option= which background error covariance and parameter will be used +# (GLOBAL or NAM) +# if_clean = clean : delete temporary files in working directory (default) +# no : leave running directory as is (this is for debug only) +# if_observer = Yes : only used as observation operator for enkf +# if_hybrid = Yes : Run GSI as 3D/4D EnVar +# if_4DEnVar = Yes : Run GSI as 4D EnVar +# if_nemsio = Yes : The GFS background files are in NEMSIO format +# if_oneob = Yes : Do single
observation test + if_hybrid=No # Yes, or, No -- case sensitive ! + if_4DEnVar=No # Yes, or, No -- case sensitive (set if_hybrid=Yes first)! + if_observer=No # Yes, or, No -- case sensitive ! + if_nemsio=No # Yes, or, No -- case sensitive ! + if_oneob=No # Yes, or, No -- case sensitive ! + + bk_core=ARW + bkcv_option=NAM + if_clean=clean +# +# setup whether to do single obs test + if [ ${if_oneob} = Yes ]; then + if_oneobtest='.true.' + else + if_oneobtest='.false.' + fi +# +# setup for GSI 3D/4D EnVar hybrid + if [ ${if_hybrid} = Yes ] ; then + PDYa=`echo $ANAL_TIME | cut -c1-8` + cyca=`echo $ANAL_TIME | cut -c9-10` + gdate=`date -u -d "$PDYa $cyca -6 hour" +%Y%m%d%H` #guess date is 6hr ago + gHH=`echo $gdate |cut -c9-10` + datem1=`date -u -d "$PDYa $cyca -1 hour" +%Y-%m-%d_%H:%M:%S` #1hr ago + datep1=`date -u -d "$PDYa $cyca 1 hour" +%Y-%m-%d_%H:%M:%S` #1hr later + if [ ${if_nemsio} = Yes ]; then + if_gfs_nemsio='.true.' + ENSEMBLE_FILE_mem=${ENS_ROOT}/gdas.t${gHH}z.atmf006s.mem + else + if_gfs_nemsio='.false.' + ENSEMBLE_FILE_mem=${ENS_ROOT}/sfg_${gdate}_fhr06s_mem + fi + + if [ ${if_4DEnVar} = Yes ] ; then + BK_FILE_P1=${BK_ROOT}/wrfout_d01_${datep1} + BK_FILE_M1=${BK_ROOT}/wrfout_d01_${datem1} + + if [ ${if_nemsio} = Yes ]; then + ENSEMBLE_FILE_mem_p1=${ENS_ROOT}/gdas.t${gHH}z.atmf009s.mem + ENSEMBLE_FILE_mem_m1=${ENS_ROOT}/gdas.t${gHH}z.atmf003s.mem + else + ENSEMBLE_FILE_mem_p1=${ENS_ROOT}/sfg_${gdate}_fhr09s_mem + ENSEMBLE_FILE_mem_m1=${ENS_ROOT}/sfg_${gdate}_fhr03s_mem + fi + fi + fi + +# The following two only apply when if_observer = Yes, i.e. 
run observation operator for EnKF +# no_member number of ensemble members +# BK_FILE_mem path and base for ensemble members + no_member=20 + BK_FILE_mem=${BK_ROOT}/wrfarw.mem +# +# +##################################################### +# Users should NOT make changes after this point +##################################################### +# +BYTE_ORDER=Big_Endian +# BYTE_ORDER=Little_Endian + +case $ARCH in + 'IBM_LSF') + ###### IBM LSF (Load Sharing Facility) + RUN_COMMAND="mpirun.lsf " ;; + + 'LINUX') + if [ $GSIPROC = 1 ]; then + #### Linux workstation - single processor + RUN_COMMAND="" + else + ###### Linux workstation - mpi run + RUN_COMMAND="mpirun -np ${GSIPROC} -machinefile ~/mach " + fi ;; + + 'LINUX_LSF') + ###### LINUX LSF (Load Sharing Facility) + RUN_COMMAND="mpirun.lsf " ;; + + 'LINUX_PBS') + #### Linux cluster PBS (Portable Batch System) + RUN_COMMAND="mpirun -np ${GSIPROC} " ;; + + 'DARWIN_PGI') + ### Mac - mpi run + if [ $GSIPROC = 1 ]; then + #### Mac workstation - single processor + RUN_COMMAND="" + else + ###### Mac workstation - mpi run + RUN_COMMAND="mpirun -np ${GSIPROC} -machinefile ~/mach " + fi ;; + + * ) + print "error: $ARCH is not a supported platform configuration." + exit 1 ;; +esac + + +################################################################################## +# Check GSI needed environment variables are defined and exist +# + +# Make sure ANAL_TIME is defined and in the correct format +if [ ! "${ANAL_TIME}" ]; then + echo "ERROR: \$ANAL_TIME is not defined!" + exit 1 +fi + +# Make sure WORK_ROOT is defined and exists +if [ ! "${WORK_ROOT}" ]; then + echo "ERROR: \$WORK_ROOT is not defined!" + exit 1 +fi + +# Make sure the background file exists +if [ ! -r "${BK_FILE}" ]; then + echo "ERROR: ${BK_FILE} does not exist!" + exit 1 +fi + +# Make sure OBS_ROOT is defined and exists +if [ ! "${OBS_ROOT}" ]; then + echo "ERROR: \$OBS_ROOT is not defined!" + exit 1 +fi +if [ ! 
-d "${OBS_ROOT}" ]; then + echo "ERROR: OBS_ROOT directory '${OBS_ROOT}' does not exist!" + exit 1 +fi + +# Set the path to the GSI static files +if [ ! "${FIX_ROOT}" ]; then + echo "ERROR: \$FIX_ROOT is not defined!" + exit 1 +fi +if [ ! -d "${FIX_ROOT}" ]; then + echo "ERROR: fix directory '${FIX_ROOT}' does not exist!" + exit 1 +fi + +# Set the path to the CRTM coefficients +if [ ! "${CRTM_ROOT}" ]; then + echo "ERROR: \$CRTM_ROOT is not defined!" + exit 1 +fi +if [ ! -d "${CRTM_ROOT}" ]; then + echo "ERROR: fix directory '${CRTM_ROOT}' does not exist!" + exit 1 +fi + + +# Make sure the GSI executable exists +if [ ! -x "${GSI_EXE}" ]; then + echo "ERROR: ${GSI_EXE} does not exist!" + exit 1 +fi + +# Check to make sure the number of processors for running GSI was specified +if [ -z "${GSIPROC}" ]; then + echo "ERROR: The variable $GSIPROC must be set to contain the number of processors to run GSI" + exit 1 +fi + +# +################################################################################## +# Create the ram work directory and cd into it + +workdir=${WORK_ROOT} +echo " Create working directory:" ${workdir} + +if [ -d "${workdir}" ]; then + rm -rf ${workdir} +fi +mkdir -p ${workdir} +cd ${workdir} + +# +################################################################################## + +echo " Copy GSI executable, background file, and link observation bufr to working directory" + +# Save a copy of the GSI executable in the workdir +cp ${GSI_EXE} gsi.x + +# Bring over background field (it's modified by GSI so we can't link to it) +cp ${BK_FILE} ./wrf_inout +if [ ${if_4DEnVar} = Yes ] ; then + cp ${BK_FILE_P1} ./wrf_inou3 + cp ${BK_FILE_M1} ./wrf_inou1 +fi + + +# Link to the prepbufr data +ln -s ${PREPBUFR} ./prepbufr + +# ln -s ${OBS_ROOT}/gdas1.t${HH}z.sptrmm.tm00.bufr_d tmirrbufr +# Link to the radiance data +srcobsfile[1]=${OBS_ROOT}/gdas1.t${HH}z.satwnd.tm00.bufr_d +gsiobsfile[1]=satwnd +srcobsfile[2]=${OBS_ROOT}/gdas1.t${HH}z.1bamua.tm00.bufr_d 
+gsiobsfile[2]=amsuabufr +srcobsfile[3]=${OBS_ROOT}/gdas1.t${HH}z.1bhrs4.tm00.bufr_d +gsiobsfile[3]=hirs4bufr +srcobsfile[4]=${OBS_ROOT}/gdas1.t${HH}z.1bmhs.tm00.bufr_d +gsiobsfile[4]=mhsbufr +srcobsfile[5]=${OBS_ROOT}/gdas1.t${HH}z.1bamub.tm00.bufr_d +gsiobsfile[5]=amsubbufr +srcobsfile[6]=${OBS_ROOT}/gdas1.t${HH}z.ssmisu.tm00.bufr_d +gsiobsfile[6]=ssmirrbufr +# srcobsfile[7]=${OBS_ROOT}/gdas1.t${HH}z.airsev.tm00.bufr_d +gsiobsfile[7]=airsbufr +srcobsfile[8]=${OBS_ROOT}/gdas1.t${HH}z.sevcsr.tm00.bufr_d +gsiobsfile[8]=seviribufr +srcobsfile[9]=${OBS_ROOT}/gdas1.t${HH}z.iasidb.tm00.bufr_d +gsiobsfile[9]=iasibufr +srcobsfile[10]=${OBS_ROOT}/gdas1.t${HH}z.gpsro.tm00.bufr_d +gsiobsfile[10]=gpsrobufr +srcobsfile[11]=${OBS_ROOT}/gdas1.t${HH}z.amsr2.tm00.bufr_d +gsiobsfile[11]=amsrebufr +srcobsfile[12]=${OBS_ROOT}/gdas1.t${HH}z.atms.tm00.bufr_d +gsiobsfile[12]=atmsbufr +srcobsfile[13]=${OBS_ROOT}/gdas1.t${HH}z.geoimr.tm00.bufr_d +gsiobsfile[13]=gimgrbufr +srcobsfile[14]=${OBS_ROOT}/gdas1.t${HH}z.gome.tm00.bufr_d +gsiobsfile[14]=gomebufr +srcobsfile[15]=${OBS_ROOT}/gdas1.t${HH}z.omi.tm00.bufr_d +gsiobsfile[15]=omibufr +srcobsfile[16]=${OBS_ROOT}/gdas1.t${HH}z.osbuv8.tm00.bufr_d +gsiobsfile[16]=sbuvbufr +srcobsfile[17]=${OBS_ROOT}/gdas1.t${HH}z.eshrs3.tm00.bufr_d +gsiobsfile[17]=hirs3bufrears +srcobsfile[18]=${OBS_ROOT}/gdas1.t${HH}z.esamua.tm00.bufr_d +gsiobsfile[18]=amsuabufrears +srcobsfile[19]=${OBS_ROOT}/gdas1.t${HH}z.esmhs.tm00.bufr_d +gsiobsfile[19]=mhsbufrears +srcobsfile[20]=${OBS_ROOT}/rap.t${HH}z.nexrad.tm00.bufr_d +gsiobsfile[20]=l2rwbufr +srcobsfile[21]=${OBS_ROOT}/rap.t${HH}z.lgycld.tm00.bufr_d +gsiobsfile[21]=larcglb +ii=1 +while [[ $ii -le 21 ]]; do + if [ -r "${srcobsfile[$ii]}" ]; then +# ln -s ${srcobsfile[$ii]} ${gsiobsfile[$ii]} + echo "link source obs file ${srcobsfile[$ii]}" + fi + (( ii = $ii + 1 )) +done + +# +################################################################################## + +ifhyb=.false. 
+if [ ${if_hybrid} = Yes ] ; then + ls ${ENSEMBLE_FILE_mem}* > filelist02 + if [ ${if_4DEnVar} = Yes ] ; then + ls ${ENSEMBLE_FILE_mem_p1}* > filelist03 + ls ${ENSEMBLE_FILE_mem_m1}* > filelist01 + fi + + nummem=`more filelist02 | wc -l` + nummem=$((nummem -3 )) + + if [[ ${nummem} -ge 5 ]]; then + ifhyb=.true. + ${ECHO} " GSI hybrid uses ${ENSEMBLE_FILE_mem} with n_ens=${nummem}" + fi +fi +if4d=.false. +if [[ ${ifhyb} = .true. && ${if_4DEnVar} = Yes ]] ; then + if4d=.true. +fi +# +################################################################################## + +echo " Copy fixed files and link CRTM coefficient files to working directory" + +# Set fixed files +# berror = forecast model background error statistics +# specoef = CRTM spectral coefficients +# trncoef = CRTM transmittance coefficients +# emiscoef = CRTM coefficients for IR sea surface emissivity model +# aerocoef = CRTM coefficients for aerosol effects +# cldcoef = CRTM coefficients for cloud effects +# satinfo = text file with information about assimilation of brightness temperatures +# satangl = angle dependent bias correction file (fixed in time) +# pcpinfo = text file with information about assimilation of precipitation rates +# ozinfo = text file with information about assimilation of ozone data +# errtable = text file with obs error for conventional data (regional only) +# convinfo = text file with information about assimilation of conventional data +# bufrtable= text file ONLY needed for single obs test (oneobstest=.true.) +# bftab_sst= bufr table for sst ONLY needed for sst retrieval (retrieval=.true.)
+ +if [ ${bkcv_option} = GLOBAL ] ; then + echo ' Use global background error covariance' + BERROR=${FIX_ROOT}/${BYTE_ORDER}/nam_glb_berror.f77.gcv + OBERROR=${FIX_ROOT}/prepobs_errtable.global + if [ ${bk_core} = NMM ] ; then + ANAVINFO=${FIX_ROOT}/anavinfo_ndas_netcdf_glbe + fi + if [ ${bk_core} = ARW ] ; then + ANAVINFO=${FIX_ROOT}/anavinfo_arw_netcdf_glbe + fi + if [ ${bk_core} = NMMB ] ; then + ANAVINFO=${FIX_ROOT}/anavinfo_nems_nmmb_glb + fi +else + echo ' Use NAM background error covariance' + BERROR=${FIX_ROOT}/${BYTE_ORDER}/nam_nmmstat_na.gcv + OBERROR=${FIX_ROOT}/nam_errtable.r3dv + if [ ${bk_core} = NMM ] ; then + ANAVINFO=${FIX_ROOT}/anavinfo_ndas_netcdf + fi + if [ ${bk_core} = ARW ] ; then + ANAVINFO=${FIX_ROOT}/anavinfo_arw_netcdf + fi + if [ ${bk_core} = NMMB ] ; then + ANAVINFO=${FIX_ROOT}/anavinfo_nems_nmmb + fi +fi + +SATANGL=${FIX_ROOT}/global_satangbias.txt +SATINFO=${FIX_ROOT}/global_satinfo.txt +CONVINFO=${FIX_ROOT}/global_convinfo.txt +OZINFO=${FIX_ROOT}/global_ozinfo.txt +PCPINFO=${FIX_ROOT}/global_pcpinfo.txt + +# copy Fixed fields to working directory + cp $ANAVINFO anavinfo + cp $BERROR berror_stats + cp $SATANGL satbias_angle + cp $SATINFO satinfo + cp $CONVINFO convinfo + cp $OZINFO ozinfo + cp $PCPINFO pcpinfo + cp $OBERROR errtable +# +# # CRTM Spectral and Transmittance coefficients +CRTM_ROOT_ORDER=${CRTM_ROOT}/${BYTE_ORDER} +emiscoef_IRwater=${CRTM_ROOT_ORDER}/Nalli.IRwater.EmisCoeff.bin +emiscoef_IRice=${CRTM_ROOT_ORDER}/NPOESS.IRice.EmisCoeff.bin +emiscoef_IRland=${CRTM_ROOT_ORDER}/NPOESS.IRland.EmisCoeff.bin +emiscoef_IRsnow=${CRTM_ROOT_ORDER}/NPOESS.IRsnow.EmisCoeff.bin +emiscoef_VISice=${CRTM_ROOT_ORDER}/NPOESS.VISice.EmisCoeff.bin +emiscoef_VISland=${CRTM_ROOT_ORDER}/NPOESS.VISland.EmisCoeff.bin +emiscoef_VISsnow=${CRTM_ROOT_ORDER}/NPOESS.VISsnow.EmisCoeff.bin +emiscoef_VISwater=${CRTM_ROOT_ORDER}/NPOESS.VISwater.EmisCoeff.bin +emiscoef_MWwater=${CRTM_ROOT_ORDER}/FASTEM6.MWwater.EmisCoeff.bin 
+aercoef=${CRTM_ROOT_ORDER}/AerosolCoeff.bin +cldcoef=${CRTM_ROOT_ORDER}/CloudCoeff.bin + +ln -s $emiscoef_IRwater ./Nalli.IRwater.EmisCoeff.bin +ln -s $emiscoef_IRice ./NPOESS.IRice.EmisCoeff.bin +ln -s $emiscoef_IRsnow ./NPOESS.IRsnow.EmisCoeff.bin +ln -s $emiscoef_IRland ./NPOESS.IRland.EmisCoeff.bin +ln -s $emiscoef_VISice ./NPOESS.VISice.EmisCoeff.bin +ln -s $emiscoef_VISland ./NPOESS.VISland.EmisCoeff.bin +ln -s $emiscoef_VISsnow ./NPOESS.VISsnow.EmisCoeff.bin +ln -s $emiscoef_VISwater ./NPOESS.VISwater.EmisCoeff.bin +ln -s $emiscoef_MWwater ./FASTEM6.MWwater.EmisCoeff.bin +ln -s $aercoef ./AerosolCoeff.bin +ln -s $cldcoef ./CloudCoeff.bin +# Copy CRTM coefficient files based on entries in satinfo file +for file in `awk '{if($1!~"!"){print $1}}' ./satinfo | sort | uniq` ;do + ln -s ${CRTM_ROOT_ORDER}/${file}.SpcCoeff.bin ./ + ln -s ${CRTM_ROOT_ORDER}/${file}.TauCoeff.bin ./ +done + +# Only need this file for single obs test + bufrtable=${FIX_ROOT}/prepobs_prep.bufrtable + cp $bufrtable ./prepobs_prep.bufrtable + +# for satellite bias correction +# Users may need to use their own satbias files for correct bias correction +cp ${GSI_ROOT}/fix/comgsi_satbias_in ./satbias_in +cp ${GSI_ROOT}/fix/comgsi_satbias_pc_in ./satbias_pc_in + +# +################################################################################## +# Set some parameters for use by the GSI executable and to build the namelist +echo " Build the namelist " + +# default is NAM +# as_op='1.0,1.0,0.5 ,0.7,0.7,0.5,1.0,1.0,' +vs_op='1.0,' +hzscl_op='0.373,0.746,1.50,' +if [ ${bkcv_option} = GLOBAL ] ; then +# as_op='0.6,0.6,0.75,0.75,0.75,0.75,1.0,1.0' + vs_op='0.7,' + hzscl_op='1.7,0.8,0.5,' +fi +if [ ${bk_core} = NMMB ] ; then + vs_op='0.6,' +fi + +# default is NMM + bk_core_arw='.false.' + bk_core_nmm='.true.' + bk_core_nmmb='.false.' + bk_if_netcdf='.true.' +if [ ${bk_core} = ARW ] ; then + bk_core_arw='.true.' + bk_core_nmm='.false.' + bk_core_nmmb='.false.' + bk_if_netcdf='.true.' 
+fi +if [ ${bk_core} = NMMB ] ; then + bk_core_arw='.false.' + bk_core_nmm='.false.' + bk_core_nmmb='.true.' + bk_if_netcdf='.false.' +fi + +if [ ${if_observer} = Yes ] ; then + nummiter=0 + if_read_obs_save='.true.' + if_read_obs_skip='.false.' +else + nummiter=2 + if_read_obs_save='.false.' + if_read_obs_skip='.false.' +fi + +# Build the GSI namelist on-the-fly +. $GSI_NAMELIST + +# modify the anavinfo vertical levels based on wrf_inout for WRF ARW and NMM +if [ ${bk_core} = ARW ] || [ ${bk_core} = NMM ] ; then +bklevels=`ncdump -h wrf_inout | grep "bottom_top =" | awk '{print $3}' ` +bklevels_stag=`ncdump -h wrf_inout | grep "bottom_top_stag =" | awk '{print $3}' ` +anavlevels=`cat anavinfo | grep ' sf ' | tail -1 | awk '{print $2}' ` # levels of sf, vp, u, v, t, etc +anavlevels_stag=`cat anavinfo | grep ' prse ' | tail -1 | awk '{print $2}' ` # levels of prse +sed -i 's/ '$anavlevels'/ '$bklevels'/g' anavinfo +sed -i 's/ '$anavlevels_stag'/ '$bklevels_stag'/g' anavinfo +fi + +# +################################################### +# run GSI +################################################### +echo ' Run GSI with' ${bk_core} 'background' + +case $ARCH in + 'IBM_LSF') + ${RUN_COMMAND} ./gsi.x < gsiparm.anl > stdout 2>&1 ;; + + * ) + ${RUN_COMMAND} ./gsi.x > stdout 2>&1 ;; +esac + +################################################################## +# run time error check +################################################################## +error=$? 
+ +if [ ${error} -ne 0 ]; then + echo "ERROR: ${GSI} crashed Exit status=${error}" + exit ${error} +fi + +# +################################################################## +# +# GSI updating satbias_in +# +# GSI updating satbias_in (only for cycling assimilation) + +# Copy the output to more understandable names +ln -s stdout stdout.anl.${ANAL_TIME} +ln -s wrf_inout wrfanl.${ANAL_TIME} +ln -s fort.201 fit_p1.${ANAL_TIME} +ln -s fort.202 fit_w1.${ANAL_TIME} +ln -s fort.203 fit_t1.${ANAL_TIME} +ln -s fort.204 fit_q1.${ANAL_TIME} +ln -s fort.207 fit_rad1.${ANAL_TIME} + +# Loop over first and last outer loops to generate innovation +# diagnostic files for indicated observation types (groups) +# +# NOTE: Since we set miter=2 in GSI namelist SETUP, outer +# loop 03 will contain innovations with respect to +# the analysis. Creation of o-a innovation files +# is triggered by write_diag(3)=.true. The setting +# write_diag(1)=.true. turns on creation of o-g +# innovation files. +# + +loops="01 03" +for loop in $loops; do + +case $loop in + 01) string=ges;; + 03) string=anl;; + *) string=$loop;; +esac + +# Collect diagnostic files for obs types (groups) below +# listall="conv amsua_metop-a mhs_metop-a hirs4_metop-a hirs2_n14 msu_n14 \ +# sndr_g08 sndr_g10 sndr_g12 sndr_g08_prep sndr_g10_prep sndr_g12_prep \ +# sndrd1_g08 sndrd2_g08 sndrd3_g08 sndrd4_g08 sndrd1_g10 sndrd2_g10 \ +# sndrd3_g10 sndrd4_g10 sndrd1_g12 sndrd2_g12 sndrd3_g12 sndrd4_g12 \ +# hirs3_n15 hirs3_n16 hirs3_n17 amsua_n15 amsua_n16 amsua_n17 \ +# amsub_n15 amsub_n16 amsub_n17 hsb_aqua airs_aqua amsua_aqua \ +# goes_img_g08 goes_img_g10 goes_img_g11 goes_img_g12 \ +# pcp_ssmi_dmsp pcp_tmi_trmm sbuv2_n16 sbuv2_n17 sbuv2_n18 \ +# omi_aura ssmi_f13 ssmi_f14 ssmi_f15 hirs4_n18 amsua_n18 mhs_n18 \ +# amsre_low_aqua amsre_mid_aqua amsre_hig_aqua ssmis_las_f16 \ +# ssmis_uas_f16 ssmis_img_f16 ssmis_env_f16 mhs_metop_b \ +# hirs4_metop_b hirs4_n19 amusa_n19 mhs_n19" + listall=`ls pe* | cut -f2 -d"." 
| awk '{print substr($0, 0, length($0)-3)}' | sort | uniq ` + + for type in $listall; do + count=`ls pe*${type}_${loop}* | wc -l` + if [[ $count -gt 0 ]]; then + cat pe*${type}_${loop}* > diag_${type}_${string}.${ANAL_TIME} + fi + done +done + +# Clean working directory to save only important files +ls -l * > list_run_directory +if [[ ${if_clean} = clean && ${if_observer} != Yes ]]; then + echo ' Clean working directory after GSI run' + rm -f *Coeff.bin # all CRTM coefficient files + rm -f pe0* # diag files on each processor + rm -f obs_input.* # observation middle files + rm -f siganl sigf0? # background middle files + rm -f fsize_* # delete temperal file for bufr size +fi +# +# +################################################# +# start to calculate diag files for each member +################################################# +# +if [ ${if_observer} = Yes ] ; then + string=ges + for type in $listall; do + count=0 + if [[ -f diag_${type}_${string}.${ANAL_TIME} ]]; then + mv diag_${type}_${string}.${ANAL_TIME} diag_${type}_${string}.ensmean + fi + done + mv wrf_inout wrf_inout_ensmean + +# Build the GSI namelist on-the-fly for each member + nummiter=0 + if_read_obs_save='.false.' + if_read_obs_skip='.true.' +. $GSI_NAMELIST + +# Loop through each member + loop="01" + ensmem=1 + while [[ $ensmem -le $no_member ]];do + + rm pe0* + + print "\$ensmem is $ensmem" + ensmemid=`printf %3.3i $ensmem` + +# get new background for each member + if [[ -f wrf_inout ]]; then + rm wrf_inout + fi + + BK_FILE=${BK_FILE_mem}${ensmemid} + echo $BK_FILE + ln -s $BK_FILE wrf_inout + +# run GSI + echo ' Run GSI with' ${bk_core} 'for member ', ${ensmemid} + + case $ARCH in + 'IBM_LSF') + ${RUN_COMMAND} ./gsi.x < gsiparm.anl > stdout_mem${ensmemid} 2>&1 ;; + + * ) + ${RUN_COMMAND} ./gsi.x > stdout_mem${ensmemid} 2>&1 ;; + esac + +# run time error check and save run time file status + error=$? 
+ + if [ ${error} -ne 0 ]; then + echo "ERROR: ${GSI} crashed for member ${ensmemid} Exit status=${error}" + exit ${error} + fi + + ls -l * > list_run_directory_mem${ensmemid} + +# generate diag files + + for type in $listall; do + count=`ls pe*${type}_${loop}* | wc -l` + if [[ $count -gt 0 ]]; then + cat pe*${type}_${loop}* > diag_${type}_${string}.mem${ensmemid} + fi + done + +# next member + (( ensmem += 1 )) + + done + +fi + +exit 0 diff --git a/util/Analysis_Utilities/read_diag/CMakeLists.txt b/util/Analysis_Utilities/read_diag/CMakeLists.txt new file mode 100644 index 000000000..d712632a2 --- /dev/null +++ b/util/Analysis_Utilities/read_diag/CMakeLists.txt @@ -0,0 +1,11 @@ +cmake_minimum_required(VERSION 2.6) + file(GLOB LOCAL_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.f90) + set_source_files_properties( ${LOCAL_SRC} PROPERTIES COMPILE_FLAGS ${UTIL_COM_Fortran_FLAGS} ) + + add_executable(read_diag_conv.x read_diag_conv.f90 ) + set_target_properties( read_diag_conv.x PROPERTIES COMPILE_FLAGS ${UTIL_COM_Fortran_FLAGS} ) + add_dependencies(read_diag_conv.x ${GSILIB} ) + + add_executable(read_diag_rad.x read_diag_rad.f90 ) + set_target_properties( read_diag_rad.x PROPERTIES COMPILE_FLAGS ${UTIL_COM_Fortran_FLAGS} ) + add_dependencies(read_diag_rad.x ${GSILIB} ) diff --git a/util/EnKF/arw/run/anavinfo b/util/EnKF/arw/run/anavinfo new file mode 100644 index 000000000..634f55676 --- /dev/null +++ b/util/EnKF/arw/run/anavinfo @@ -0,0 +1,77 @@ +met_guess:: +!var level crtm_use desc orig_name + ps 1 -1 surface_pressure ps + z 1 -1 geopotential_height phis + u 50 2 zonal_wind u + v 50 2 meridional_wind v + div 50 -1 zonal_wind div + vor 50 -1 meridional_wind vor + tv 50 2 virtual_temperature tv + q 50 2 specific_humidity sphu + oz 50 2 ozone ozone + cw 50 10 cloud_condensate cw + ql 50 10 cloud_liquid ql + qi 50 10 cloud_ice qi + qr 50 10 rain qr + qs 50 10 snow qs + qg 50 10 graupel qg + qnr 50 10 rain_noconc qnr + th2m 1 -1 2-m_T th2m + q2m 1 -1 2-m_Q q2m + tskn 1 -1 skin_T 
tskn + tsoil 1 -1 soil_T tsoil + smoist 9 -1 soilmoist smoist + tslb 9 -1 soilt tslb +:: + +state_derivatives:: +!var level src + ps 1 met_guess + u 50 met_guess + v 50 met_guess + tv 50 met_guess + q 50 met_guess + oz 50 met_guess + cw 50 met_guess + prse 51 met_guess +:: + +state_tendencies:: +!var levels source + u 50 met_guess + v 50 met_guess + tv 50 met_guess + q 50 met_guess + cw 50 met_guess + oz 50 met_guess + prse 51 met_guess +:: + +state_vector:: +!var level itracer source funcof + u 50 0 met_guess u + v 50 0 met_guess v + tv 50 0 met_guess tv + tsen 50 0 met_guess tv,q + q 50 1 met_guess q + oz 50 1 met_guess oz + cw 50 1 met_guess cw + prse 51 0 met_guess prse + ps 1 0 met_guess prse + sst 1 0 met_guess sst +:: + +control_vector:: +!var level itracer as/tsfc_sdv an_amp0 source funcof + sf 50 0 1.00 -1.0 state u,v + vp 50 0 1.00 -1.0 state u,v + ps 1 0 0.50 -1.0 state prse + t 50 0 0.70 -1.0 state tv + q 50 1 0.70 -1.0 state q + oz 50 1 0.50 -1.0 state oz + sst 1 0 1.00 -1.0 state sst + cw 50 1 1.00 -1.0 state cw + stl 1 0 1.00 -1.0 motley sst + sti 1 0 1.00 -1.0 motley sst +:: + diff --git a/util/EnKF/arw/run/namelist.input b/util/EnKF/arw/run/namelist.input new file mode 100644 index 000000000..cd2c4cf20 --- /dev/null +++ b/util/EnKF/arw/run/namelist.input @@ -0,0 +1,17 @@ + &SETUP + regional=.true. , + wrf_mass_regional=.true. , + diagnostic_reg=.true. , + switch_on_derivatives=.false. , + tendsflag=.false. 
, + nfldsig=1 , + grid_ratio_ens=1, + n_ens=80, + grid_ratio_ens = 1, + grid_ratio_wrfmass=1, + use_gfs_nemsio=.true., + jcap_ens=574, + enpert4arw=.true., + wrt_pert_sub=.false., + wrt_pert_mem=.false., + / diff --git a/util/EnKF/arw/run/run_init.ksh b/util/EnKF/arw/run/run_init.ksh new file mode 100755 index 000000000..addd61986 --- /dev/null +++ b/util/EnKF/arw/run/run_init.ksh @@ -0,0 +1,26 @@ +#!/bin/ksh +set -x + +cd /mnt/lfs3/projects/rtwbl/mhu/GSI_r1181/util/EnKF/enspreproc_regional.fd/run + +# Loop through each member + no_member=17 + ensmem=1 + while [[ $ensmem -le $no_member ]];do + + print "\$ensmem is $ensmem" + ensmemid=`printf %4.4i $ensmem` + +# get background for each member + cp wrf_inout wrfinput_d01.mem${ensmemid} + +# next member + (( ensmem += 1 )) + + done + +cp /mnt/lfs3/projects/rtwbl/mhu/GSI_r1181/util/EnKF/initialens_regional.fd/initialens.x . + +./initialens.x ${no_member} + +exit 0 diff --git a/util/EnKF/arw/run/run_pro.ksh b/util/EnKF/arw/run/run_pro.ksh new file mode 100755 index 000000000..9476882e5 --- /dev/null +++ b/util/EnKF/arw/run/run_pro.ksh @@ -0,0 +1,26 @@ +#!/bin/ksh --login + +# Set the queueing options +#PBS -l procs=120 +#PBS -l walltime=0:30:00 +#PBS -A rtwbl +#PBS -q debug +#PBS -N wrf_gsi +#PBS -l partition=tjet +#PBS -j oe + +set -x +np=$PBS_NP + +# Load modules +module load intel +module load mvapich2 +module load netcdf + +set -x + +cd /mnt/lfs3/projects/rtwbl/mhu/GSI_r1181/util/EnKF/enspreproc_regional.fd/run + +/usr/bin/time mpiexec -envall -np ${np} /mnt/lfs3/projects/rtwbl/mhu/GSI_r1181/util/EnKF/enspreproc_regional.fd/enspreproc.x + +exit 0 diff --git a/util/EnKF/arw/src/CMakeLists.txt b/util/EnKF/arw/src/CMakeLists.txt new file mode 100644 index 000000000..5fbc72f9a --- /dev/null +++ b/util/EnKF/arw/src/CMakeLists.txt @@ -0,0 +1,8 @@ +cmake_minimum_required(VERSION 2.6) + + set(CMAKE_Fortran_MODULE_DIRECTORY "${PROJECT_BINARY_DIR}/util/include") + set(UTIL_INC ${CMAKE_Fortran_MODULE_DIRECTORY}) + + 
add_subdirectory(enspreproc_regional.fd) + add_subdirectory(initialens_regional.fd) + diff --git a/util/EnKF/arw/src/enspreproc_regional.fd/CMakeLists.txt b/util/EnKF/arw/src/enspreproc_regional.fd/CMakeLists.txt new file mode 100644 index 000000000..663f6323f --- /dev/null +++ b/util/EnKF/arw/src/enspreproc_regional.fd/CMakeLists.txt @@ -0,0 +1,13 @@ +cmake_minimum_required(VERSION 2.6) + set(GSI_Fortran_FLAGS_LOCAL "${GSI_Fortran_FLAGS} -DWRF") + file(GLOB LOCAL_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.f90 ${CMAKE_CURRENT_SOURCE_DIR}/*.F90) + set_source_files_properties( ${LOCAL_SRC} PROPERTIES COMPILE_FLAGS ${GSI_Fortran_FLAGS_LOCAL} ) + include_directories( ${PROJECT_BINARY_DIR}/include ${CORE_INCS} ${NETCDF_INCLUDES} ${MPI_Fortran_INCLUDE_PATH} ) + + add_executable(enspreproc.x ${LOCAL_SRC} ) + set_target_properties( enspreproc.x PROPERTIES COMPILE_FLAGS ${GSI_Fortran_FLAGS_LOCAL} ) + target_link_libraries(enspreproc.x ${GSISHAREDLIB} ${GSILIB} ${GSISHAREDLIB} ${WRF_LIBRARIES} + ${NETCDF_LIBRARIES_F90} ${NETCDF_LIBRARIES} ${HDF5_Fortran_HL_LIBRARIES} ${MPI_Fortran_LIBRARIES} + ${LAPACK_LIBRARIES} -L./ ${EXTRA_LINKER_FLAGS} ${HDF5_LIBRARIES} ${CURL_LIBRARIES} ${CORE_LIBRARIES} ${CORE_BUILT} + ${GSI_LDFLAGS} ${NCDIAG_LIBRARIES} ${ZLIB_LIBRARIES} ${wrflib} ) + add_dependencies(enspreproc.x ${GSILIB}) diff --git a/util/EnKF/arw/src/enspreproc_regional.fd/get_gefs_for_regional_enspro.f90 b/util/EnKF/arw/src/enspreproc_regional.fd/get_gefs_for_regional_enspro.f90 new file mode 100644 index 000000000..36378c5ab --- /dev/null +++ b/util/EnKF/arw/src/enspreproc_regional.fd/get_gefs_for_regional_enspro.f90 @@ -0,0 +1,1492 @@ +subroutine get_gefs_for_regional_enspro(enpert4arw,wrt_pert_sub,wrt_pert_mem,jcap_ens) +!$$$ subprogram documentation block +! . . . . +! subprogram: get_gefs_for_regionl read gefsozone for regional +! prgmmr: parrish org: np22 date: 2010-09-26 +! +! abstract: read gefs and interpolate to regional ensemble grid. +! 
(adaptation of get_gefs_ensperts_dualres) +! +! +! program history log: +! 2010-09-26 parrish, initial documentation +! 2012-01-17 wu, clean up, add/setup option "full_ensemble" +! 2012-02-08 parrish - a little more cleanup +! 2012-10-11 wu - dual resolution for options of regional hybens +! 2013-02-21 wu - add call to general_destroy_spec_vars to fix memory problem +! 2013-10-19 todling - all guess variables in met-guess +! 2014-11-30 todling - update interface to general_read_gfs routines +! 2014-12-03 derber - changes to call for general_read_gfsatm +! 2015-05-12 wu - changes to read in multiple ensemble for 4DEnVar +! 2015-09-20 s.liu - use general sub2grid in grads1a +! 2016-05-19 Carley/s.liu - prevent the GSI from printing out erroneous error +! when using ensembles from different time +! +! input argument list: +! +! output argument list: +! +! attributes: +! language: f90 +! machine: ibm RS/6000 SP +! +!$$$ end documentation block + + use gridmod, only: idsl5,regional,use_gfs_nemsio + use gridmod, only: nlon,nlat,lat2,lon2,nsig,rotate_wind_ll2xy + use hybrid_ensemble_parameters, only: region_lat_ens,region_lon_ens + use hybrid_ensemble_parameters, only: ps_bar,nelen + use hybrid_ensemble_parameters, only: n_ens,grd_ens,grd_anl,grd_a1,grd_e1,p_e2a,uv_hyb_ens,dual_res + use hybrid_ensemble_parameters, only: full_ensemble,q_hyb_ens,l_ens_in_diff_time,write_ens_sprd + use hybrid_ensemble_parameters, only: ntlevs_ens,ensemble_path + !use hybrid_ensemble_parameters, only: add_bias_perturbation + use control_vectors, only: cvars2d,cvars3d,nc2d,nc3d + use gsi_bundlemod, only: gsi_bundlecreate + use gsi_bundlemod, only: gsi_bundle + use gsi_bundlemod, only: gsi_bundlegetpointer + use gsi_bundlemod, only: gsi_bundledestroy + use constants,only: zero,half,fv,rd_over_cp,one,h300,i_missing,r60,r3600 + use constants, only: rd,grav + use mpimod, only: mpi_comm_world,ierror,mype,mpi_rtype,mpi_min,mpi_max + use mpimod, only: mpi_info_null,mpi_offset_kind,mpi_mode_create + 
use mpimod, only: mpi_mode_wronly + use kinds, only: r_kind,i_kind,r_single + use general_sub2grid_mod, only: sub2grid_info,general_sub2grid_create_info + use general_sub2grid_mod, only: general_grid2sub,general_sub2grid + use general_sub2grid_mod, only: general_suba2sube,general_sube2suba + use general_sub2grid_mod, only: general_sub2grid_destroy_info + use general_sub2grid_mod, only: general_gather2grid + use general_specmod, only: spec_vars,general_init_spec_vars,general_destroy_spec_vars + use egrid2agrid_mod, only: g_create_egrid2points_slow,egrid2agrid_parm,g_egrid2points_faster + use sigio_module, only: sigio_intkind,sigio_head,sigio_srhead + use guess_grids, only: ges_prsl,ntguessig + use guess_grids, only: ges_tsen,ifilesig,hrdifsig + use aniso_ens_util, only: intp_spl + use obsmod, only: iadate + use mpimod, only: npe + use gsi_bundlemod, only: gsi_bundlegetpointer + use gsi_bundlemod, only: gsi_bundlecreate + use gsi_bundlemod, only: gsi_grid + use gsi_bundlemod, only: gsi_gridcreate + use gsi_bundlemod, only: gsi_bundle + use gsi_bundlemod, only: gsi_bundledestroy + use gsi_metguess_mod, only: GSI_MetGuess_Bundle + use mpeu_util, only: die + use gsi_4dvar, only: nhr_assimilation + use get_wrf_mass_ensperts_mod, only: get_wrf_mass_ensperts_class + + use nemsio_module, only: nemsio_init,nemsio_open,nemsio_close + use nemsio_module, only: nemsio_gfile,nemsio_getfilehead + + implicit none + + logical, intent(in) :: enpert4arw,wrt_pert_sub,wrt_pert_mem + integer(i_kind),intent(in) :: jcap_ens + type(sub2grid_info) grd_gfs,grd_mix,grd_gfst,grd_arw + type(get_wrf_mass_ensperts_class) :: wrf_mass_ensperts + type(spec_vars) sp_gfs + real(r_kind),allocatable,dimension(:,:,:) :: pri,prsl,prsl1000 + real(r_kind),pointer,dimension(:,:,:) :: vor =>null() + real(r_kind),pointer,dimension(:,:,:) :: div =>null() + real(r_kind),pointer,dimension(:,:,:) :: u =>null() + real(r_kind),pointer,dimension(:,:,:) :: v =>null() + real(r_kind),pointer,dimension(:,:,:) :: tv 
=>null() + real(r_kind),pointer,dimension(:,:,:) :: q =>null() + real(r_kind),pointer,dimension(:,:,:) :: cwmr=>null() + real(r_kind),pointer,dimension(:,:,:) :: oz =>null() + real(r_kind),pointer,dimension(:,:) :: z =>null() + real(r_kind),pointer,dimension(:,:) :: ps=>null() + real(r_kind),allocatable,dimension(:) :: ak5,bk5,ck5,tref5 + real(r_kind),allocatable :: work_sub(:,:,:,:),work(:,:,:,:),work_reg(:,:,:,:) + real(r_kind),allocatable :: tmp_ens(:,:,:,:),tmp_anl(:,:,:,:),tmp_ens2(:,:,:,:) + real(r_kind),allocatable,dimension(:,:,:)::stbar,vpbar,tbar,rhbar,ozbar,cwbar + real(r_kind),allocatable,dimension(:,:):: pbar_nmmb + real(r_kind),allocatable,dimension(:,:,:,:)::st_eg,vp_eg,t_eg,rh_eg,oz_eg,cw_eg + real(r_kind),allocatable,dimension(:,:,:):: p_eg_nmmb + real(r_kind),allocatable,dimension(:,:,:,:):: ges_prsl_e + real(r_kind),allocatable,dimension(:,:,:)::tsen,qs + real(r_kind),allocatable,dimension(:,:,:)::ut,vt,tt,rht,ozt,cwt + real(r_single),allocatable,dimension(:,:,:):: w3 + real(r_single),allocatable,dimension(:,:):: w2 + real(r_single),allocatable,dimension(:,:,:,:)::en_perts + real(r_kind),dimension(:,:,:),allocatable:: workh + real(r_kind),dimension(:),allocatable:: z1 + + character(len=*),parameter::myname='get_gefs_for_regional' + real(r_kind) bar_norm,sig_norm,kapr,kap1,trk + integer(i_kind) iret,i,j,k,k2,n,mm1,iderivative + integer(i_kind) ic2,ic3,it + integer(i_kind) ku,kv,kt,kq,koz,kcw,kz,kps + character(255) filename,filelists(ntlevs_ens) + logical ice + integer(sigio_intkind):: lunges = 11 + type(sigio_head):: sighead + type(egrid2agrid_parm) :: p_g2r + integer(i_kind) inner_vars,num_fields,nlat_gfs,nlon_gfs,nsig_gfs,jcap_gfs,jcap_gfs_test + integer(i_kind) nord_g2r,num_fieldst + logical,allocatable :: vector(:) + real(r_kind),parameter:: zero_001=0.001_r_kind + real(r_kind),allocatable,dimension(:) :: xspli,yspli,xsplo,ysplo + integer(i_kind) iyr,ihourg + integer(i_kind),dimension(7):: idate + integer(i_kind),dimension(4):: idate4 + 
integer(i_kind),dimension(8) :: ida,jda + integer(i_kind),dimension(5) :: iadate_gfs + real(r_kind) hourg + real(r_kind),dimension(5):: fha + integer(i_kind) istatus + real(r_kind) rdog,h,dz + real(r_kind),allocatable::height(:),zbarl(:,:,:) + logical add_bias_perturbation,inithead + integer(i_kind) n_ens_temp + real(r_kind),allocatable::psfc_out(:,:) + integer(i_kind) ilook,jlook,ier + character(len=3) :: charfhr + character(len=7) charmem + + + real(r_kind) dlon,dlat,uob,vob,dlon_ens,dlat_ens + integer(i_kind) ii,jj,n1 + integer(i_kind) iimax,iimin,jjmax,jjmin + integer(i_kind) nming1,nming2 + integer(i_kind) its,ite + real(r_kind) ratio_x,ratio_y + + integer(i_kind) :: nfhour, nfminute, nfsecondn, nfsecondd + integer(i_kind) :: idvc,idsl,lonb,latb,levs,jcap,nvcoord + character(8) filetype, mdlname + real(r_single),allocatable,dimension(:,:,:) :: vcoord + integer(i_kind) iret2 + type(nemsio_gfile) :: gfile_atm + + type(gsi_bundle) :: atm_bundle + type(gsi_grid) :: atm_grid + integer(i_kind),parameter :: n2d=2 + integer(i_kind),parameter :: n3d=8 + character(len=4), parameter :: vars2d(n2d) = (/ 'z ', 'ps ' /) + character(len=4), parameter :: vars3d(n3d) = (/ 'u ', 'v ', & + 'vor ', 'div ', & + 'tv ', 'q ', & + 'cw ', 'oz ' /) + + real(r_kind), pointer :: ges_ps(:,: )=>NULL() + real(r_kind), pointer :: ges_z (:,: )=>NULL() + real(r_kind), pointer :: ges_u (:,:,:)=>NULL() + real(r_kind), pointer :: ges_v (:,:,:)=>NULL() + real(r_kind), pointer :: ges_tv(:,:,:)=>NULL() + real(r_kind), pointer :: ges_q (:,:,:)=>NULL() + + integer(i_kind) :: iunit,lunit,count + integer(mpi_offset_kind) :: disp + character(len=500) :: filenameout + + add_bias_perturbation=.false. ! 
not fully activated yet--testing new adjustment of ps perturbions 1st + + if(ntlevs_ens > 1) then + do i=1,ntlevs_ens + write(filelists(i),'("filelist",i2.2)')ifilesig(i) + enddo + its=1 + ite=ntlevs_ens + else + write(filelists(1),'("filelist",i2.2)')nhr_assimilation + its=ntguessig + ite=ntguessig + endif + + do it=its,ite +! get pointers for typical meteorological fields + ier=0 + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'ps',ges_ps,istatus );ier=ier+istatus + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'z', ges_z, istatus );ier=ier+istatus + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'u', ges_u, istatus );ier=ier+istatus + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'v', ges_v, istatus );ier=ier+istatus + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'tv',ges_tv,istatus );ier=ier+istatus + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'q' ,ges_q, istatus );ier=ier+istatus + if (ier/=0) call die(trim(myname),'cannot get pointers for met-fields, ier =',ier) + +! figure out what are acceptable dimensions for global grid, based on resolution of input spectral coefs +! need to inquire from file what is spectral truncation, then setup general spectral structure variable + +! filename='sigf06_ens_mem001' + if(ntlevs_ens > 1) then + open(10,file=trim(filelists(it)),form='formatted',err=30) + else + open(10,file=trim(filelists(1)),form='formatted',err=30) + endif + rewind (10) + do n=1,200 + read(10,'(a)',err=20,end=40)filename + enddo +40 n_ens=n-1 + +! set n_ens_temp depending on if we want to add bias perturbation to the ensemble + + if(add_bias_perturbation) then + n_ens_temp=n_ens+1 + else + n_ens_temp=n_ens + end if + + rewind (10) + read(10,'(a)',err=20,end=20)filename +!=========== + if ( .not. 
use_gfs_nemsio ) then + + open(lunges,file=trim(filename),form='unformatted') + call sigio_srhead(lunges,sighead,iret) + close(lunges) + + hourg=sighead%fhour + idate4=sighead%idate + nvcoord=sighead%nvcoord + + if(mype == 0) then + write(6,*) ' sighead%fhour,sighead%idate=',sighead%fhour,sighead%idate + write(6,*) ' iadate(y,m,d,hr,min)=',iadate + write(6,*) ' sighead%jcap,sighead%levs=',sighead%jcap,sighead%levs + write(6,*) ' sighead%latf,sighead%lonf=',sighead%latf,sighead%lonf + write(6,*) ' sighead%idvc,sighead%nvcoord=',sighead%idvc,sighead%nvcoord + write(6,*) ' sighead%idsl=',sighead%idsl + do k=1,sighead%levs+1 + write(6,*)' k,vcoord=',k,sighead%vcoord(k,:) + end do + end if + + idsl=sighead%idsl + idvc=sighead%idvc + nlat_gfs=sighead%latf+2 + nlon_gfs=sighead%lonf + nsig_gfs=sighead%levs + if(sighead%jcap > 0)then + jcap_gfs=sighead%jcap + else if(jcap_ens > 0)then + jcap_gfs=jcap_ens + else + write(6,*)'ERROR jcap is undefined' + call stop2(555) + endif + + if (allocated(vcoord)) deallocate(vcoord) + allocate(vcoord(nsig_gfs+1,3,2)) + vcoord(1:nsig_gfs+1,1:sighead%nvcoord,1)=sighead%vcoord(1:nsig_gfs+1,1:sighead%nvcoord) + +! Extract header information +! hourg = sighead%fhour +! idate4(1)= sighead%idate(1) +! idate4(2)= sighead%idate(2) +! idate4(3)= sighead%idate(3) +! idate4(4)= sighead%idate(4) + + else !NEMSIO + + call nemsio_init(iret=iret) + call nemsio_open(gfile_atm,filename,'READ',iret=iret) + idate = i_missing + nfhour = i_missing; nfminute = i_missing + nfsecondn = i_missing; nfsecondd = i_missing + idsl = i_missing + call nemsio_getfilehead(gfile_atm, idate=idate, gtype=filetype, & + modelname=mdlname, nfhour=nfhour, nfminute=nfminute, & + nfsecondn=nfsecondn, nfsecondd=nfsecondd, & + dimx=lonb, dimy=latb, dimz=levs, & + jcap=jcap, idvc=idvc, & + idsl=idsl, iret=iret2) + if ( nfhour == i_missing .or. nfminute == i_missing .or. & + nfsecondn == i_missing .or. 
nfsecondd == i_missing ) then + write(6,*)'READ_FILES: ***ERROR*** some forecast hour info ', & + 'are not defined in ', trim(filename) + write(6,*)'READ_FILES: nfhour = ', & + hourg + call stop2(80) + endif + + hourg = float(nfhour) + float(nfminute)/r60 + & + float(nfsecondn)/float(nfsecondd)/r3600 + idate4(1) = idate(4) !hour + idate4(2) = idate(2) !month + idate4(3) = idate(3) !day + idate4(4) = idate(1) !year + nlat_gfs=latb+2 + nlon_gfs=lonb + nsig_gfs=levs + if(jcap > 0)then + jcap_gfs=jcap + else if(jcap_ens > 0)then + jcap_gfs=jcap_ens + else + write(6,*)'ERROR jcap is undefined' + call stop2(555) + endif + + if (allocated(vcoord)) deallocate(vcoord) + allocate(vcoord(nsig_gfs+1,3,2)) + call nemsio_getfilehead(gfile_atm,iret=iret2,vcoord=vcoord) + if ( iret2 /= 0 ) then + write(6,*)' GESINFO: ***ERROR*** problem reading header ', & + 'vcoord, Status = ',iret2 + call stop2(99) + endif + + call nemsio_close(gfile_atm,iret=iret) +! Determine the type of vertical coordinate used by model because that +! nvcoord is no longer part of NEMSIO header output. + nvcoord=3 + if(maxval(vcoord(:,3,1))==zero .and. & + minval(vcoord(:,3,1))==zero ) then + nvcoord=2 + if(maxval(vcoord(:,2,1))==zero .and. & + minval(vcoord(:,2,1))==zero ) then + nvcoord=1 + end if + end if + if(mype == 0) then + write(6,*) 'fhour,idate=',hourg,idate4 + write(6,*) ' iadate(y,m,d,hr,min)=',iadate + write(6,*) ' jcap,levs=',jcap,levs + write(6,*) ' latf,lonf=',latb,lonb + write(6,*) ' idvc,nvcoord=',idvc,nvcoord + write(6,*) ' idsl=',idsl + do k=1,levs+1 + write(6,*)' k,vcoord=',k,vcoord(k,:,1) + end do + end if + + endif ! use_gfs_nemsio +!=========== +! Compute valid time from ensemble date and forecast length and compare to iadate, the analysis time + iyr=idate4(4) + ihourg=hourg + if(iyr>=0.and.iyr<=99) then + if(iyr>51) then + iyr=iyr+1900 + else + iyr=iyr+2000 + end if + end if + fha=zero ; ida=0; jda=0 + fha(2)=ihourg ! relative time interval in hours + ida(1)=iyr ! 
year + ida(2)=idate4(2) ! month + ida(3)=idate4(3) ! day + ida(4)=0 ! time zone + ida(5)=idate4(1) ! hour + call w3movdat(fha,ida,jda) + iadate_gfs(1)=jda(1) ! year + iadate_gfs(2)=jda(2) ! mon + iadate_gfs(3)=jda(3) ! day + if(ntlevs_ens > 1) then + iadate_gfs(4)=jda(5)+hrdifsig(ntguessig)-hrdifsig(it) ! hour + else + iadate_gfs(4)=jda(5) ! hour + endif + iadate_gfs(5)=0 ! minute + if(mype == 0) then + write(6,*)' in get_gefs_for_regional, iadate_gefs=',iadate_gfs + write(6,*)' in get_gefs_for_regional, iadate =',iadate + end if + call w3fs21(iadate,nming1) + call w3fs21(iadate_gfs,nming2) + if( (nming1/=nming2) .and. (.not.l_ens_in_diff_time) ) then + if(mype == 0) write(6,*)' GEFS ENSEMBLE MEMBER DATE NOT EQUAL TO ANALYSIS DATE, PROGRAM STOPS' +! call stop2(85) + end if + + +! set up ak5,bk5,ck5 for use in computing 3d pressure field (needed for vertical interp to regional) +! following is code segment from gesinfo.F90 + allocate(ak5(nsig_gfs+1)) + allocate(bk5(nsig_gfs+1)) + allocate(ck5(nsig_gfs+1)) + allocate(tref5(nsig_gfs)) + do k=1,nsig_gfs+1 + ak5(k)=zero + bk5(k)=zero + ck5(k)=zero + end do + if (nvcoord == 1) then + do k=1,nsig_gfs+1 + bk5(k) = vcoord(k,1,1) + end do + elseif (nvcoord == 2) then + do k = 1,nsig_gfs+1 + ak5(k) = vcoord(k,1,1)*zero_001 + bk5(k) = vcoord(k,2,1) + end do + elseif (nvcoord == 3) then + do k = 1,nsig_gfs+1 + ak5(k) = vcoord(k,1,1)*zero_001 + bk5(k) = vcoord(k,2,1) + ck5(k) = vcoord(k,3,1)*zero_001 + end do + else + write(6,*)'READ_GFS_OZONE_FOR_REGIONAL: ***ERROR*** INVALID value for nvcoord=',nvcoord + call stop2(85) + endif +! Load reference temperature array (used by general coordinate) + do k=1,nsig_gfs + tref5(k)=h300 + end do + + + inner_vars=1 +! nlat_gfs=sighead%latf+2 +! nlon_gfs=sighead%lonf +! nsig_gfs=sighead%levs + num_fields=6*nsig_gfs+2 ! want to transfer u,v,t,q,oz,cw,ps,z from gfs subdomain to slab + ! later go through this code, adapting gsibundlemod, since currently + ! hardwired. 
+ num_fieldst=min(num_fields,npe) + allocate(vector(num_fields)) + vector=.false. + vector(1:2*nsig_gfs)=uv_hyb_ens + call general_sub2grid_create_info(grd_gfst,inner_vars,nlat_gfs,nlon_gfs,nsig_gfs,num_fieldst, & + .not.regional) + call general_sub2grid_create_info(grd_gfs,inner_vars,nlat_gfs,nlon_gfs,nsig_gfs,num_fields, & + .not.regional,vector) +! jcap_gfs=sighead%jcap + jcap_gfs_test=jcap_gfs + call general_init_spec_vars(sp_gfs,jcap_gfs,jcap_gfs_test,grd_gfs%nlat,grd_gfs%nlon) + +! also want to set up regional grid structure variable grd_mix, which still has number of +! vertical levels set to nsig_gfs, but horizontal dimensions set to regional domain. + + call general_sub2grid_create_info(grd_mix,inner_vars,grd_ens%nlat,grd_ens%nlon,nsig_gfs, & + num_fields,regional,vector) + +! create interpolation information for global grid to regional ensemble grid + + nord_g2r=4 + call g_create_egrid2points_slow(grd_ens%nlat*grd_ens%nlon,region_lat_ens,region_lon_ens, & + grd_gfs%nlat,sp_gfs%rlats,grd_gfs%nlon,sp_gfs%rlons,nord_g2r,p_g2r) + +! allocate mix ensemble space--horizontal on regional domain, vertical still gefs + allocate(st_eg(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig,n_ens)) + allocate(vp_eg(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig,n_ens)) + allocate( t_eg(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig,n_ens)) + allocate(rh_eg(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig,n_ens)) + allocate(oz_eg(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig,n_ens)) + allocate(cw_eg(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig,n_ens)) + allocate( p_eg_nmmb(grd_mix%lat2,grd_mix%lon2,n_ens)) + st_eg=zero ; vp_eg=zero ; t_eg=zero ; rh_eg=zero ; oz_eg=zero ; cw_eg=zero + p_eg_nmmb=zero + +! begin loop over ensemble members + + rewind(10) + inithead=.true. + do n=1,n_ens + read(10,'(a)',err=20,end=20)filename +!mhu filename=trim(ensemble_path) // trim(filename) +! write(filename,100) n +!100 format('sigf06_ens_mem',i3.3) + + + +! 
allocate necessary space on global grid + call gsi_gridcreate(atm_grid,grd_gfs%lat2,grd_gfs%lon2,grd_gfs%nsig) + call gsi_bundlecreate(atm_bundle,atm_grid,'aux-atm-read',istatus,names2d=vars2d,names3d=vars3d) + if(istatus/=0) then + write(6,*)myname,': trouble creating atm_bundle' + call stop2(999) + endif + + if(use_gfs_nemsio)then + call general_read_gfsatm_nems(grd_gfst,sp_gfs,filename,uv_hyb_ens,.false.,.true., & + atm_bundle,.true.,iret) + else + call general_read_gfsatm(grd_gfst,sp_gfs,sp_gfs,filename,uv_hyb_ens,.false.,.true., & + atm_bundle,inithead,iret) + end if + inithead = .false. + + ier = 0 + call gsi_bundlegetpointer(atm_bundle,'vor' ,vor ,istatus) ; ier = ier + istatus + call gsi_bundlegetpointer(atm_bundle,'div' ,div ,istatus) ; ier = ier + istatus + call gsi_bundlegetpointer(atm_bundle,'u' ,u ,istatus) ; ier = ier + istatus + call gsi_bundlegetpointer(atm_bundle,'v' ,v ,istatus) ; ier = ier + istatus + call gsi_bundlegetpointer(atm_bundle,'tv' ,tv ,istatus) ; ier = ier + istatus + call gsi_bundlegetpointer(atm_bundle,'q' ,q ,istatus) ; ier = ier + istatus + call gsi_bundlegetpointer(atm_bundle,'oz' ,oz ,istatus) ; ier = ier + istatus + call gsi_bundlegetpointer(atm_bundle,'cw' ,cwmr,istatus) ; ier = ier + istatus + call gsi_bundlegetpointer(atm_bundle,'z' ,z ,istatus) ; ier = ier + istatus + call gsi_bundlegetpointer(atm_bundle,'ps' ,ps ,istatus) ; ier = ier + istatus + if ( ier /= 0 ) call die(myname,': missing atm_bundle vars, aborting ...',ier) + + allocate(work_sub(grd_gfs%inner_vars,grd_gfs%lat2,grd_gfs%lon2,num_fields)) + do k=1,grd_gfs%nsig + ku=k ; kv=k+grd_gfs%nsig ; kt=k+2*grd_gfs%nsig ; kq=k+3*grd_gfs%nsig ; koz=k+4*grd_gfs%nsig + kcw=k+5*grd_gfs%nsig + do j=1,grd_gfs%lon2 + do i=1,grd_gfs%lat2 + work_sub(1,i,j,ku)=u(i,j,k) + work_sub(1,i,j,kv)=v(i,j,k) + work_sub(1,i,j,kt)=tv(i,j,k) + work_sub(1,i,j,kq)=q(i,j,k) + work_sub(1,i,j,koz)=oz(i,j,k) + work_sub(1,i,j,kcw)=cwmr(i,j,k) + end do + end do + end do + kz=num_fields ; kps=kz-1 + do 
j=1,grd_gfs%lon2 + do i=1,grd_gfs%lat2 + work_sub(1,i,j,kz)=z(i,j) + work_sub(1,i,j,kps)=ps(i,j) + end do + end do + + call gsi_bundledestroy(atm_bundle,istatus) + + allocate(work(grd_gfs%inner_vars,grd_gfs%nlat,grd_gfs%nlon,grd_gfs%kbegin_loc:grd_gfs%kend_alloc)) + call general_sub2grid(grd_gfs,work_sub,work) + deallocate(work_sub) + +! then interpolate to regional analysis grid + allocate(work_reg(grd_mix%inner_vars,grd_mix%nlat,grd_mix%nlon,grd_gfs%kbegin_loc:grd_gfs%kend_alloc)) + do k=grd_gfs%kbegin_loc,grd_gfs%kend_loc + call g_egrid2points_faster(p_g2r,work(1,1,1,k),work_reg(1,1,1,k),vector(k)) + end do + deallocate(work) + +! next general_grid2sub to go to regional grid subdomains. + allocate(work_sub(grd_mix%inner_vars,grd_mix%lat2,grd_mix%lon2,num_fields)) + call general_grid2sub(grd_mix,work_reg,work_sub) + deallocate(work_reg) + allocate(pri(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig+1)) + kz=num_fields ; kps=kz-1 +! compute 3d pressure on interfaces + kap1=rd_over_cp+one + kapr=one/rd_over_cp + pri=zero + k=1 + k2=grd_mix%nsig+1 + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + pri(i,j,k)=work_sub(1,i,j,kps) + pri(i,j,k2)=zero + end do + end do + if (idvc /= 3) then + do k=2,grd_mix%nsig + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + pri(i,j,k)=ak5(k)+bk5(k)*work_sub(1,i,j,kps) + end do + end do + end do + else + do k=2,grd_mix%nsig + kt=k+2*grd_mix%nsig + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + trk=(half*(work_sub(1,i,j,kt-1)+work_sub(1,i,j,kt))/tref5(k))**kapr + pri(i,j,k)=ak5(k)+(bk5(k)*work_sub(1,i,j,kps))+(ck5(k)*trk) + end do + end do + end do + end if + +! 
Get 3d pressure field now on interfaces + allocate(prsl(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig)) + if (idsl5/=2) then + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + do k=1,grd_mix%nsig + prsl(i,j,k)=((pri(i,j,k)**kap1-pri(i,j,k+1)**kap1)/& + (kap1*(pri(i,j,k)-pri(i,j,k+1))))**kapr + end do + end do + end do + else + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + do k=1,grd_mix%nsig + prsl(i,j,k)=(pri(i,j,k)+pri(i,j,k+1))*half + end do + end do + end do + end if +! !Compute geopotential height at interface between layers + allocate(zbarl(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig)) + allocate(height(grd_mix%nsig)) + rdog=rd/grav + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + k = 1 + kt=k+2*grd_mix%nsig + h = rdog * work_sub(1,i,j,kt) + dz = h * log(pri(i,j,k)/prsl(i,j,k)) + height(k) = work_sub(1,i,j,kz)+dz + + do k=2,grd_mix%nsig + kt=k+2*grd_mix%nsig + h = rdog * half * (work_sub(1,i,j,kt-1)+work_sub(1,i,j,kt)) + dz = h * log(prsl(i,j,k-1)/prsl(i,j,k)) + height(k) = height(k-1) + dz + end do + do k=1,grd_mix%nsig + zbarl(i,j,k)=height(k) + end do + end do + end do + deallocate(pri,height) +!! recompute pbar using routine Wan-Shu obtained from Matt Pyle: + + allocate(tt(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig)) + allocate(psfc_out(grd_mix%lat2,grd_mix%lon2)) + do k=1,grd_mix%nsig + kt=k+2*grd_mix%nsig + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + tt(i,j,k)=work_sub(1,i,j,kt) + end do + end do + end do + mm1=mype+1 + ! !ilook=ide/2 + ! !jlook=jde/2 + ! !ilook=29 + ! !jlook=41 + ilook=-1 ; jlook=-1 + allocate(prsl1000(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig)) + prsl1000=1000._r_kind*prsl + call compute_nmm_surfacep ( ges_z(:,:), zbarl,prsl1000, & + psfc_out,grd_mix%nsig,grd_mix%lat2,grd_mix%lon2, & + ilook,jlook) + deallocate(tt,zbarl,prsl1000) + psfc_out=.001_r_kind*psfc_out + ! psfc_out=ges_ps(:,:) + ! write(6,*)' min,max ges_ps-psfc_out=',& + ! minval(ges_ps(:,:)-psfc_out),maxval(ges_ps(:,:)-psfc_out) + ! pdiffmax=-huge(pdiffmax) + ! pdiffmin= huge(pdiffmin) + ! ! 
do j=2,grd_mix%lon2-1 + ! ! do i=2,grd_mix%lat2-1 + ! do j=1,grd_mix%lon2 + ! do i=1,grd_mix%lat2 + ! pdiffmax=max(ges_ps(i,j)-psfc_out(i,j),pdiffmax) + ! pdiffmin=min(ges_ps(i,j)-psfc_out(i,j),pdiffmin) + ! if(ges_ps(i,j)<10._r_kind) & + ! write(6,*)' small ges_ps,i,j,lat2,lon2,ig,jg,ide,jde=',i,j,grd_mix%lat2,grd_mix%lon2,& + ! grd_mix%istart(mm1)-2+i,grd_mix%jstart(mm1)-2+j,grd_mix%nlat,grd_mix%nlon + ! if(psfc_out(i,j)<10._r_kind) & + ! write(6,*)' small ens ps,i,j,lat2,lon2,ig,jg,ide,jde=',i,j,grd_mix%lat2,grd_mix%lon2,& + ! grd_mix%istart(mm1)-2+i,grd_mix%jstart(mm1)-2+j,grd_mix%nlat,grd_mix%nlon + ! end do + ! end do + ! call mpi_allreduce(pdiffmax,pdiffmax0,1,mpi_rtype,mpi_max,mpi_comm_world,ierror) + ! call mpi_allreduce(pdiffmin,pdiffmin0,1,mpi_rtype,mpi_min,mpi_comm_world,ierror) + ! if(mype==0) write(6,*)' min,max ges_ps - matt ps =',pdiffmin0,pdiffmax0 + + ! write(fname,'("matt_pbar_corrected")') + ! call grads1a(psfc_out,1,mype,trim(fname)) + ! write(fname,'("ges_ps")') + ! call grads1a(ges_ps(:,:),1,mype,trim(fname)) + + +! If not using Q perturbations, convert to RH + if (.not.q_hyb_ens) then + allocate(tsen(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig)) + allocate(qs(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig)) +! Compute RH and potential virtual temp +! First step is go get sensible temperature and 3d pressure + do k=1,grd_mix%nsig + kt=k+2*grd_mix%nsig ; kq=k+3*grd_mix%nsig + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + tsen(i,j,k)= work_sub(1,i,j,kt)/(one+fv*max(zero,work_sub(1,i,j,kq))) + end do + end do + end do + + ice=.true. 
+ iderivative=0 + call genqsat(qs,tsen,prsl,grd_mix%lat2,grd_mix%lon2,grd_mix%nsig,ice,iderivative) + + do k=1,grd_mix%nsig + kt=k+2*grd_mix%nsig ; kq=k+3*grd_mix%nsig + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + if(enpert4arw) then + work_sub(1,i,j,kq) = work_sub(1,i,j,kq) + else + work_sub(1,i,j,kq) = work_sub(1,i,j,kq)/qs(i,j,k) + endif + end do + end do + end do + deallocate(qs,tsen) + end if + do k=1,grd_mix%nsig + kt=k+2*grd_mix%nsig + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + if(enpert4arw) then + work_sub(1,i,j,kt)=work_sub(1,i,j,kt)/(one+fv*max(zero,work_sub(1,i,j,kq))) & + /(0.01_r_kind*prsl(i,j,k))**rd_over_cp + else + work_sub(1,i,j,kt)=work_sub(1,i,j,kt)/(0.01_r_kind*prsl(i,j,k))**rd_over_cp + endif + end do + end do + end do + + deallocate(prsl) + + iimax=0 + iimin=grd_mix%nlat + jjmax=0 + jjmin=grd_mix%nlon + ratio_x=(nlon-one)/(grd_mix%nlon-one) + ratio_y=(nlat-one)/(grd_mix%nlat-one) + do k=1,grd_mix%nsig + ku=k ; kv=ku+grd_mix%nsig ; kt=kv+grd_mix%nsig ; kq=kt+grd_mix%nsig ; koz=kq+grd_mix%nsig + kcw=koz+grd_mix%nsig + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + + ii=i+grd_mix%istart(mm1)-2 + jj=j+grd_mix%jstart(mm1)-2 + ii=min(grd_mix%nlat,max(1,ii)) + jj=min(grd_mix%nlon,max(1,jj)) + iimax=max(ii,iimax) + iimin=min(ii,iimin) + jjmax=max(jj,jjmax) + jjmin=min(jj,jjmin) + dlon_ens=float(jj) + dlat_ens=float(ii) + dlon=one+(dlon_ens-one)*ratio_x + dlat=one+(dlat_ens-one)*ratio_y + + call rotate_wind_ll2xy(work_sub(1,i,j,ku),work_sub(1,i,j,kv), & + uob,vob,region_lon_ens(ii,jj),dlon,dlat) + st_eg(i,j,k,n)=uob + vp_eg(i,j,k,n)=vob + + t_eg(i,j,k,n)=work_sub(1,i,j,kt) ! now pot virtual temp + rh_eg(i,j,k,n)=work_sub(1,i,j,kq) ! now rh + oz_eg(i,j,k,n)=work_sub(1,i,j,koz) + cw_eg(i,j,k,n)=work_sub(1,i,j,kcw) + end do + end do + end do + kz=num_fields ; kps=kz-1 + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + p_eg_nmmb(i,j,n)=psfc_out(i,j) + end do + end do + deallocate(work_sub,psfc_out) + +! pdiffmax=-huge(pdiffmax) +! pdiffmin= huge(pdiffmin) +! 
do j=1,grd_mix%lon2 +! do i=1,grd_mix%lat2 +! pdiffmax=max(ges_ps(i,j)-p_eg_nmmb(i,j,n),pdiffmax) +! pdiffmin=min(ges_ps(i,j)-p_eg_nmmb(i,j,n),pdiffmin) +! if(ges_ps(i,j)<10._r_kind) & +! write(6,*)' small ges_ps,i,j,lat2,lon2,ig,jg,ide,jde=',i,j,grd_mix%lat2,grd_mix%lon2,& +! grd_mix%istart(mm1)-1+i,grd_mix%jstart(mm1)-1+j,grd_mix%nlat,grd_mix%nlon +! if(p_eg_nmmb(i,j,n)<10._r_kind) & +! write(6,*)' small ens ps,i,j,lat2,lon2,ig,jg,ide,jde=',i,j,grd_mix%lat2,grd_mix%lon2,& +! grd_mix%istart(mm1)-1+i,grd_mix%jstart(mm1)-1+j,grd_mix%nlat,grd_mix%nlon +! end do +! end do +! call mpi_allreduce(pdiffmax,pdiffmax0,1,mpi_rtype,mpi_max,mpi_comm_world,ierror) +! call mpi_allreduce(pdiffmin,pdiffmin0,1,mpi_rtype,mpi_min,mpi_comm_world,ierror) +! if(mype==0) write(6,*)' with halo, n,min,max ges_ps - matt ps =',n,pdiffmin0,pdiffmax0 + + end do ! end loop over ensemble members. + +! next, compute mean of ensembles. + + allocate(stbar(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig)) + allocate(vpbar(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig)) + allocate( tbar(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig)) + allocate(rhbar(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig)) + allocate(ozbar(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig)) + allocate(cwbar(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig)) + allocate(pbar_nmmb(grd_mix%lat2,grd_mix%lon2)) + +! compute mean state + stbar=zero ; vpbar=zero ; tbar=zero ; rhbar=zero ; ozbar=zero ; cwbar=zero + pbar_nmmb=zero + do n=1,n_ens + do k=1,grd_mix%nsig + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + stbar(i,j,k)=stbar(i,j,k)+st_eg(i,j,k,n) + vpbar(i,j,k)=vpbar(i,j,k)+vp_eg(i,j,k,n) + tbar(i,j,k)= tbar(i,j,k)+ t_eg(i,j,k,n) + rhbar(i,j,k)=rhbar(i,j,k)+rh_eg(i,j,k,n) + ozbar(i,j,k)=ozbar(i,j,k)+oz_eg(i,j,k,n) + cwbar(i,j,k)=cwbar(i,j,k)+cw_eg(i,j,k,n) + end do + end do + end do + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + pbar_nmmb(i,j)=pbar_nmmb(i,j)+p_eg_nmmb(i,j,n) + end do + end do + end do + +! 
Convert to mean + bar_norm = one/float(n_ens) + do k=1,grd_mix%nsig + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + stbar(i,j,k)=stbar(i,j,k)*bar_norm + vpbar(i,j,k)=vpbar(i,j,k)*bar_norm + tbar(i,j,k)= tbar(i,j,k)*bar_norm + rhbar(i,j,k)=rhbar(i,j,k)*bar_norm + ozbar(i,j,k)=ozbar(i,j,k)*bar_norm + cwbar(i,j,k)=cwbar(i,j,k)*bar_norm + end do + end do + end do + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + pbar_nmmb(i,j)=pbar_nmmb(i,j)*bar_norm +! also save pbar to module array ps_bar for possible use in vertical localization +! in terms of scale heights/normalized p/p + ps_bar(i,j,1)=pbar_nmmb(i,j) + end do + end do +! write(fname,'("test_pbar_uncorrected")') +! call grads1a(pbar,1,mype,trim(fname)) +! write(fname,'("test_ges_ps")') +! call grads1a(ges_ps,1,mype,trim(fname)) +! write(fname,'("test_ges_z")') +! call grads1a(ges_z,1,mype,trim(fname)) + +! Subtract mean from ensemble members, but save scaling by sqrt(1/(nens-1)) until after vertical interpolation + n1=1 +!www ensemble perturbation for all but the first member if full_ensemble + if(full_ensemble)n1=2 + + do n=n1,n_ens + do k=1,grd_mix%nsig + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + st_eg(i,j,k,n)=st_eg(i,j,k,n)-stbar(i,j,k) + vp_eg(i,j,k,n)=vp_eg(i,j,k,n)-vpbar(i,j,k) + t_eg(i,j,k,n)= t_eg(i,j,k,n)- tbar(i,j,k) + rh_eg(i,j,k,n)=rh_eg(i,j,k,n)-rhbar(i,j,k) + oz_eg(i,j,k,n)=oz_eg(i,j,k,n)-ozbar(i,j,k) + cw_eg(i,j,k,n)=cw_eg(i,j,k,n)-cwbar(i,j,k) + end do + end do + end do + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + p_eg_nmmb(i,j,n)=p_eg_nmmb(i,j,n)-pbar_nmmb(i,j) + end do + end do + end do + deallocate(stbar,vpbar,rhbar,ozbar,cwbar) + +! now obtain mean pressure prsl +! 
compute 3d pressure on interfaces + kap1=rd_over_cp+one + kapr=one/rd_over_cp + allocate(pri(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig+1)) + pri=zero + k=1 + k2=grd_mix%nsig+1 + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + pri(i,j,k)=pbar_nmmb(i,j) + pri(i,j,k2)=zero + end do + end do + if (idvc /= 3) then + do k=2,grd_mix%nsig + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + pri(i,j,k)=ak5(k)+bk5(k)*pbar_nmmb(i,j) + end do + end do + end do + else + do k=2,grd_mix%nsig + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + trk=(half*(tbar(i,j,k-1)+tbar(i,j,k))/tref5(k))**kapr + pri(i,j,k)=ak5(k)+(bk5(k)*pbar_nmmb(i,j))+(ck5(k)*trk) + end do + end do + end do + end if + +! Get 3d pressure field now at layer midpoints + allocate(prsl(grd_mix%lat2,grd_mix%lon2,grd_mix%nsig)) + if (idsl/=2) then + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + do k=1,grd_mix%nsig + prsl(i,j,k)=((pri(i,j,k)**kap1-pri(i,j,k+1)**kap1)/& + (kap1*(pri(i,j,k)-pri(i,j,k+1))))**kapr + end do + end do + end do + else + do j=1,grd_mix%lon2 + do i=1,grd_mix%lat2 + do k=1,grd_mix%nsig + prsl(i,j,k)=(pri(i,j,k)+pri(i,j,k+1))*half + end do + end do + end do + end if + deallocate(pri,pbar_nmmb,tbar) + deallocate(ak5,bk5,ck5,tref5) + +! interpolate/extrapolate in vertical using yoshi's spline code. + +! first need ges_prsl_e, the 3d pressure on the ensemble grid. + + allocate(ges_prsl_e(grd_ens%inner_vars,grd_ens%lat2,grd_ens%lon2,grd_ens%nsig)) + if(dual_res) then + call general_suba2sube(grd_a1,grd_e1,p_e2a,ges_prsl(:,1,1,it),ges_prsl_e(1,:,1,1),regional) ! x? 
+ else + ges_prsl_e(1,:,:,:)=ges_prsl(:,:,:,it) + end if + + allocate(xspli(grd_mix%nsig),yspli(grd_mix%nsig),xsplo(grd_ens%nsig),ysplo(grd_ens%nsig)) + + allocate(ut(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig)) + allocate(vt(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig)) + allocate(tt(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig)) + allocate(rht(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig)) + allocate(ozt(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig)) + allocate(cwt(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig)) + + allocate(w3(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig)) + allocate(w2(grd_ens%lat2,grd_ens%lon2)) + allocate(en_perts(n_ens,grd_ens%lat2,grd_ens%lon2,nc2d+nc3d*grd_ens%nsig)) + + do n=1,n_ens + do j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + do k=1,grd_mix%nsig + xspli(k)=log(prsl(i,j,k)*10.0_r_kind) + end do + do k=1,grd_ens%nsig + xsplo(k)=log(ges_prsl_e(1,i,j,k)*10._r_kind) + end do + +! u + do k=1,grd_mix%nsig + yspli(k)=st_eg(i,j,k,n) + end do + call intp_spl(xspli,yspli,xsplo,ysplo,grd_mix%nsig,grd_ens%nsig) +! following is to correct for bug in intp_spl + do k=1,grd_ens%nsig + if(xsplo(k) < xspli(grd_mix%nsig)) ysplo(k)=yspli(grd_mix%nsig) + if(xsplo(k) > xspli(1)) ysplo(k)=yspli(1) + end do + do k=1,grd_ens%nsig + ut(i,j,k)=ysplo(k) + end do +! v + do k=1,grd_mix%nsig + yspli(k)=vp_eg(i,j,k,n) + end do + call intp_spl(xspli,yspli,xsplo,ysplo,grd_mix%nsig,grd_ens%nsig) +! following is to correct for bug in intp_spl + do k=1,grd_ens%nsig + if(xsplo(k) < xspli(grd_mix%nsig)) ysplo(k)=yspli(grd_mix%nsig) + if(xsplo(k) > xspli(1)) ysplo(k)=yspli(1) + end do + do k=1,grd_ens%nsig + vt(i,j,k)=ysplo(k) + end do +! t + do k=1,grd_mix%nsig + yspli(k)=t_eg(i,j,k,n) + end do + call intp_spl(xspli,yspli,xsplo,ysplo,grd_mix%nsig,grd_ens%nsig) +! 
following is to correct for bug in intp_spl + do k=1,grd_ens%nsig + if(xsplo(k) < xspli(grd_mix%nsig)) ysplo(k)=yspli(grd_mix%nsig) + if(xsplo(k) > xspli(1)) ysplo(k)=yspli(1) + end do + do k=1,grd_ens%nsig + ysplo(k)=ysplo(k)*(0.01_r_kind*ges_prsl_e(1,i,j,k))**rd_over_cp ! converting from pot Tv to Tv + tt(i,j,k)=ysplo(k) + end do +! rh + do k=1,grd_mix%nsig + yspli(k)=rh_eg(i,j,k,n) + end do + call intp_spl(xspli,yspli,xsplo,ysplo,grd_mix%nsig,grd_ens%nsig) +! following is to correct for bug in intp_spl + do k=1,grd_ens%nsig + if(xsplo(k) < xspli(grd_mix%nsig)) ysplo(k)=yspli(grd_mix%nsig) + if(xsplo(k) > xspli(1)) ysplo(k)=yspli(1) + end do + do k=1,grd_ens%nsig + rht(i,j,k)=ysplo(k) + end do +! oz + do k=1,grd_mix%nsig + yspli(k)=oz_eg(i,j,k,n) + end do + call intp_spl(xspli,yspli,xsplo,ysplo,grd_mix%nsig,grd_ens%nsig) +! following is to correct for bug in intp_spl + do k=1,grd_ens%nsig + if(xsplo(k) < xspli(grd_mix%nsig)) ysplo(k)=yspli(grd_mix%nsig) + if(xsplo(k) > xspli(1)) ysplo(k)=yspli(1) + end do + do k=1,grd_ens%nsig + ozt(i,j,k)=ysplo(k) + end do +! cw + do k=1,grd_mix%nsig + yspli(k)=cw_eg(i,j,k,n) + end do + call intp_spl(xspli,yspli,xsplo,ysplo,grd_mix%nsig,grd_ens%nsig) +! following is to correct for bug in intp_spl + do k=1,grd_ens%nsig + if(xsplo(k) < xspli(grd_mix%nsig)) ysplo(k)=yspli(grd_mix%nsig) + if(xsplo(k) > xspli(1)) ysplo(k)=yspli(1) + end do + do k=1,grd_ens%nsig + cwt(i,j,k)=ysplo(k) + end do + + end do + end do + +!wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww + if(n==1 .and. full_ensemble)then + + allocate(qs(lat2,lon2,nsig)) + ice=.true. + iderivative=0 + do k=1,nsig + do j=1,lon2 + do i=1,lat2 + qs(i,j,k)=ges_q(i,j,k) + end do + end do + end do + call genqsat(qs,ges_tsen(:,:,:,it),ges_prsl(:,:,:,it),lat2,lon2,nsig,ice,iderivative) + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +!!!!!!!!!!! The first member is full perturbation based on regional first guess !!! 
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! put fist guess in ensemble grid & Subtract guess from 1st ensemble member (ensemble mean) + + if (dual_res) then + allocate ( tmp_ens(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig,1) ) + allocate ( tmp_ens2(grd_ens%lat2,grd_ens%lon2,grd_ens%nsig,1) ) + allocate ( tmp_anl(lat2,lon2,nsig,1) ) + + if (.not.q_hyb_ens) then + tmp_anl(:,:,:,1)=qs(:,:,:) + call general_suba2sube(grd_a1,grd_e1,p_e2a,tmp_anl,tmp_ens,regional) + tmp_anl(:,:,:,1)=ges_q(:,:,:) + call general_suba2sube(grd_a1,grd_e1,p_e2a,tmp_anl,tmp_ens2,regional) + rht(:,:,:) = rht(:,:,:)-tmp_ens2(:,:,:,1)/tmp_ens(:,:,:,1) + else + tmp_anl(:,:,:,1)=ges_q(:,:,:) + call general_suba2sube(grd_a1,grd_e1,p_e2a,tmp_anl,tmp_ens2,regional) + rht(:,:,:) = rht(:,:,:)-tmp_ens2(:,:,:,1) + end if + + tmp_anl(:,:,:,1)=ges_u(:,:,:) + call general_suba2sube(grd_a1,grd_e1,p_e2a,tmp_anl,tmp_ens,regional) + ut(:,:,:) = ut(:,:,:)-tmp_ens(:,:,:,1) + tmp_anl(:,:,:,1)=ges_v(:,:,:) + call general_suba2sube(grd_a1,grd_e1,p_e2a,tmp_anl,tmp_ens,regional) + vt(:,:,:) = vt(:,:,:)-tmp_ens(:,:,:,1) + tmp_anl(:,:,:,1)=ges_tv(:,:,:) + call general_suba2sube(grd_a1,grd_e1,p_e2a,tmp_anl,tmp_ens,regional) + tt(:,:,:) = tt(:,:,:)-tmp_ens(:,:,:,1) + tmp_anl(:,:,1,1)=ges_ps(:,:) + call general_suba2sube(grd_a1,grd_e1,p_e2a,tmp_anl,tmp_ens,regional) + p_eg_nmmb(:,:,n) = p_eg_nmmb(:,:,n)-tmp_ens(:,:,1,1) + deallocate(tmp_anl,tmp_ens,tmp_ens2) + else + do k=1,grd_ens%nsig + do j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + ut(i,j,k) = ut(i,j,k)-ges_u(i,j,k) + vt(i,j,k) = vt(i,j,k)-ges_v(i,j,k) + tt(i,j,k) = tt(i,j,k)-ges_tv(i,j,k) + end do + end do + end do + + if (.not.q_hyb_ens) then + do k=1,grd_ens%nsig + do j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + rht(i,j,k) = rht(i,j,k)-ges_q(i,j,k)/qs(i,j,k) + end do + end do + end do + else + do k=1,grd_ens%nsig + do j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + rht(i,j,k) = rht(i,j,k)-ges_q(i,j,k) + end do + end do + end do + end if + + do 
j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + p_eg_nmmb(i,j,n) = p_eg_nmmb(i,j,n)-ges_ps(i,j) + end do + end do + endif + deallocate(qs) + + endif ! n==1 .and. full_ensemble + +!wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww + +! transfer from temporary arrays to perturbation arrays and normalize by sig_norm + +! sig_norm from the following +! 2*J_b = x^T * (beta1*B + beta2*P_ens)^(-1) * x +! where P_ens is the ensemble covariance which is the sum of outer products of the +! ensemble perturbations (unnormalized) divided by n_ens-1 (or n_ens, depending on who you read). + sig_norm=sqrt(one/max(one,n_ens_temp-one)) + +! if(n_ens_temp==n_ens.and.n==n_ens+1) sig_norm=one +! if(n==1 .or. n==2 .or. n==50) then +! write(fname,'("test_pp_",i2.2)')n +! call grads1a(p_eg_nmmb(1,1,n),1,mype,trim(fname)) +! write(fname,'("test_up_",i2.2)')n +! call grads1a(ut,grd_ens%nsig,mype,trim(fname)) +! write(fname,'("test_vp_",i2.2)')n +! call grads1a(vt,grd_ens%nsig,mype,trim(fname)) +! write(fname,'("test_tp_",i2.2)')n +! call grads1a(tt,grd_ens%nsig,mype,trim(fname)) +! write(fname,'("test_rhp_",i2.2)')n +! call grads1a(rht,grd_ens%nsig,mype,trim(fname)) +!! write(fname,'("test_ozp_",i2.2)')n +!! call grads1a(ozt,grd_ens%nsig,mype,trim(fname)) +!! write(fname,'("test_cwp_",i2.2)')n +!! call grads1a(cwt,grd_ens%nsig,mype,trim(fname)) +! end if + do ic3=1,nc3d + +! if(ntlevs_ens > 1) then +! call gsi_bundlegetpointer(en_perts(n,it),trim(cvars3d(ic3)),w3,istatus) +! else +! call gsi_bundlegetpointer(en_perts(n,1),trim(cvars3d(ic3)),w3,istatus) +! endif +! if(istatus/=0) then +! write(6,*)' error retrieving pointer to ',trim(cvars3d(ic3)),' for ensemble member ',n +! call stop2(999) +! 
end if + + select case (trim(cvars3d(ic3))) + + case('sf','SF') + + do k=1,grd_ens%nsig + do j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + w3(i,j,k) = ut(i,j,k)*sig_norm + en_perts(n,i,j,(ic3-1)*grd_ens%nsig+k)=w3(i,j,k) + end do + end do + end do + + case('vp','VP') + + do k=1,grd_ens%nsig + do j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + w3(i,j,k) = vt(i,j,k)*sig_norm + en_perts(n,i,j,(ic3-1)*grd_ens%nsig+k)=w3(i,j,k) + end do + end do + end do + + case('t','T') + + do k=1,grd_ens%nsig + do j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + w3(i,j,k) = tt(i,j,k)*sig_norm + en_perts(n,i,j,(ic3-1)*grd_ens%nsig+k)=w3(i,j,k) + end do + end do + end do + + case('q','Q') + + do k=1,grd_ens%nsig + do j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + w3(i,j,k) = rht(i,j,k)*sig_norm + en_perts(n,i,j,(ic3-1)*grd_ens%nsig+k)=w3(i,j,k) + end do + end do + end do + + case('oz','OZ') +! temporarily ignore ozone perturbations + + do k=1,grd_ens%nsig + do j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + ! w3(i,j,k) = ozt(i,j,k)*sig_norm + w3(i,j,k) = zero + en_perts(n,i,j,(ic3-1)*grd_ens%nsig+k)=w3(i,j,k) + end do + end do + end do + + case('cw','CW') +! temporarily ignore cloud water perturbations + + do k=1,grd_ens%nsig + do j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + ! w3(i,j,k) = cwt(i,j,k)*sig_norm + w3(i,j,k) = zero + en_perts(n,i,j,(ic3-1)*grd_ens%nsig+k)=w3(i,j,k) + end do + end do + end do + + end select + end do + do ic2=1,nc2d + +! if(ntlevs_ens > 1) then +! call gsi_bundlegetpointer(en_perts(n,it),trim(cvars2d(ic2)),w2,istatus) +! else +! call gsi_bundlegetpointer(en_perts(n,1),trim(cvars2d(ic2)),w2,istatus) +! endif +! if(istatus/=0) then +! write(6,*)' error retrieving pointer to ',trim(cvars2d(ic2)),' for ensemble member ',n +! call stop2(999) +! end if + + select case (trim(cvars2d(ic2))) + + case('ps','PS') + + do j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + w2(i,j) = p_eg_nmmb(i,j,n)*sig_norm + en_perts(n,i,j,nc3d*grd_ens%nsig+ic2)=w2(i,j) + end do + end do + + case('sst','SST') + +! 
dtk: temporarily ignore sst perturbations in hybrid + do j=1,grd_ens%lon2 + do i=1,grd_ens%lat2 + w2(i,j) = zero + en_perts(n,i,j,nc3d*grd_ens%nsig+ic2)=w2(i,j) + end do + end do + + end select + end do + end do + + call general_sub2grid_destroy_info(grd_gfs) + call general_sub2grid_destroy_info(grd_mix) + call general_sub2grid_destroy_info(grd_gfst) +! +! +! CALCULATE ENSEMBLE SPREAD + if(write_ens_sprd)then + call mpi_barrier(mpi_comm_world,ierror) +!gsd call wrf_mass_ensperts%ens_spread_dualres_regional(mype,en_perts,nelen) + call mpi_barrier(mpi_comm_world,ierror) + end if + + call general_destroy_spec_vars(sp_gfs) + deallocate(vector) + deallocate(st_eg,vp_eg,t_eg,rh_eg) + deallocate(oz_eg,cw_eg,p_eg_nmmb) + deallocate(ges_prsl_e) + deallocate(xspli,yspli,xsplo,ysplo) + deallocate(prsl) + deallocate(ut,vt,tt,rht,ozt,cwt) + + enddo ! it=1,ntlevs_ens + + iunit=20 + if(wrt_pert_sub) then ! write perturbations in subdomain + write(filename,'(a,I4.4)') 'saved_en_perts.pe',mype + if(mype==0) write(*,*) 'save en_perts as ', trim(filename) + open(iunit,file=trim(filename),form='unformatted') + do n=1,n_ens +! + write(iunit) n + write(iunit) ps_bar(:,:,1) +! if(mype==0) write(*,*) n,maxval(ps_bar(:,:,1)),minval(ps_bar(:,:,1)) +! + do ic3=1,nc3d + + do k=1,grd_ens%nsig + w3(:,:,k)=en_perts(n,:,:,(ic3-1)*grd_ens%nsig+k) + enddo + write(iunit) cvars3d(ic3) + write(iunit) w3 +! if(mype==0) write(*,*) ic3,cvars3d(ic3) + do k=1,nsig + if(mype==0) write(*,*) cvars3d(ic3),k,maxval(w3(:,:,k)),minval(w3(:,:,k)) + enddo + + end do + do ic2=1,nc2d + + w2(:,:)=en_perts(n,:,:,nc3d*grd_ens%nsig+ic2) + write(iunit) cvars2d(ic2) + write(iunit) w2 +! if(mype==0) write(*,*) ic2,cvars3d(ic2) +! if(mype==0) write(*,*) maxval(w2(:,:)),minval(w2(:,:)) + end do + + end do + close(iunit) + deallocate(w3,w2) + endif + + if(wrt_pert_mem) then + inner_vars=1 + num_fields=nc3d*grd_ens%nsig+nc2d + allocate(vector(num_fields)) + vector=.false. 
+ + if(mype==0) write(*,*) 'final==',inner_vars,grd_ens%nlat,grd_ens%nlon,grd_ens%nsig,num_fields,regional + call general_sub2grid_create_info(grd_arw,inner_vars, & + grd_ens%nlat,grd_ens%nlon,grd_ens%nsig, & + num_fields,regional,vector) + allocate(work_sub(inner_vars,grd_arw%lat2,grd_arw%lon2,grd_arw%num_fields)) + allocate(work(inner_vars,grd_arw%nlat,grd_arw%nlon,grd_arw%kbegin_loc:grd_arw%kend_alloc)) + do n = 1,n_ens + do k = 1,num_fields ; do j = 1,grd_arw%lon2 ; do i = 1,grd_arw%lat2 + work_sub(1,i,j,k) = en_perts(n,i,j,k) + enddo ; enddo ; enddo + + call general_sub2grid(grd_arw,work_sub,work) + +! do k=grd_arw%kbegin_loc,grd_arw%kend_alloc +! write(*,*) k,maxval(work(1,:,:,k)),minval(work(1,:,:,k)) +! write(1000+k) work(1,:,:,k) +! enddo + write(charmem,'("_mem",i3.3)') n + filenameout="enspreproc_arw" // trim(adjustl(charmem)) + + call mpi_file_open(mpi_comm_world,trim(adjustl(filenameout)), & + mpi_mode_wronly+mpi_mode_create, & + mpi_info_null,lunit,ierror) + if ( ierror /= 0 ) then + write(6,'(a,i5,a,i5,a)') '***ERROR*** MPI_FILE_OPEN failed on task =', & + mype ,' ierror = ',ierror,' aborting!' + goto 999 + endif + + disp = grd_arw%nlat * grd_arw%nlon * (grd_arw%kbegin_loc-1) * r_kind + + call mpi_file_set_view(lunit,disp,mpi_rtype,mpi_rtype,'native',mpi_info_null,ierror) + if ( ierror /= 0 ) then + write(6,'(a,i5,a,i5,a)') '***ERROR*** MPI_FILE_SET_VIEW failed on task = ',& + mype ,' ierror = ',ierror,' aborting!' + goto 999 + endif + + count = grd_arw%nlat * grd_arw%nlon * grd_arw%nlevs_alloc + + call mpi_file_write(lunit,work,count,mpi_rtype,istatus,ierror) + if ( ierror /= 0 ) then + write(6,'(a,i5,a,i5,a)') '***ERROR*** MPI_FILE_WRITE failed on task =', & + mype ,' ierror = ',ierror,' aborting!' + goto 999 + endif + + call mpi_file_close(lunit,ierror) + if ( ierror /= 0 ) then + write(6,'(a,i5,a,i5,a)') '***ERROR*** MPI_FILE_CLOSE failed on task =', & + mype ,' ierror = ',ierror,' aborting!' + goto 999 + endif + + enddo ! 
do i_ens = 1,n_ens + + deallocate(work_sub,work,vector) + endif + + if(enpert4arw) then + inner_vars=1 +! num_fields=nc3d*grd_ens%nsig+nc2d + num_fields=1 + allocate(vector(num_fields)) + vector=.false. + + if(mype==0) write(*,*) 'final==',inner_vars,grd_ens%nlat,grd_ens%nlon,grd_ens%nsig,num_fields,regional + call general_sub2grid_create_info(grd_arw,inner_vars, & + grd_ens%nlat,grd_ens%nlon,grd_ens%nsig, & + num_fields,regional,vector) + + allocate(z1(grd_arw%inner_vars*grd_arw%nlat*grd_arw%nlon)) + allocate(workh(grd_arw%inner_vars,grd_arw%nlat,grd_arw%nlon)) + + sig_norm=1.0_r_kind/sig_norm + do n=1,n_ens + if(mype==0) then + write(filename,'(a,I4.4)') 'en_perts4arw.mem',n + if(mype==0) then + write(*,*) 'save perturbations for ', trim(filename) + write(*,*) nc3d,nc2d,cvars3d,cvars2d + write(*,*) grd_arw%nlat,grd_arw%nlon,grd_arw%nsig + endif + open(iunit,file=trim(filename),form='unformatted') + write(iunit) nc3d,nc2d,cvars3d,cvars2d + write(iunit) grd_arw%nlat,grd_arw%nlon,grd_arw%nsig + endif + + do k=1,nc3d*grd_ens%nsig+nc2d + + ii=0 + do j=1,lon2 + do i=1,lat2 + ii=ii+1 + z1(ii)=en_perts(n,i,j,k)*sig_norm + end do + end do + if(k==nc3d*grd_ens%nsig+1) z1=z1*1000.0 ! change Ps from CB to Pa) + call general_gather2grid(grd_arw,z1,workh,0) + if(mype==0) then + write(*,*) k,maxval(workh),minval(workh) + write(iunit) workh + endif + + end do + + if(mype==0) close(iunit) + enddo ! 
n + + deallocate(z1,workh,vector) + endif + + deallocate(en_perts) + + return + +30 write(6,*) 'GET_GEFS+FOR_REGIONAL open filelist failed ' + call stop2(555) +20 write(6,*) 'GET_GEFS+FOR_REGIONAL read gfs ens failed ',n + call stop2(555) +999 write(6,*) 'GET_GEFS+FOR_REGIONAL create full field failed',n + call stop2(666) +end subroutine get_gefs_for_regional_enspro + diff --git a/util/EnKF/arw/src/enspreproc_regional.fd/gfsp2wrfg.f90 b/util/EnKF/arw/src/enspreproc_regional.fd/gfsp2wrfg.f90 new file mode 100644 index 000000000..6f2cd255f --- /dev/null +++ b/util/EnKF/arw/src/enspreproc_regional.fd/gfsp2wrfg.f90 @@ -0,0 +1,167 @@ +PROGRAM gfsp2wrfg +! +!$$$ main program documentation block +! . . . . +! main program: gfsp2wrfg +! PRGMMR: HU ORG: GSD DATE: 2014-12-18 +! +! abstract: This program reads in GFS forecast spectral coefficients +! and convert them to WRF grids +! +! program history log: +! 2014-12-18 Hu initial code based on GSI +! +!EOP +!------------------------------------------------------------------------- + +! !USES: + use mpimod, only: npe,mpi_comm_world,ierror,mype + use mpeu_util, only: die +! use initial, only: miterrr +! 
use initial, only: init_namelist + use gridmod, only: wrf_mass_regional,diagnostic_reg,regional,use_gfs_nemsio + use gridmod, only: init_grid,init_reg_glob_ll,init_grid_vars,final_grid_vars + use gridmod, only: grid_ratio_wrfmass + use constants, only: init_constants,init_constants_derived + use guess_grids, only:create_ges_grids,destroy_ges_grids,nfldsig + use gridmod, only: nlat,nlon,lat2,lon2,nsig,regional,nsig_soil + use gridmod, only: jcap,nlat_regional,nlon_regional + use control_vectors, only: cvars3d,cvars2d,nrf_var + use control_vectors, only: init_anacv,final_anacv + use guess_grids, only: load_prsges,ges_prsl + use guess_grids_enspro, only: load_prsges_enspro + use gsi_metguess_mod, only: gsi_metguess_init,gsi_metguess_final + use state_vectors, only: init_anasv,final_anasv + use guess_grids, only: create_metguess_grids, destroy_metguess_grids + use hybrid_ensemble_isotropic, only: hybens_grid_setup + use hybrid_ensemble_isotropic, only: create_ensemble,destroy_ensemble + use hybrid_ensemble_parameters, only: grid_ratio_ens,n_ens + use hybrid_ensemble_parameters, only: uv_hyb_ens,grid_ratio_ens + use hybrid_ensemble_parameters, only: ntlevs_ens,ensemble_path + use guess_grids, only: ntguessig + use gridmod, only: wrf_mass_hybridcord + use gsi_4dvar, only: nhr_assimilation + + + implicit none + logical :: enpert4arw,wrt_pert_sub,wrt_pert_mem + integer :: jcap_ens +! +! Declare variables. +! + namelist/setup/ regional,wrf_mass_regional,diagnostic_reg, & + switch_on_derivatives,tendsflag,nfldsig, & + grid_ratio_ens,n_ens,grid_ratio_ens,grid_ratio_wrfmass,& + enpert4arw,wrt_pert_sub,wrt_pert_mem,wrf_mass_hybridcord,& + use_gfs_nemsio,jcap_ens +! +! +! + integer :: ios,k + character(len=80) :: myname_ + logical switch_on_derivatives,tendsflag +!EOC + +!--------------------------------------------------------------------------- +! NOAA/ESRL/GSD/EMB ! +!------------------------------------------------------------------------- +!BOP + +! 
MPI + call MPI_INIT(ierror) + call mpi_comm_size(mpi_comm_world,npe,ierror) + call mpi_comm_rank(mpi_comm_world,mype,ierror) +! +! + myname_='program gfsp2wrfg' + + if (mype==0) call w3tagb('GFSP2WRFG',1999,0232,0055,'GSD') +! +! +! intialization +! + call gsi_metguess_init + call init_anasv + call init_anacv + call init_constants_derived + call init_grid +! +! default namelist value +! + regional=.true. + wrf_mass_regional=.true. + diagnostic_reg=.true. + switch_on_derivatives=.false. + tendsflag=.false. + nfldsig=1 + grid_ratio_ens=1 + grid_ratio_wrfmass=1 + enpert4arw=.true. + wrt_pert_sub=.false. + wrt_pert_mem=.false. + wrf_mass_hybridcord=.false. + jcap_ens=574 + +! +! read in namelist +! + open(11,file='namelist.input') + read(11,setup,iostat=ios) + if(ios/=0) call die(myname_,'read(setup)',ios) + close(11) + +! Write namelist output to standard out + if(mype==0) then + write(6,200) +200 format(' calling gfsp2wrfg with following input parameters:',//) + write(6,setup) + endif + ntguessig=1 + ntlevs_ens=1 + uv_hyb_ens=.true. + nhr_assimilation=1 +! +! read in regional background and convert it to binary intermediate file +! + if (mype==0) call read_netcdf_mass4ens +! + call mpi_barrier(mpi_comm_world,ierror) +! + call init_constants(regional) + call init_reg_glob_ll(mype,21) + if(mype==0) write(*,*) size(cvars3d),size(cvars2d),size(nrf_var) + call init_grid_vars(jcap,npe,cvars3d,cvars2d,nrf_var,mype) +! +! + call create_metguess_grids(mype,ierror) + call create_ges_grids(switch_on_derivatives,tendsflag) + call mpi_barrier(mpi_comm_world,ierror) +! + call read_wrf_mass_netcdf_guess4ens(mype) + call mpi_barrier(mpi_comm_world,ierror) + call load_prsges +!mhu call load_prsges_enspro +! + call mpi_barrier(mpi_comm_world,ierror) + call hybens_grid_setup + call create_ensemble +! +!mhu call read_gfs_for_regional + call get_gefs_for_regional_enspro(enpert4arw,wrt_pert_sub,wrt_pert_mem,jcap_ens) + + +! 
release space + call destroy_ges_grids + call destroy_metguess_grids(mype,ierror) + + call final_grid_vars + call final_anacv + call final_anasv + call gsi_metguess_final + +! Done + if (mype==0) call w3tage('GFSP2WRFG') + + call mpi_finalize(ierror) + +END PROGRAM gfsp2wrfg diff --git a/util/EnKF/arw/src/enspreproc_regional.fd/guess_grids_enspro.f90 b/util/EnKF/arw/src/enspreproc_regional.fd/guess_grids_enspro.f90 new file mode 100644 index 000000000..fb3351c63 --- /dev/null +++ b/util/EnKF/arw/src/enspreproc_regional.fd/guess_grids_enspro.f90 @@ -0,0 +1,184 @@ +!------------------------------------------------------------------------- +! NOAA/NCEP, National Centers for Environmental Prediction GSI ! +!------------------------------------------------------------------------- +!BOP +! +! !MODULE: guess_grids --- Guess-related grid definitions +! +! !INTERFACE: +! + +module guess_grids_enspro + +! !USES: + + use kinds, only: r_single,r_kind,i_kind + use gridmod, only: regional + use gridmod, only: wrf_nmm_regional,nems_nmmb_regional + use gridmod, only: eta1_ll + use gridmod, only: eta2_ll + use gridmod, only: aeta1_ll + use gridmod, only: aeta2_ll + use gridmod, only: pdtop_ll + use gridmod, only: pt_ll + use guess_grids, only: ges_prsl,ges_prsi,ges_lnprsl,ges_lnprsi + use guess_grids, only: nfldsig,ntguessig + + use gsi_bundlemod, only : gsi_bundlegetpointer + use gsi_metguess_mod, only: gsi_metguess_bundle + + ! meteorological guess (beyond standard ones) + + use mpeu_util, only: die,tell + implicit none + +! !DESCRIPTION: module containing variables related to the guess fields +! +! !REVISION HISTORY: +! +!EOP +!------------------------------------------------------------------------- + +! set default to private + private +! set subroutines to public + public :: load_prsges_enspro + + character(len=*),parameter::myname='guess_grids' + +contains + +!------------------------------------------------------------------------- +! 
NOAA/NCEP, National Centers for Environmental Prediction GSI ! +!------------------------------------------------------------------------- +!BOP +! +! !IROUTINE: load_prsges --- Populate guess pressure arrays +! +! !INTERFACE: +! + subroutine load_prsges_enspro + +! !USES: + + use constants,only: zero,one,rd_over_cp,one_tenth,half,ten + use gridmod, only: lat2,lon2,nsig,ak5,bk5,ck5,tref5,idvc5,& + regional,wrf_nmm_regional,nems_nmmb_regional,wrf_mass_regional,& + cmaq_regional,pt_ll,aeta2_ll,& + aeta1_ll,eta2_ll,pdtop_ll,eta1_ll,twodvar_regional,idsl5 + implicit none + +! !DESCRIPTION: populate guess pressure arrays +! +! !REVISION HISTORY: +! 2003-10-15 kleist +! 2004-03-22 parrish, regional capability added +! 2004-05-14 kleist, documentation +! 2004-07-15 todling, protex-compliant prologue; added onlys +! 2004-07-28 treadon - remove subroutine call list, pass variables via modules +! 2005-05-24 pondeca - add regional surface analysis option +! 2006-04-14 treadon - unify global calculations to use ak5,bk5 +! 2006-04-17 treadon - add ges_psfcavg and ges_prslavg for regional +! 2006-07-31 kleist - use ges_ps instead of ln(ps) +! 2007-05-08 kleist - add fully generalized coordinate for pressure calculation +! 2011-07-07 todling - add cap for log(pressure) calculation +! +! !REMARKS: +! language: f90 +! machine: ibm rs/6000 sp; SGI Origin 2000; Compaq/HP +! +! !AUTHOR: +! kleist org: w/nmc20 date: 2003-10-15 +! +!EOP +!------------------------------------------------------------------------- + +! Declare local parameter + character(len=*),parameter::myname_=myname//'*load_prsges' + real(r_kind),parameter:: r1013=1013.0_r_kind + +! Declare local variables + real(r_kind) kap1,kapr,trk + real(r_kind),dimension(:,:) ,pointer::ges_ps=>NULL() + real(r_kind),dimension(:,:,:),pointer::ges_tv=>NULL() + integer(i_kind) i,j,k,jj,itv,ips + logical ihaveprs(nfldsig) + + kap1=rd_over_cp+one + kapr=one/rd_over_cp + + ihaveprs=.false. 
+ do jj=1,nfldsig + call gsi_bundlegetpointer(gsi_metguess_bundle(jj),'ps' ,ges_ps,ips) + if(ips/=0) call die(myname_,': ps not available in guess, abort',ips) + call gsi_bundlegetpointer(gsi_metguess_bundle(jj),'tv' ,ges_tv,itv) + if(idvc5==3) then + if(itv/=0) call die(myname_,': tv must be present when idvc5=3, abort',itv) + endif + do k=1,nsig+1 + do j=1,lon2 + do i=1,lat2 + if(regional) then + if (wrf_nmm_regional.or.nems_nmmb_regional.or.& + cmaq_regional ) & + ges_prsi(i,j,k,jj)=one_tenth* & + (eta1_ll(k)*pdtop_ll + & + eta2_ll(k)*(ten*ges_ps(i,j)-pdtop_ll-pt_ll) + & + pt_ll) + + if (wrf_mass_regional .or. twodvar_regional) & + ges_prsi(i,j,k,jj)=one_tenth*(eta1_ll(k)*(ten*ges_ps(i,j)-pt_ll) + pt_ll) + endif + ges_prsi(i,j,k,jj)=max(ges_prsi(i,j,k,jj),zero) + ges_lnprsi(i,j,k,jj)=log(max(ges_prsi(i,j,k,jj),0.0001_r_kind)) + end do + end do + end do + ihaveprs(jj)=.true. + end do + + if(regional) then + if (wrf_nmm_regional.or.nems_nmmb_regional.or.cmaq_regional) then +! load using aeta coefficients + do jj=1,nfldsig + call gsi_bundlegetpointer(gsi_metguess_bundle(jj),'ps' ,ges_ps ,ips) + do k=1,nsig + do j=1,lon2 + do i=1,lat2 + ges_prsl(i,j,k,jj)=one_tenth* & + (aeta1_ll(k)*pdtop_ll + & + aeta2_ll(k)*(ten*ges_ps(i,j)-pdtop_ll-pt_ll) + & + pt_ll) + ges_lnprsl(i,j,k,jj)=log(ges_prsl(i,j,k,jj)) + + end do + end do + end do + end do + end if ! end if wrf_nmm regional block + if (wrf_mass_regional .or. twodvar_regional) then +! load using aeta coefficients + do jj=1,nfldsig + call gsi_bundlegetpointer(gsi_metguess_bundle(jj),'ps' ,ges_ps ,ips) + write(*,*) 'ps==',maxval(ges_ps),minval(ges_ps) + + do k=1,nsig + do j=1,lon2 + do i=1,lat2 + ges_prsl(i,j,k,jj)=one_tenth*(aeta1_ll(k)*(ten*ges_ps(i,j)-pt_ll)+pt_ll) + ges_lnprsl(i,j,k,jj)=log(ges_prsl(i,j,k,jj)) + end do + end do + end do + end do + end if ! end if wrf_mass regional block + + else + + end if ! 
end regional/global block + + return + end subroutine load_prsges_enspro + + +end module guess_grids_enspro diff --git a/util/EnKF/arw/src/enspreproc_regional.fd/read_wrf_mass_guess4ens.F90 b/util/EnKF/arw/src/enspreproc_regional.fd/read_wrf_mass_guess4ens.F90 new file mode 100644 index 000000000..b18911047 --- /dev/null +++ b/util/EnKF/arw/src/enspreproc_regional.fd/read_wrf_mass_guess4ens.F90 @@ -0,0 +1,233 @@ +#ifdef WRF + +subroutine read_wrf_mass_netcdf_guess4ens(mype) +!$$$ subprogram documentation block +! . . . . +! subprogram: read_wrf_mass_guess read wrf_mass interface file +! prgmmr: parrish org: np22 date: 2003-09-05 +! +! abstract: in place of read_guess for global application, read guess +! from regional model, in this case the wrf mass core model. +! This version reads a binary file created +! in a previous step that interfaces with the wrf infrastructure. +! A later version will read directly from the wrf restart file. +! The guess is read in by complete horizontal fields, one field +! per processor, in parallel. Each horizontal input field is +! converted from the staggered c-grid to an unstaggered a-grid. +! On the c-grid, u is shifted 1/2 point in the negative x direction +! and v 1/2 point in the negative y direction, but otherwise the +! three grids are regular. When the fields are read in, nothing +! is done to mass variables, but wind variables are interpolated to +! mass points. +! +! program history log: +! 2014-12-23 Hu +! +! input argument list: +! mype - pe number +! +! attributes: +! language: f90 +! machine: ibm RS/6000 SP +! 
+!$$$ + use kinds, only: r_kind,r_single,i_kind + use mpimod, only: mpi_sum,mpi_integer,mpi_real4,mpi_comm_world,npe,ierror + use mpimod, only: npe + use guess_grids, only: nfldsig,ifilesig,ntguessig + use gridmod, only: lat2,lon2,nlat_regional,nlon_regional,nlon, nlat,& + nsig,nsig_soil,ijn_s,displs_s,eta1_ll,pt_ll,itotsub,aeta1_ll + use constants, only: zero,one,grav,fv,zero_single,rd_over_cp_mass,one_tenth,r10,r100 + use constants, only: r0_01, tiny_r_kind + use gsi_io, only: lendian_in + use general_sub2grid_mod, only: sub2grid_info,general_sub2grid_create_info + use general_sub2grid_mod, only: general_grid2sub,general_sub2grid + use gsi_bundlemod, only: GSI_BundleGetPointer + use gsi_metguess_mod, only: gsi_metguess_get,GSI_MetGuess_Bundle + use mpeu_util, only: die + use mod_wrfmass_to_a, only: wrfmass_h_to_a4 + + implicit none + +! Declare passed variables + integer(i_kind),intent(in):: mype + +! Declare local parameters + real(r_kind),parameter:: rough_default=0.05_r_kind + character(len=*),parameter::myname='read_wrf_mass_netcdf_guess::' + +! Declare local variables + + real(r_kind), pointer :: ges_ps_it (:,: )=>NULL() + real(r_kind), pointer :: ges_z_it (:,: )=>NULL() + + real(r_single) :: ges_vpt_it (lat2,lon2 ) +! other internal variables + type(sub2grid_info) grd + real(r_single),allocatable::temp1(:,:) + character(6) filename + integer(i_kind) ifld,im,jm,lm,num_mass_fields + integer(i_kind) i,icount,icount_prev,it,j,k + real(r_kind) psfc_this,psfc_this_dry,sm_this,xice_this + real(r_kind),dimension(lat2,lon2):: q_integral + real(r_kind) deltasigma + real(r_kind):: work_prsl,work_prslk + integer(i_kind) ier, istatus + integer(i_kind) nguess + logical regional + logical,allocatable :: vector(:) + integer(i_kind) inner_vars,num_fields + real(r_kind),allocatable :: work_sub(:,:,:,:),work_reg(:,:,:,:) + real(r_single) :: ges_ps(lat2,lon2) + real(r_single) :: bb(nlon,nlat) + +! WRF MASS input grid dimensions in module gridmod +! These are the following: +! 
im -- number of x-points on C-grid +! jm -- number of y-points on C-grid +! lm -- number of vertical levels ( = nsig for now) + + if(mype==0) write(6,*)' at 0 in read_wrf_mass_guess4ens' + regional=.true. + +! Big section of operations done only on first outer iteration + + im=nlon_regional + jm=nlat_regional + lm=nsig + nfldsig=1 + + do it=1,nfldsig + write(filename,'("sigf",i2.2)') it + open(lendian_in,file=filename,form='unformatted') ; rewind lendian_in + write(6,*)'READ_WRF_MASS_GUESS: open lendian_in=',lendian_in,' to file=',filename + +! get pointers for typical meteorological fields + ier=0 + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it),'ps',ges_ps_it, & + istatus );ier=ier+istatus + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it),'z', ges_z_it, & + istatus );ier=ier+istatus + if (ier/=0) call die(trim(myname),'cannot get pointers for met-fields,ier =',ier) +! +! skip some record + read(lendian_in) ! head + read(lendian_in) ! aeta1 + read(lendian_in) ! eta1 + read(lendian_in) ! glat,dx_mc + read(lendian_in) ! glon,dy_mc + + enddo +! + allocate(temp1(im,jm)) + + inner_vars=1 + num_fields=1 ! mu and qall + allocate(vector(num_fields)) + vector=.false. + call general_sub2grid_create_info(grd,inner_vars,nlat,nlon,1,num_fields,regional,vector) + allocate(work_reg(grd%nlat,grd%nlon,grd%kbegin_loc:grd%kend_alloc,1)) + allocate(work_sub(grd%lat2,grd%lon2,num_fields,1)) + +! read surface dry pressure: + read(lendian_in) ((temp1(i,j),i=1,im),j=1,jm) + if(nlon == nlon_regional .and. nlat == nlat_regional) then + bb=temp1 + else + call wrfmass_h_to_a4(temp1,bb) + endif + + do j=1,grd%nlat + do i=1,grd%nlon + work_reg(j,i,grd%kbegin_loc:grd%kend_alloc,1)=bb(i,j) + enddo + enddo +! next general_grid2sub to go to regional grid subdomains. + call general_grid2sub(grd,work_reg,work_sub) + ges_ps(:,:)=work_sub(:,:,1,1) + write(*,'(a,I5,2f15.7)') 'ges_ps=',mype,maxval(ges_ps_it), & + minval(ges_ps_it) + +! 
read qvapor total + read(lendian_in) ((temp1(i,j),i=1,im),j=1,jm) + if(nlon == nlon_regional .and. nlat == nlat_regional) then + bb=temp1 + else + call wrfmass_h_to_a4(temp1,bb) + endif + + do j=1,grd%nlat + do i=1,grd%nlon + work_reg(j,i,grd%kbegin_loc:grd%kend_alloc,1)=bb(i,j) + enddo + enddo +! next general_grid2sub to go to regional grid subdomains. + call general_grid2sub(grd,work_reg,work_sub) + ges_vpt_it(:,:)=work_sub(:,:,1,1) + write(*,'(a,I5,2f15.7)') 'ges_vpt_it=',mype,maxval(ges_vpt_it), & + minval(ges_vpt_it) + +! read topo + read(lendian_in) ((temp1(i,j),i=1,im),j=1,jm) + if(nlon == nlon_regional .and. nlat == nlat_regional) then + bb=temp1 + else + call wrfmass_h_to_a4(temp1,bb) + endif + + do j=1,grd%nlat + do i=1,grd%nlon + work_reg(j,i,grd%kbegin_loc:grd%kend_alloc,1)=bb(i,j) + enddo + enddo +! next general_grid2sub to go to regional grid subdomains. + call general_grid2sub(grd,work_reg,work_sub) + ges_z_it(:,:)=work_sub(:,:,1,1)/grav +! write(*,'(a,I5,2f15.7)') 'ges_z_it=',mype,maxval(ges_z_it), & +! minval(ges_z_it) + + close(lendian_in) + it=1 + do i=1,lon2 + do j=1,lat2 +! Convert psfc units of mb and then convert to log(psfc) in cb + psfc_this_dry=r0_01*ges_ps(j,i) + psfc_this=(psfc_this_dry-pt_ll)*ges_vpt_it(j,i)+pt_ll + ges_ps_it(j,i)=one_tenth*psfc_this ! convert from mb to cb + end do + end do +! write(*,*) 'final ps==',mype,maxval(ges_ps_it(:,:)),minval(ges_ps_it(:,:)) + + deallocate(work_reg) + deallocate(work_sub) + deallocate(temp1) + + return +end subroutine read_wrf_mass_netcdf_guess4ens +#else /* Start no WRF-library block */ +subroutine read_wrf_mass_netcdf_guess4ens(mype) +!$$$ subprogram documentation block +! . . . . +! subprogram: read_wrf_mass_netcdf_guess +! prgmmr: +! +! abstract: +! +! program history log: +! 2009-12-07 lueken - added subprogram doc block and implicit none +! +! input argument list: +! +! output argument list: +! +! attributes: +! language: f90 +! machine: ibm RS/6000 SP +! 
+!$$$ end documentation block + use kinds,only: i_kind + implicit none + integer(i_kind),intent(in)::mype + write(6,*)'READ_WRF_MASS_NETCDF_GUESS: dummy routine, does nothing!' +end subroutine read_wrf_mass_netcdf_guess4ens +#endif /* End no WRF-library block */ diff --git a/util/EnKF/arw/src/enspreproc_regional.fd/wrf_netcdf_interface4ens.F90 b/util/EnKF/arw/src/enspreproc_regional.fd/wrf_netcdf_interface4ens.F90 new file mode 100755 index 000000000..5666a1a7d --- /dev/null +++ b/util/EnKF/arw/src/enspreproc_regional.fd/wrf_netcdf_interface4ens.F90 @@ -0,0 +1,1267 @@ +#ifdef WRF +subroutine read_netcdf_mass4ens +!$$$ subprogram documentation block +! . . . . +! subprogram: read_netcdf_mass read wrf mass netcdf restart +! prgmmr: parrish org: np22 date: 2003-09-05 +! +! abstract: using wrf library routines, read a wrf mass core netcdf +! format restart file. write the result to temporary netcdf +! file expected by read_wrf_mass_guess. +! +! program history log: +! 2014-12-18 parrish +! +! input argument list: +! +! output argument list: +! +! NOTES: this is beginning of allowing direct connection of gsi to wrf files +! without seperate external interface. it is very inefficient, and +! later versions will be made to reduce the total i/o involved. +! +! attributes: +! language: f90 +! machine: ibm RS/6000 SP +! +!$$$ + + use kinds, only: r_single,i_kind, r_kind + use constants, only: h300,one + use gsi_4dvar, only: nhr_assimilation + use rapidrefresh_cldsurf_mod, only: l_cloud_analysis,l_gsd_soilTQ_nudge + use gsi_metguess_mod, only: gsi_metguess_get + use gridmod, only: wrf_mass_hybridcord + + implicit none + +! 
Declare local parameters + real(r_single),parameter:: one_single = 1.0_r_single + real(r_single),parameter:: r45 = 45.0_r_single + + character(len=120) :: flnm1 + character(len=19) :: DateStr1 + character(len=6) :: filename + integer(i_kind) :: dh1 + + integer(i_kind) :: iunit + + integer(i_kind) :: i,j,k + integer(i_kind) :: ndim1 + integer(i_kind) :: WrfType + integer(i_kind), dimension(4) :: start_index, end_index + character (len= 4) :: staggering=' N/A' + character (len= 3) :: ordering + + character (len=80), dimension(3) :: dimnames + character (len=80) :: SysDepInfo + + integer(i_kind) :: nguess, ierr, Status, Status_next_time, n + +! binary stuff + +! rmse stuff + + character (len=31) :: rmse_var + integer(i_kind) iyear,imonth,iday,ihour,iminute,isecond + integer(i_kind) nlon_regional,nlat_regional,nsig_regional,nsig_soil_regional + real(r_single) pt_regional + real(r_single) rdx,rdy + real(r_single),allocatable::field3(:,:,:),field2(:,:),field1(:),field2b(:,:),field2c(:,:) + real(r_single),allocatable::field3u(:,:,:),field3v(:,:,:),field1a(:) + integer(i_kind),allocatable::ifield2(:,:) + real(r_single) rad2deg_single + integer(i_kind) wrf_real + real(r_kind),allocatable :: q_integral(:,:) + real(r_kind) deltasigma + data iunit / 15 / + + wrf_real=104 + end_index=0 + start_index=0 + +! transfer code from diffwrf for converting netcdf wrf nmm restart file +! to temporary binary format + + call ext_ncd_ioinit(sysdepinfo,status) + call set_wrf_debug_level ( 5 ) + + nhr_assimilation=1 + n_loop: do n=1,1 ! 
loop over forecast hours in assim interval + + if(n==nhr_assimilation)then + flnm1 = 'wrf_inout' + else + write(flnm1,'("wrf_inou",i1.1)')n + endif + + call ext_ncd_open_for_read( trim(flnm1), 0, 0, "", dh1, Status) + if(n==nhr_assimilation)then + if ( Status /= 0 )then + write(6,*)'CONVERT_NETCDF_MASS: problem with flnm1 = ',& + trim(flnm1),', Status = ', Status + call stop2(74) + endif + else + if ( Status /= 0 )then + write(6,*)'CONVERT_NETCDF_MASS: problem with flnm1 = ',& + trim(flnm1),', Status = ', Status + cycle n_loop + endif + endif + + + write(filename,'("sigf",i2.2)') n + open(iunit,file=filename,form='unformatted') + + write(6,*)' dh1 = ',dh1 !DEDE + +!------------- get date info + + call ext_ncd_get_next_time(dh1, DateStr1, Status_next_time) + read(DateStr1,'(i4,1x,i2,1x,i2,1x,i2,1x,i2,1x,i2)') iyear,imonth,iday,ihour,iminute,isecond + write(6,*)' iy,m,d,h,m,s=',iyear,imonth,iday,ihour,iminute,isecond + +! write(6,*)' dh1 = ',dh1 !DEDE + +!------------- get grid info + rmse_var='SMOIS' + + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) !DEDE + + write(6,*)' dh1 = ',dh1 !DEDE + write(6,*)'rmse_var = ',trim(rmse_var) + write(6,*)'ndim1 = ',ndim1 + write(6,*)'ordering = ',trim(ordering) + write(6,*)'staggering = ',trim(staggering) + write(6,*)'start_index = ',start_index + write(6,*)'end_index = ',end_index + write(6,*)'WrfType = ',WrfType + write(6,*)'ierr = ',ierr !DEDE + + nlon_regional=end_index(1) + nlat_regional=end_index(2) + nsig_soil_regional=end_index(3) + + rmse_var='T' + + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) !DEDE + + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1 = ',ndim1,' dh1 = ',dh1 + write(6,*)' WrfType = ',WrfType,'ierr = ',ierr !DEDE + write(6,*)' ordering = ',trim(ordering),' staggering = ',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + + 
nlon_regional=end_index(1) + nlat_regional=end_index(2) + nsig_regional=end_index(3) + write(6,*)' nlon,lat,sig_regional=',nlon_regional,nlat_regional,nsig_regional + allocate(field2(nlon_regional,nlat_regional),field3(nlon_regional,nlat_regional,nsig_regional+1)) + allocate(field2b(nlon_regional,nlat_regional),field2c(nlon_regional,nlat_regional)) + allocate(ifield2(nlon_regional,nlat_regional)) + allocate(q_integral(nlon_regional,nlat_regional)) + allocate(field1(max(nlon_regional,nlat_regional,nsig_regional))) + allocate(field1a(max(nlon_regional,nlat_regional,nsig_regional))) + + rmse_var='P_TOP' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr !DEDE + write(6,*)' ordering = ',trim(ordering),' staggering = ',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + pt_regional,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + write(6,*)' p_top=',pt_regional + + write(iunit) iyear,imonth,iday,ihour,iminute,isecond, & + nlon_regional,nlat_regional,nsig_regional,pt_regional,nsig_soil_regional + + if(wrf_mass_hybridcord) then + rmse_var='C3H' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering,& + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr + write(6,*)' ordering = ',trim(ordering),' staggering =',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field1,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + 
start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + do k=1,nsig_regional + write(6,*)' k,c3h(k)=',k,field1(k) + end do + rmse_var='C4H' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering,& + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr + write(6,*)' ordering = ',trim(ordering),' staggering =',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field1a,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + do k=1,nsig_regional + write(6,*)' k,c4h(k)=',k,field1a(k) + end do + write(iunit)field1(1:nsig_regional),field1a(1:nsig_regional) ! c3h,c4h + + rmse_var='C3F' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering,& + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr + write(6,*)' ordering = ',trim(ordering),' staggering =',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field1,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + do k=1,nsig_regional+1 + write(6,*)' k,c3f(k)=',k,field1(k) + end do + rmse_var='C4F' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering,& + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr + write(6,*)' ordering = ',trim(ordering),' staggering =',trim(staggering) + write(6,*)' start_index = 
',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field1a,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + do k=1,nsig_regional+1 + write(6,*)' k,c3f(k)=',k,field1a(k) + end do + write(iunit)field1(1:nsig_regional+1),field1a(1:nsig_regional+1) !c3f,c4f + else + + rmse_var='ZNU' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering,& + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr + write(6,*)' ordering = ',trim(ordering),' staggering =',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field1,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + do k=1,nsig_regional + write(6,*)' k,znu(k)=',k,field1(k) + end do + field1a=0.0_r_single + write(iunit)field1(1:nsig_regional),field1a(1:nsig_regional) ! 
ZNU + + rmse_var='ZNW' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering,& + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr + write(6,*)' ordering = ',trim(ordering),' staggering =',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field1,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + do k=1,nsig_regional+1 + write(6,*)' k,znw(k)=',k,field1(k) + end do + field1a=0.0_r_single + write(iunit)field1(1:nsig_regional+1),field1a(1:nsig_regional+1) ! ZNW + endif + + rmse_var='RDX' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr !DEDE + write(6,*)' ordering = ',trim(ordering),' staggering = ',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + rdx,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + write(6,*)' 1/rdx=',one_single/rdx + + rmse_var='RDY' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr !DEDE + write(6,*)' ordering = ',trim(ordering),' staggering = ',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + rdy,WRF_REAL,0,0,0,ordering, & + 
staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + write(6,*)' 1/rdy=',one_single/rdy + + rmse_var='MAPFAC_M' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr !DEDE + write(6,*)' ordering = ',trim(ordering),' staggering = ',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field2,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + write(6,*)' max,min mapfac_m=',maxval(field2),minval(field2) + write(6,*)' max,min MAPFAC_M(:,1)=',maxval(field2(:,1)),minval(field2(:,1)) + write(6,*)' max,min MAPFAC_M(1,:)=',maxval(field2(1,:)),minval(field2(1,:)) + write(6,*)' mapfac_m(1,1),mapfac_m(nlon,1)=',field2(1,1),field2(nlon_regional,1) + write(6,*)' mapfac_m(1,nlat),mapfac_m(nlon,nlat)=', & + field2(1,nlat_regional),field2(nlon_regional,nlat_regional) + field2b=one_single/(field2*rdx) !DX_MC + field2c=one_single/(field2*rdy) !DY_MC + + rad2deg_single=r45/atan(one_single) + rmse_var='XLAT' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr !DEDE + write(6,*)' ordering = ',trim(ordering),' staggering = ',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field2,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + 
write(6,*)' max,min XLAT(:,1)=',maxval(field2(:,1)),minval(field2(:,1)) + write(6,*)' max,min XLAT(1,:)=',maxval(field2(1,:)),minval(field2(1,:)) + write(6,*)' xlat(1,1),xlat(nlon,1)=',field2(1,1),field2(nlon_regional,1) + write(6,*)' xlat(1,nlat),xlat(nlon,nlat)=', & + field2(1,nlat_regional),field2(nlon_regional,nlat_regional) + field2=field2/rad2deg_single + write(iunit)field2,field2b !XLAT,DX_MC + + rmse_var='XLONG' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr !DEDE + write(6,*)' ordering = ',trim(ordering),' staggering = ',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field2,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + write(6,*)' max,min XLONG(:,1)=',maxval(field2(:,1)),minval(field2(:,1)) + write(6,*)' max,min XLONG(1,:)=',maxval(field2(1,:)),minval(field2(1,:)) + write(6,*)' xlong(1,1),xlong(nlon,1)=',field2(1,1),field2(nlon_regional,1) + write(6,*)' xlong(1,nlat),xlong(nlon,nlat)=', & + field2(1,nlat_regional),field2(nlon_regional,nlat_regional) + field2=field2/rad2deg_single + write(iunit)field2,field2c !XLONG,DY_MC + + rmse_var='MUB' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr !DEDE + write(6,*)' ordering = ',trim(ordering),' staggering = ',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field2,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + 
start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + write(6,*)' max,min MUB=',maxval(field2),minval(field2) + + rmse_var='MU' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr !DEDE + write(6,*)' ordering = ',trim(ordering),' staggering = ',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field2b,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + write(6,*)' max,min MU=',maxval(field2b),minval(field2b) + field2=field2b+field2+pt_regional + write(6,*)' max,min psfc0=',maxval(field2),minval(field2) + write(iunit)field2 ! psfc0 + + rmse_var='QVAPOR' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr !DEDE + write(6,*)' ordering = ',trim(ordering),' staggering = ',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + q_integral=one + do k=1,nsig_regional + write(6,*)' k,max,min,mid q=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + deltasigma = field1(k)-field1(k+1) + do j=1,nlat_regional + do i=1,nlon_regional + q_integral(i,j)=q_integral(i,j) + deltasigma * field3(i,j,k) + enddo + enddo + enddo + field2=q_integral + write(6,*)' 
k,max,min,mid qall=',k,maxval(field2(:,:)),minval(field2(:,:)), & + field2(nlon_regional/2,nlat_regional/2) + write(iunit)((field2(i,j),i=1,nlon_regional),j=1,nlat_regional) + + rmse_var='PHB' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr + write(6,*)' ordering = ',trim(ordering),' staggering = ',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + k=1 + write(6,*)' k,max,min,mid PHB=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + write(iunit)((field3(i,j,k),i=1,nlon_regional),j=1,nlat_regional) !PHB (zsfc*g) + + rmse_var='T' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) + write(6,*)' rmse_var = ',trim(rmse_var),' ndim1=',ndim1 + write(6,*)' WrfType = ',WrfType,' WRF_REAL=',WRF_REAL,'ierr = ',ierr !DEDE + write(6,*)' ordering = ',trim(ordering),' staggering = ',trim(staggering) + write(6,*)' start_index = ',start_index,' end_index = ',end_index + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + field3=field3+h300 + do k=1,nsig_regional + write(6,*)' k,max,min,mid T=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + write(iunit)((field3(i,j,k),i=1,nlon_regional),j=1,nlat_regional) ! POT TEMP (sensible??) 
+ end do + + deallocate(field1,field2,field2b,field2c,ifield2,field3) + close(iunit) + call ext_ncd_ioclose(dh1, Status) + + enddo n_loop + +end subroutine read_netcdf_mass4ens + +subroutine update_netcdf_mass4ens +!$$$ subprogram documentation block +! . . . . +! subprogram: update_netcdf_mass create netcdf format wrf restart file from internal binary file. +! prgmmr: +! +! abstract: create netcdf format wrf restart file from internal binary file +! +! program history log: +! 2004-11-05 treadon - add return code 75 for error stop +! 2004-12-15 treadon - remove get_lun, read guess from file "wrf_inout" +! 2005-12-09 middlecoff - initialize character variable staggering and removed staggering1,staggering2 +! 2006-04-06 middlecoff - added read of SM and SICE to match the writes in wrwrfmass.F90 +! and read in the rest of the fields to match the writes in wrwrfmass.F90 +! 2006-06-09 liu - bug fix: replace SM and SICE with SMOIS and XICE +! 2009-08-14 lueken - update documentation +! 2010-03-29 Hu - add code to update 5 cloud/hydrometeor variables for cloud analysis +! 2008-03-29 Hu - bug fix: replace XICE with SEAICE and +! comment out update for SMOIS (the actually +! variable is Landmask there). +! 2012-01-09 Hu - add code to update START_TIME to analysis time +! 2012-04-13 Whitaker - clip positive definite quantities to tiny_single +! 2014-03-12 hu - add code to read ges_q2 (2m Q), +! Qnr(rain number concentration), +! and nsoil (number of soil levels) +! +! input argument list: +! +! output argument list: +! +! attributes: +! language: f90 +! machine: +! +!$$$ end documentation block + + use kinds, only: r_single,i_kind,r_kind + use constants, only: h300,tiny_single + use rapidrefresh_cldsurf_mod, only: l_cloud_analysis,l_gsd_soilTQ_nudge + use gsi_metguess_mod, only: gsi_metguess_get,GSI_MetGuess_Bundle + use gsi_bundlemod, only: GSI_BundleGetPointer + use guess_grids, only: ntguessig + use obsmod, only: iadate + + implicit none + + include 'netcdf.inc' + +! 
Declare local parameters + + character(len=120) :: flnm1,flnm2 + character(len=19) :: DateStr1 + integer(i_kind) :: dh1,iw3jdn + + integer(i_kind) :: iunit + + integer(i_kind) :: i,j,k + integer(i_kind) :: ndim1 + integer(i_kind) :: WrfType + integer(i_kind), dimension(4) :: start_index, end_index1 + character (len= 4) :: staggering=' N/A' + character (len= 3) :: ordering + + character (len=80), dimension(3) :: dimnames + character (len=80) :: SysDepInfo + + + integer(i_kind) :: it, nguess, ierr, istatus, Status, Status_next_time + real(r_kind), pointer :: ges_qc(:,:,:) + real(r_kind), pointer :: ges_qi(:,:,:) + real(r_kind), pointer :: ges_qr(:,:,:) + real(r_kind), pointer :: ges_qs(:,:,:) + real(r_kind), pointer :: ges_qg(:,:,:) + +! binary stuff + +! rmse stuff + + character (len=31) :: rmse_var + + integer(i_kind) iyear,imonth,iday,ihour,iminute,isecond + integer(i_kind) nlon_regional,nlat_regional,nsig_regional,nsig_soil_regional + real(r_single) pt_regional,pdtop_regional,dy_nmm + real(r_single),allocatable::field3(:,:,:),field2(:,:),field1(:),field2b(:,:) + real(r_single),allocatable::field3u(:,:,:),field3v(:,:,:) + integer(i_kind),allocatable::ifield2(:,:) + integer(i_kind) wrf_real + data iunit / 15 / + wrf_real=104 + end_index1=0 + +! Inquire about guess fields + call gsi_metguess_get('dim',nguess,ierr) + if (nguess>0) then +! get pointer to relevant instance of cloud-related backgroud + it=ntguessig + ierr=0 + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'ql', ges_qc, istatus );ierr=ierr+istatus + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qi', ges_qi, istatus );ierr=ierr+istatus + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qr', ges_qr, istatus );ierr=ierr+istatus + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qs', ges_qs, istatus );ierr=ierr+istatus + call GSI_BundleGetPointer ( GSI_MetGuess_Bundle(it), 'qg', ges_qg, istatus );ierr=ierr+istatus + if (ierr/=0) nguess=0 + end if + +! 
transfer code from diffwrf for converting netcdf wrf nmm restart file +! to temporary binary format + +! +! update mass core netcdf file with analysis variables from 3dvar +! + flnm1='wrf_inout' + call ext_ncd_open_for_update( trim(flnm1), 0, 0, "", dh1, Status) + if ( Status /= 0 )then + write(6,*)'UPDATE_NETCDF_MASS: problem with flnm1 = ',& + trim(flnm1),', Status = ', Status + call stop2(75) + endif + + + close(51) + flnm2='siganl' + open(iunit,file=flnm2,form='unformatted') + + +!------------- get date info + + call ext_ncd_get_next_time(dh1, DateStr1, Status_next_time) + read(DateStr1,'(i4,1x,i2,1x,i2,1x,i2,1x,i2,1x,i2)') iyear,imonth,iday,ihour,iminute,isecond + write(6,*)' iy,m,d,h,m,s=',iyear,imonth,iday,ihour,iminute,isecond + +!------------- get grid info + rmse_var='SMOIS' + call ext_ncd_get_var_info (dh1,rmse_var,ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + nlon_regional=end_index1(1) + nlat_regional=end_index1(2) + nsig_soil_regional=end_index1(3) + + rmse_var='T' + call ext_ncd_get_var_info (dh1,rmse_var,ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + nlon_regional=end_index1(1) + nlat_regional=end_index1(2) + nsig_regional=end_index1(3) + write(6,*)' nlon,lat,sig_regional=',nlon_regional,nlat_regional,nsig_regional + allocate(field2(nlon_regional,nlat_regional),field3(nlon_regional,nlat_regional,nsig_regional)) + allocate(field3u(nlon_regional+1,nlat_regional,nsig_regional)) + allocate(field3v(nlon_regional,nlat_regional+1,nsig_regional)) + allocate(field2b(nlon_regional,nlat_regional)) + allocate(ifield2(nlon_regional,nlat_regional)) + allocate(field1(max(nlon_regional,nlat_regional,nsig_regional))) + + rmse_var='P_TOP' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + 
write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + pt_regional,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + write(6,*)' p_top=',pt_regional + read(iunit) ! iyear,imonth,iday,ihour,iminute,isecond, & +! nlon_regional,nlat_regional,nsig_regional,pt_regional + + read(iunit) ! field1(1:nsig_regional) ! AETA1 (ZNU) + + read(iunit) ! field1(1:nsig_regional+1) ! ETA1 (ZNW) + + read(iunit) ! field2 !XLAT,DX_MC + + read(iunit) ! field2 !XLONG,DY_MC + + rmse_var='MUB' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field2,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + write(6,*)' max,min MUB=',maxval(field2),minval(field2) + + read(iunit) field2b !psfc + write(6,*)' max,min psfc=',maxval(field2b),minval(field2b) + field2b=field2b-field2-pt_regional + write(6,*)' max,min MU=',maxval(field2b),minval(field2b) + rmse_var='MU' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call 
ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field2b,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + read(iunit) ! field2 ! PHB (FIS) + + do k=1,nsig_regional + read(iunit)((field3(i,j,k),i=1,nlon_regional),j=1,nlat_regional) ! T + write(6,*)' k,max,min,mid T=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + end do + field3=field3-h300 + rmse_var='T' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + do k=1,nsig_regional + read(iunit)((field3(i,j,k),i=1,nlon_regional),j=1,nlat_regional) ! 
Q + write(6,*)' k,max,min,mid Q=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + end do + rmse_var='QVAPOR' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + where (field3 < tiny_single) field3 = tiny_single + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + do k=1,nsig_regional + read(iunit)((field3u(i,j,k),i=1,nlon_regional+1),j=1,nlat_regional) ! U + write(6,*)' k,max,min,mid U=',k,maxval(field3u(:,:,k)),minval(field3u(:,:,k)), & + field3u(nlon_regional/2,nlat_regional/2,k) + end do + rmse_var='U' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3u,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + do k=1,nsig_regional + read(iunit)((field3v(i,j,k),i=1,nlon_regional),j=1,nlat_regional+1) ! 
V + write(6,*)' k,max,min,mid V=',k,maxval(field3v(:,:,k)),minval(field3v(:,:,k)), & + field3v(nlon_regional/2,nlat_regional/2,k) + end do + rmse_var='V' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3v,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + read(iunit) field2 ! LANDMASK + write(6,*)'max,min LANDMASK=',maxval(field2),minval(field2) + + read(iunit) field2 ! SEAICE + write(6,*)'max,min SEAICE=',maxval(field2),minval(field2) + rmse_var='SEAICE' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field2,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + read(iunit) field2 !SST + write(6,*)' max,min SST=',maxval(field2),minval(field2) + rmse_var='SST' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' 
staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field2,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + +! Read in the rest of the fields + if(l_gsd_soilTQ_nudge) then + do k=4,9 + read(iunit) field2 !Rest of the fields + write(6,*)'read max,min REST',k,maxval(field2),minval(field2) + end do + + do k=1,nsig_soil_regional + read(iunit)((field3(i,j,k),i=1,nlon_regional),j=1,nlat_regional) ! smois + write(6,*)' k,max,min,mid SMOIS=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + end do + rmse_var='SMOIS' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + do k=1,nsig_soil_regional + read(iunit)((field3(i,j,k),i=1,nlon_regional),j=1,nlat_regional) ! 
tslb + write(6,*)' k,max,min,mid TSLB=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + end do + rmse_var='TSLB' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + else + do k=4,11 ! corrected according to Ming Hu's finding + + read(iunit) field2 !Rest of the fields + write(6,*)'read max,min REST',k,maxval(field2),minval(field2) + end do + endif + + read(iunit) field2 !TSK + write(6,*)' max,min TSK=',maxval(field2),minval(field2) + rmse_var='TSK' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field2,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + read(iunit) field2 !Q2 + write(6,*)' max,min Q2=',maxval(field2),minval(field2) + rmse_var='Q2' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' 
WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field2,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + if(l_gsd_soilTQ_nudge) then + read(iunit) field2 !SOILT1 + write(6,*)' max,min SOILT1 d=',maxval(field2),minval(field2) + rmse_var='SOILT1' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field2,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + read(iunit) field2 !TH2 + write(6,*)' max,min TH2 d=',maxval(field2),minval(field2) + rmse_var='TH2' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field2,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + endif + + if (l_cloud_analysis .or. 
nguess>0) then + do k=1,nsig_regional + read(iunit)((field3(i,j,k),i=1,nlon_regional),j=1,nlat_regional) ! Qc + write(6,*)' k,max,min,mid Qc=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + end do + rmse_var='QCLOUD' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + where (field3 < tiny_single) field3 = tiny_single + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + do k=1,nsig_regional + read(iunit)((field3(i,j,k),i=1,nlon_regional),j=1,nlat_regional) ! Qr + write(6,*)' k,max,min,mid Qr=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + end do + rmse_var='QRAIN' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + where (field3 < tiny_single) field3 = tiny_single + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + do k=1,nsig_regional + read(iunit)((field3(i,j,k),i=1,nlon_regional),j=1,nlat_regional) ! 
Qs + write(6,*)' k,max,min,mid Qs=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + end do + rmse_var='QSNOW' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + where (field3 < tiny_single) field3 = tiny_single + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + do k=1,nsig_regional + read(iunit)((field3(i,j,k),i=1,nlon_regional),j=1,nlat_regional) ! Qi + write(6,*)' k,max,min,mid Qi=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + end do + rmse_var='QICE' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + where (field3 < tiny_single) field3 = tiny_single + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + do k=1,nsig_regional + read(iunit)((field3(i,j,k),i=1,nlon_regional),j=1,nlat_regional) ! 
Qg + write(6,*)' k,max,min,mid Qg=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + end do + rmse_var='QGRAUP' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + where (field3 < tiny_single) field3 = tiny_single + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + do k=1,nsig_regional + read(iunit)((field3(i,j,k),i=1,nlon_regional),j=1,nlat_regional) ! Qnr + write(6,*)' k,max,min,mid Qnr=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + end do + rmse_var='QNRAIN' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + where (field3 < tiny_single) field3 = tiny_single + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + do k=1,nsig_regional + read(iunit)((field3(i,j,k),i=1,nlon_regional),j=1,nlat_regional) ! 
TTEN + write(6,*)' k,max,min,mid TTEN=',k,maxval(field3(:,:,k)),minval(field3(:,:,k)), & + field3(nlon_regional/2,nlat_regional/2,k) + end do + rmse_var='RAD_TTEN_DFI' + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index1, WrfType, ierr ) + write(6,*)' rmse_var=',trim(rmse_var) + write(6,*)' ordering=',ordering + write(6,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(6,*)' ndim1=',ndim1 + write(6,*)' staggering=',staggering + write(6,*)' start_index=',start_index + write(6,*)' end_index1=',end_index1 + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index1, & !dom + start_index,end_index1, & !mem + start_index,end_index1, & !pat + ierr ) + + endif ! l_cloud_analysis + + deallocate(field1,field2,field2b,ifield2,field3,field3u,field3v) + call ext_ncd_ioclose(dh1, Status) + close(iunit) + ! + ! reopen, update global attributes. + ! + ierr = NF_OPEN(trim(flnm1), NF_WRITE, dh1) + IF (ierr .NE. NF_NOERR) print *, 'OPEN ',NF_STRERROR(ierr) + ierr = NF_PUT_ATT_TEXT(dh1,NF_GLOBAL,'START_DATE',len_trim(DateStr1),DateStr1) + IF (ierr .NE. NF_NOERR) print *,'PUT START_DATE', NF_STRERROR(ierr) + ierr = NF_PUT_ATT_TEXT(dh1,NF_GLOBAL,'SIMULATION_START_DATE',len_trim(DateStr1),DateStr1) + IF (ierr .NE. NF_NOERR) print *,'PUT SIMULATION_START_DATE', NF_STRERROR(ierr) + ierr = NF_PUT_ATT_REAL(dh1,NF_GLOBAL,'GMT',NF_FLOAT,1,float(iadate(4))) + IF (ierr .NE. NF_NOERR) print *,'PUT GMT', NF_STRERROR(ierr) + ierr = NF_PUT_ATT_INT(dh1,NF_GLOBAL,'JULYR',NF_INT,1,iadate(1)) + IF (ierr .NE. NF_NOERR) print *,'PUT JULYR', NF_STRERROR(ierr) + ierr=NF_PUT_ATT_INT(dh1,NF_GLOBAL,'JULDAY',NF_INT,1,iw3jdn(iyear,imonth,iday)-iw3jdn(iyear,1,1)+1) + IF (ierr .NE. NF_NOERR) print *,'PUT JULDAY', NF_STRERROR(ierr) + ierr = NF_CLOSE(dh1) + IF (ierr .NE. 
NF_NOERR) print *, 'CLOSE ',NF_STRERROR(ierr) + +end subroutine update_netcdf_mass4ens + +#else /* Start no WRF-library block */ + +subroutine read_netcdf_mass4ens +!$$$ subprogram documentation block +! . . . . +! subprogram: convert_netcdf_mass +! pgrmmr: +! +! abstract: dummy call... does nothing +! +! program history log: +! 2009-08-14 lueken - added subprogram doc block +! +! input argument list: +! +! output argument list: +! +! attributes: +! language: f90 +! machine: +! +!$$$ end documentation block + implicit none + + write(6,*)'CONVERT_NETCDF_MASS: ***WARNING*** dummy call ... does nothing!' + return +end subroutine read_netcdf_mass4ens + +subroutine update_netcdf_mass4ens +!$$$ subprogram documentation block +! . . . . +! subprogram: update_netcdf_mass +! pgrmmr: +! +! abstract: dummy call... does nothing +! +! program history log: +! 2009-08-14 lueken - added subprogram doc block +! +! input argument list: +! +! output argument list: +! +! attributes: +! language: f90 +! machine: +! +!$$$ end documentation block + implicit none + + write(6,*)'UPDATE_NETCDF_MASS: ***WARNING*** dummy call ... does nothing!' 
+ return +end subroutine update_netcdf_mass4ens + +#endif /* end NO WRF-library block */ + diff --git a/util/EnKF/arw/src/initialens_regional.fd/CMakeLists.txt b/util/EnKF/arw/src/initialens_regional.fd/CMakeLists.txt new file mode 100644 index 000000000..f2064c73b --- /dev/null +++ b/util/EnKF/arw/src/initialens_regional.fd/CMakeLists.txt @@ -0,0 +1,12 @@ +cmake_minimum_required(VERSION 2.6) + file(GLOB LOCAL_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.f90) + set_source_files_properties( ${LOCAL_SRC} PROPERTIES COMPILE_FLAGS ${GSI_Fortran_FLAGS} ) + include_directories( "${PROJECT_BINARY_DIR}/include" ${NETCDF_INCLUDES} ${MPI_Fortran_INCLUDE_PATH} ) + + add_executable(initialens.x ${LOCAL_SRC} ) + set_target_properties( initialens.x PROPERTIES COMPILE_FLAGS ${GSI_Fortran_FLAGS} ) + target_link_libraries( initialens.x ${GSISHAREDLIB} ${GSILIB} ${GSISHAREDLIB} ${WRF_LIBRARIES} + ${NETCDF_LIBRARIES_F90} ${NETCDF_LIBRARIES} ${HDF5_Fortran_HL_LIBRARIES} ${MPI_Fortran_LIBRARIES} + ${LAPACK_LIBRARIES} -L./ ${EXTRA_LINKER_FLAGS} ${HDF5_LIBRARIES} ${CURL_LIBRARIES} ${CORE_LIBRARIES} ${CORE_BUILT} + ${GSI_LDFLAGS} ${NCDIAG_LIBRARIES} ${ZLIB_LIBRARIES} ${wrflib} ) + add_dependencies(initialens.x ${GSILIB}) diff --git a/util/EnKF/arw/src/initialens_regional.fd/initial_arw_ens.f90 b/util/EnKF/arw/src/initialens_regional.fd/initial_arw_ens.f90 new file mode 100644 index 000000000..55aa44ac2 --- /dev/null +++ b/util/EnKF/arw/src/initialens_regional.fd/initial_arw_ens.f90 @@ -0,0 +1,322 @@ +program initial_arw_ens +!$$$ subprogram documentation block +! . . . . +! subprogram: initial_arw_ens +! prgmmr: Hu org: GSD date: 2015-03-24 +! +! abstract: read pertubations on ARW A grid and generate initial files for ARW +! ensembles +! +! +! program history log: +! 2015-03-23 Hu , initial documentation +! +! input argument list: +! +! output argument list: +! +! attributes: +! language: f90 +! machine: zeus +! 
+!$$$ end documentation block + + use mpimod, only: npe,mpi_comm_world,ierror,mype + use mpeu_util, only: die + use kinds, only: r_kind,i_kind,r_single + use constants, only : half +! use constants, only : max_varname_length + implicit none + INCLUDE 'netcdf.inc' + + integer(i_kind),parameter :: max_varname_length=12 + integer :: n_ens + integer(i_kind),allocatable :: beginmem(:),endmem(:) + integer :: nlon,nlat,nsig,num_fields + integer(i_kind) :: nc2d,nc3d + character(len=max_varname_length),allocatable,dimension(:) :: cvars2d + character(len=max_varname_length),allocatable,dimension(:) :: cvars3d + + real(r_single),allocatable,dimension(:,:,:)::en_perts + real(r_kind),dimension(:,:),allocatable:: workh + + real(r_single),allocatable,dimension(:,:,:):: field3 + +! Declare netcdf parameters + + character(len=120) :: flnm1 + character(len=120) :: flnm_new + character(len=19) :: DateStr1 + integer(i_kind) :: dh1 + integer(i_kind) :: dh2 + + integer(i_kind) :: Status, Status_next_time + integer(i_kind) :: iyear,imonth,iday,ihour,iminute,isecond + integer(i_kind) :: iw3jdn,JDATE(8),IDATE(8) + real(r_single) :: rinc(5), timediff + + character (len=80) :: SysDepInfo + character (len=31) :: rmse_var +! +! inflation factor +! + integer, parameter :: infltlvl=100 + real(r_single) :: infltnfct_t(infltlvl),infltnfct_uv(infltlvl),infltnfct_q(infltlvl),infltnfct_ps + real(r_single) :: infltnfct(100) +! +! Declare variables. +! + logical :: ifinflt_column + namelist/setup/ n_ens,ifinflt_column +! +! +! + integer(i_kind) i,j,k,n + integer(i_kind) ic2,ic3 + character(255) filename + integer(i_kind),dimension(4):: idate4 + integer(i_kind) im,i0 + + integer(i_kind) :: iunit,iout + integer :: ios + character(len=80) :: myname_ + character(len=80) :: filestdout + logical :: lexist +! +! +! MPI + call MPI_INIT(ierror) + call mpi_comm_size(mpi_comm_world,npe,ierror) + call mpi_comm_rank(mpi_comm_world,mype,ierror) +! + ifinflt_column=.false. + n_ens=0 +! 
+ allocate(beginmem(npe),endmem(npe)) +! + iout=13 + write(filestdout,'(a,I4.4)') 'stdout_pe',mype+1 + open(iout,file=trim(filestdout)) + open(11,file='namelist.input') + read(11,setup,iostat=ios) + if(ios/=0) call die(myname_,'read(setup)',ios) + close(11) + if(n_ens > 0) then + write(iout,*) 'the ensemble member number==',n_ens + write(iout,*) 'if turn on vertical inflation factor is ==',ifinflt_column + else + write(iout,*) 'wrong ensemble member number==',n_ens + stop + endif +! + infltnfct=1.0_r_single +! + if(ifinflt_column) then + inquire(file='vertical_inflate_factor.txt',exist=lexist) + if(lexist) then + i=1 + open(10,file='vertical_inflate_factor.txt') + read(10,*) +100 continue + read(10,'(I10,f10.4)',end=110) k,infltnfct(i) + i=i+1 + goto 100 +110 continue + close(10) + endif + endif + infltnfct_t=infltnfct + infltnfct_uv=infltnfct + infltnfct_q=infltnfct + infltnfct_ps=infltnfct(1) +! +! figure out the begin and end of member for each core +! + n=n_ens/npe + k=mod(n_ens,npe) + beginmem(1)=1 + do i=1,npe + if(i>1) beginmem(i)=endmem(i-1)+1 + if(i<=k) then + endmem(i)=beginmem(i)+n + else + endmem(i)=beginmem(i)+n-1 + endif + enddo + write(iout,*) 'beginmem=',mype+1,beginmem(mype+1) + write(iout,*) 'endmem=',mype+1,endmem(mype+1) +! open netcdf file to read + call ext_ncd_ioinit(sysdepinfo,status) +! + flnm1='wrf_inout' + call ext_ncd_open_for_read( trim(flnm1), 0, 0, "", dh1, Status) + if ( Status /= 0 )then + write(iout,*)'save_soil_netcdf_mass: cannot open flnm1 = ',& + trim(flnm1),', Status = ', Status + stop 74 + endif +! +!------------- get date info from file read in + + call ext_ncd_get_next_time(dh1, DateStr1, Status_next_time) + read(DateStr1,'(i4,1x,i2,1x,i2,1x,i2,1x,i2,1x,i2)') & + iyear,imonth,iday,ihour,iminute,isecond + write(iout,'(a,6I5)')' read data from file at time (y,m,d,h,m,s):' & + ,iyear,imonth,iday,ihour,iminute,isecond +! +! 
get dimensions + iunit=20 + write(filename,'(a,I4.4)') 'en_perts4arw.mem',1 + write(iout,*) 'read dimension from ', trim(filename) + open(iunit,file=trim(filename),form='unformatted') + read(iunit) nc3d,nc2d + write(*,*) 'dimension is =',nc3d,nc2d + allocate(cvars3d(nc3d),cvars2d(nc2d)) + rewind(iunit) + read(iunit) nc3d,nc2d,cvars3d,cvars2d + read(iunit) nlat,nlon,nsig + close(iunit) + write(iout,*) 'nlat,nlon,nsig=',nlat,nlon,nsig + write(iout,'(I5,A10,10A6)') nc3d,'cvars3d=',(trim(cvars3d(ic3)),ic3=1,nc3d) + write(iout,'(I5,A10,10A6)') nc2d,'cvars2d=',(trim(cvars2d(ic2)),ic2=1,nc2d) + + num_fields=nc3d*nsig+nc2d + allocate(workh(nlat,nlon)) + allocate(en_perts(nlat,nlon,num_fields)) + +! check inflate factor + write(*,*) 'inflate factor' + write(*,'(4a10)') 'level','T', 'UV','q' + do k=1, nsig + write(*,'(I10,3f10.4)') k,infltnfct_t(k),infltnfct_uv(k),infltnfct_q(k) + enddo + write(*,'(a,f10.4)') 'surface pressure inflate factor=',infltnfct_ps + +! +! read perturbations +! + do n=beginmem(mype+1),endmem(mype+1) + + write(filename,'(a,I4.4)') 'en_perts4arw.mem',n + write(iout,*) + write(iout,*) 'read perturbations for ', trim(filename) + open(iunit,file=trim(filename),form='unformatted') + read(iunit) + read(iunit) + + do k=1,num_fields + + read(iunit) workh +! 
write(*,*) k,maxval(workh),minval(workh) + do j=1,nlon + do i=1,nlat + en_perts(i,j,k)=workh(i,j) + end do + end do + + end do + + close(iunit) + + write(flnm_new,'(a,I4.4)') 'wrfinput_d01.mem',n + call ext_ncd_open_for_update( trim(flnm_new), 0, 0, "", dh2, Status) + if ( Status /= 0 )then + write(iout,*)'gen_initial_ensemble: cannot open flnm = ',& + trim(flnm_new),', Status = ', Status + stop 74 + endif + + rmse_var='T' + allocate(field3(nlon,nlat,nsig)) + call read_netcdf_mass(dh1,DateStr1,rmse_var,field3,nlon,nlat,nsig,iout) + do k=1,nsig + do j=1,nlon + do i=1,nlat + field3(j,i,k)=field3(j,i,k)+en_perts(i,j,k+2*nsig)*infltnfct_t(k) + end do + end do + end do + call update_netcdf_mass(dh2,DateStr1,rmse_var,field3,nlon,nlat,nsig,iout) + deallocate(field3) + + rmse_var='U' + allocate(field3(nlon+1,nlat,nsig)) + call read_netcdf_mass(dh1,DateStr1,rmse_var,field3,nlon+1,nlat,nsig,iout) + do k=1,nsig + do j=1,nlon+1 + do i=1,nlat + im=max(1,j-1) + i0=min(nlon,j) + field3(j,i,k)=field3(j,i,k)+& + half*(en_perts(i,im,k)+en_perts(i,i0,k))*infltnfct_uv(k) + end do + end do + end do + call update_netcdf_mass(dh2,DateStr1,rmse_var,field3,nlon+1,nlat,nsig,iout) + deallocate(field3) + + rmse_var='V' + allocate(field3(nlon,nlat+1,nsig)) + call read_netcdf_mass(dh1,DateStr1,rmse_var,field3,nlon,nlat+1,nsig,iout) + do k=1,nsig + do j=1,nlon + do i=1,nlat+1 + im=max(1,i-1) + i0=min(nlon,i) + field3(j,i,k)=field3(j,i,k)+& + half*(en_perts(im,j,k)+en_perts(i0,j,k))*infltnfct_uv(k) + end do + end do + end do + call update_netcdf_mass(dh2,DateStr1,rmse_var,field3,nlon,nlat+1,nsig,iout) + deallocate(field3) + + rmse_var='QVAPOR' + allocate(field3(nlon,nlat,nsig)) + call read_netcdf_mass(dh1,DateStr1,rmse_var,field3,nlon,nlat,nsig,iout) + do k=1,nsig + do j=1,nlon + do i=1,nlat + field3(j,i,k)=field3(j,i,k)+en_perts(i,j,k+3*nsig)*infltnfct_q(k) + end do + end do + end do + call update_netcdf_mass(dh2,DateStr1,rmse_var,field3,nlon,nlat,nsig,iout) + deallocate(field3) + + 
rmse_var='MU' + allocate(field3(nlon,nlat,1)) + call read_netcdf_mass(dh1,DateStr1,rmse_var,field3,nlon,nlat,1,iout) + do k=1,1 + do j=1,nlon + do i=1,nlat + field3(j,i,k)=field3(j,i,k)+en_perts(i,j,k+6*nsig)*infltnfct_ps + end do + end do + end do + call update_netcdf_mass(dh2,DateStr1,rmse_var,field3,nlon,nlat,1,iout) + deallocate(field3) + + call ext_ncd_ioclose(dh2, Status) + enddo ! n + + deallocate(workh) +! + call ext_ncd_ioclose(dh1, Status) + + call mpi_finalize(ierror) + +end program initial_arw_ens + +SUBROUTINE wrf_debug( level , str ) + IMPLICIT NONE + CHARACTER*(*) str + INTEGER , INTENT (IN) :: level + INTEGER :: debug_level + CHARACTER (LEN=256) :: time_str + CHARACTER (LEN=256) :: grid_str + CHARACTER (LEN=512) :: out_str + write(*,*) 'wrf_debug called !' + RETURN +END SUBROUTINE wrf_debug + diff --git a/util/EnKF/arw/src/initialens_regional.fd/namelist.input b/util/EnKF/arw/src/initialens_regional.fd/namelist.input new file mode 100644 index 000000000..f0fcb8809 --- /dev/null +++ b/util/EnKF/arw/src/initialens_regional.fd/namelist.input @@ -0,0 +1,3 @@ + &SETUP + n_ens=5, + / diff --git a/util/EnKF/arw/src/initialens_regional.fd/read_netcdf_mass.f90 b/util/EnKF/arw/src/initialens_regional.fd/read_netcdf_mass.f90 new file mode 100755 index 000000000..ae8656885 --- /dev/null +++ b/util/EnKF/arw/src/initialens_regional.fd/read_netcdf_mass.f90 @@ -0,0 +1,95 @@ +subroutine read_netcdf_mass(dh1,DateStr1,rmse_var,field3,nlon,nlat,nsig,iout) +!$$$ documentation block +! . . . . +! update_netcdf_mass: read one variable from netcdf file and +! and write it into another netcdf file +! +! prgmmr: Ming Hu date: 2009-01-16 +! +! program history log: +! +! input argument list: +! dh1 : handle of file read in +! DateStr1 : time string of file read in +! rmse_var : variable updated +! +! attributes: +! language: f90 +! +!$$$ + + use kinds, only: r_single,i_kind, r_kind + implicit none + +! 
+ integer(i_kind), intent(in) :: iout + integer(i_kind), intent(in) :: dh1 + character (len=31),intent(in) :: rmse_var + character(len=19), intent(in) :: DateStr1 + integer(i_kind), intent(in) :: nlon,nlat,nsig + real(r_single), intent(out) :: field3(nlon,nlat,nsig) + +! rmse stuff + integer(i_kind) :: ndim1 + integer(i_kind) :: WrfType + integer(i_kind), dimension(4) :: start_index, end_index + character (len= 4) :: staggering + character (len= 3) :: ordering + + character (len=80), dimension(3) :: dimnames + integer(i_kind) wrf_real + +! Declare local parameters + integer(i_kind) nlon_regional,nlat_regional,nsig_regional + + integer(i_kind) :: k + integer(i_kind) :: ierr +! +! +! + write(iout,*) + write(iout,*) ' ================== ' + write(iout,*) ' Read variable ', trim(rmse_var) + write(iout,*) ' ================== ' + + wrf_real=104_i_kind +!------------- get grid info + + end_index=0 + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) + write(iout,*)' <<<<<<<<<<<<<< Read in data from dh1 = ',dh1 + write(iout,*)' rmse_var=',trim(rmse_var) + write(iout,*)' ordering=',ordering + write(iout,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(iout,*)' ndim1=',ndim1 + write(iout,*)' staggering=',staggering + write(iout,*)' start_index=',start_index + write(iout,*)' end_index=',end_index + write(iout,*)'ierr = ',ierr !DEDE + nlon_regional=end_index(1) + nlat_regional=end_index(2) + nsig_regional=end_index(3) + if(ndim1 == 2) nsig_regional=1 + if( nlon_regional /= nlon .or. & + nlat_regional /= nlat .or. & + nsig_regional /= nsig) then + + write(iout,*) 'update_netcdf_mass: Wrong dimension ' + stop 123 + endif + + call ext_ncd_read_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + +! do k=1,nsig_regional +! 
write(6,*)' max,min =',k,maxval(field3(:,:,k)),minval(field3(:,:,k)) +! enddo + +end subroutine read_netcdf_mass + diff --git a/util/EnKF/arw/src/initialens_regional.fd/update_netcdf_mass.f90 b/util/EnKF/arw/src/initialens_regional.fd/update_netcdf_mass.f90 new file mode 100755 index 000000000..f088d8dc6 --- /dev/null +++ b/util/EnKF/arw/src/initialens_regional.fd/update_netcdf_mass.f90 @@ -0,0 +1,95 @@ +subroutine update_netcdf_mass(dh1,DateStr1,rmse_var,field3,nlon,nlat,nsig,iout) +!$$$ documentation block +! . . . . +! update_netcdf_mass: read one variable from netcdf file and +! and write it into another netcdf file +! +! prgmmr: Ming Hu date: 2009-01-16 +! +! program history log: +! +! input argument list: +! dh1 : handle of file read in +! DateStr1 : time string of file read in +! rmse_var : variable updated +! +! attributes: +! language: f90 +! +!$$$ + + use kinds, only: r_single,i_kind, r_kind + implicit none + +! + integer(i_kind), intent(in) :: dh1 + integer(i_kind), intent(in) :: iout + character (len=31),intent(in) :: rmse_var + character(len=19), intent(in) :: DateStr1 + integer(i_kind), intent(in) :: nlon,nlat,nsig + real(r_single), intent(in) :: field3(nlon,nlat,nsig) + +! rmse stuff + integer(i_kind) :: ndim1 + integer(i_kind) :: WrfType + integer(i_kind), dimension(4) :: start_index, end_index + character (len= 4) :: staggering + character (len= 3) :: ordering + + character (len=80), dimension(3) :: dimnames + integer(i_kind) wrf_real + +! Declare local parameters + integer(i_kind) nlon_regional,nlat_regional,nsig_regional + + integer(i_kind) :: k + integer(i_kind) :: ierr +! +! +! + write(iout,*) + write(iout,*) ' ================== ' + write(iout,*) ' write variable ', trim(rmse_var) + write(iout,*) ' ================== ' + +! do k=1,nsig +! write(6,*)' max,min =',k,maxval(field3(:,:,k)),minval(field3(:,:,k)) +! 
enddo + + wrf_real=104_i_kind +!------------- get grid info + + end_index=0 + call ext_ncd_get_var_info (dh1,trim(rmse_var),ndim1,ordering,staggering, & + start_index,end_index, WrfType, ierr ) + write(iout,*)' <<<<<<<<<<<<<< write in data to dh1 = ',dh1 + write(iout,*)' rmse_var=',trim(rmse_var) + write(iout,*)' ordering=',ordering + write(iout,*)' WrfType,WRF_REAL=',WrfType,WRF_REAL + write(iout,*)' ndim1=',ndim1 + write(iout,*)' staggering=',staggering + write(iout,*)' start_index=',start_index + write(iout,*)' end_index=',end_index + write(iout,*)'ierr = ',ierr !DEDE + nlon_regional=end_index(1) + nlat_regional=end_index(2) + nsig_regional=end_index(3) + if(ndim1 == 2) nsig_regional=1 + if( nlon_regional /= nlon .or. & + nlat_regional /= nlat .or. & + nsig_regional /= nsig) then + + write(iout,*) 'update_netcdf_mass: Wrong dimension ' + stop 123 + endif + + call ext_ncd_write_field(dh1,DateStr1,TRIM(rmse_var), & + field3,WRF_REAL,0,0,0,ordering, & + staggering, dimnames , & + start_index,end_index, & !dom + start_index,end_index, & !mem + start_index,end_index, & !pat + ierr ) + +end subroutine update_netcdf_mass + diff --git a/util/gsienvreport.sh b/util/gsienvreport.sh new file mode 100755 index 000000000..1d9d86492 --- /dev/null +++ b/util/gsienvreport.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# by Guoqing Ge, 2018/09/10 +# +echo " +-------------------!!! Disclaimer !!!------------------------------------------ + This shell script will report some environmental information + about the host on which you plan to compile/run GSI/EnKF. + The information will be displayed to the screen for you to review. + You will decide what information is appropriate to be sent to helpdesk. + The helpdesk does not collect any personal information. 
+------------------------------------------------------------------------------- + " + +echo "comgsi version : v3.7" +cat /proc/cpuinfo | grep 'vendor' | uniq +cat /proc/cpuinfo | grep 'model name' | uniq +echo "Processors : `cat /proc/cpuinfo | grep processor | wc -l`" +echo "Current host : $HOST" +echo "Current OS : `uname`" +echo "Current shell : `ps -o comm= $PPID`" +echo "NETCDF : $NETCDF" + +# try to find a working Fortran compiler in current environment +echo +compiler="ifort" +echo "[INTEL]: +`${compiler} --version 2>/dev/null`" +echo +compiler="pgfortran" +echo "[PGI]: +`${compiler} --version 2>/dev/null`" +echo +compiler="gfortran" +echo "[GNU]: +`${compiler} --version 2>/dev/null`" + +## check whether mpirun, mpirun.lsf or mpiexec exist +echo +mpi=`which mpirun 2>/dev/null` +echo "mpirun -- ${mpi}" +mpi=`which mpirun.lsf 2>/dev/null` +echo "mpirun.lsf -- ${mpi}" +mpi=`which mpiexec 2>/dev/null` +echo "mpiexec -- ${mpi}" +RMS=`which qsub 2>/dev/null` +echo "qsub -- ${RMS}" +RMS=`which bsub 2>/dev/null` +echo "bsub -- ${RMS}" +RMS=`which sbatch 2>/dev/null` +echo "sbatch -- ${RMS}" + +## check the host operating system +echo " +The operating system and version information: " +cat /proc/version 2>/dev/null; cat /etc/*release 2>/dev/null; cat /etc/*version 2>/dev/null diff --git a/util/ndate/CMakeLists.txt b/util/ndate/CMakeLists.txt index f9fc2eb8a..0266aee50 100644 --- a/util/ndate/CMakeLists.txt +++ b/util/ndate/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required(VERSION 2.6) set(NDATE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/ndate.f ${W3NCO_DIR}/w3valdat.f ${W3NCO_DIR}/errexit.f ${W3NCO_DIR}/errmsg.f ${W3NCO_DIR}/w3reddat.f ${W3NCO_DIR}/iw3jdn.f ${W3NCO_DIR}/w3movdat.f ${W3NCO_DIR}/w3fs26.f ${W3NCO_DIR}/w3utcdat.f ) - set(NDATE_Fortran_FLAGS "-fp-model source -ftz -assume byterecl -convert big_endian -heap-arrays -DCOMMCODE -DLINUX -DUPPLITTLEENDIAN -O3 -Wl,-noinhibit-exec ") +# set(NDATE_Fortran_FLAGS "-fp-model source -ftz -assume byterecl -convert big_endian 
-heap-arrays -DCOMMCODE -DLINUX -DUPPLITTLEENDIAN -O3 -Wl,-noinhibit-exec ") set_source_files_properties( ${NDATE_SRC} PROPERTIES COMPILE_FLAGS ${NDATE_Fortran_FLAGS} ) add_executable(ndate.x ${NDATE_SRC} ) set_target_properties( ndate.x PROPERTIES COMPILE_FLAGS ${NDATE_Fortran_FLAGS} ) diff --git a/util/radar_process/radialwind/CMakeLists.txt b/util/radar_process/radialwind/CMakeLists.txt new file mode 100644 index 000000000..ef551a5fd --- /dev/null +++ b/util/radar_process/radialwind/CMakeLists.txt @@ -0,0 +1,20 @@ +cmake_minimum_required(VERSION 2.6) +# set(CMAKE_Fortran_LIB_DIRECTORY "${PROJECT_BINARY_DIR}/lib") +# + file(GLOB LOCAL_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.f90) + set_source_files_properties( ${LOCAL_SRC} PROPERTIES COMPILE_FLAGS ${UTIL_COM_Fortran_FLAGS} ) + + add_executable(bufr_decode_l2rwbufr.x bufr_decode_l2rwbufr.f90 ) + set_target_properties( bufr_decode_l2rwbufr.x PROPERTIES COMPILE_FLAGS ${UTIL_COM_Fortran_FLAGS} ) + target_link_libraries( bufr_decode_l2rwbufr.x ${BUFR_LIBRARY} ) + add_dependencies(bufr_decode_l2rwbufr.x ${BUFR_LIBRARY}) + + add_executable(bufr_encode_l2rwbufr.x bufr_encode_l2rwbufr.f90 ) + set_target_properties( bufr_encode_l2rwbufr.x PROPERTIES COMPILE_FLAGS ${UTIL_COM_Fortran_FLAGS} ) + target_link_libraries( bufr_encode_l2rwbufr.x ${BUFR_LIBRARY} ) + add_dependencies(bufr_encode_l2rwbufr.x ${BUFR_LIBRARY} ) + + add_executable(bufr_encode_radarbufr.x bufr_encode_radarbufr.f90 ) + set_target_properties( bufr_encode_radarbufr.x PROPERTIES COMPILE_FLAGS ${UTIL_COM_Fortran_FLAGS} ) + target_link_libraries( bufr_encode_radarbufr.x ${BUFR_LIBRARY} ) + add_dependencies(bufr_encode_radarbufr.x ${BUFR_LIBRARY} ) diff --git a/util/radar_process/radialwind/makefile b/util/radar_process/radialwind/makefile deleted file mode 100644 index 6fe47fe22..000000000 --- a/util/radar_process/radialwind/makefile +++ /dev/null @@ -1,34 +0,0 @@ -#! 
/bin/sh -v - -# Define the fortran compiler and options -# -include ../../../configure.gsi - -FC = ${SFC} -FLAGS= -O2 -INCLD= -I../../../include -LIBS = -L../../../lib -lbufr_i4r8 - -all: bufr_decode_l2rwbufr bufr_encode_l2rwbufr bufr_encode_radarbufr - -work: bufr_encode_l2rwbufr - -bufr_decode_l2rwbufr: bufr_decode_l2rwbufr.o - ${FC} -o bufr_decode_l2rwbufr.exe ${FLAGS} bufr_decode_l2rwbufr.o ${LIBS} - rm bufr_decode_l2rwbufr.o - -bufr_encode_l2rwbufr: bufr_encode_l2rwbufr.o - ${FC} -o bufr_encode_l2rwbufr.exe ${FLAGS} bufr_encode_l2rwbufr.o ${LIBS} - rm bufr_encode_l2rwbufr.o - -bufr_encode_radarbufr: bufr_encode_radarbufr.o - ${FC} -o bufr_encode_radarbufr.exe ${FLAGS} bufr_encode_radarbufr.o ${LIBS} - rm bufr_encode_radarbufr.o - -.SUFFIXES : .f90 .o - -.f90.o : - ${FC} ${FLAGS} ${INCLD} -c $< - -clean: - /bin/rm -f *.o *.exe diff --git a/util/radar_process/reflectivity/CMakeLists.txt b/util/radar_process/reflectivity/CMakeLists.txt new file mode 100644 index 000000000..c39dcc191 --- /dev/null +++ b/util/radar_process/reflectivity/CMakeLists.txt @@ -0,0 +1,9 @@ +cmake_minimum_required(VERSION 2.6) + file(GLOB LOCAL_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.f90) + set_source_files_properties( ${LOCAL_SRC} PROPERTIES COMPILE_FLAGS ${UTIL_COM_Fortran_FLAGS} ) + include_directories( ${NETCDF_INCLUDES} ${MPI_Fortran_INCLUDE_PATH} ) + + add_executable(process_NSSL_mosaic.x ${LOCAL_SRC} ) + set_target_properties( process_NSSL_mosaic.x PROPERTIES COMPILE_FLAGS ${UTIL_COM_Fortran_FLAGS} ) + target_link_libraries( process_NSSL_mosaic.x ${BUFR_LIBRARY} ${NETCDF_LIBRARIES} ${MPI_Fortran_LIBRARIES} ) + add_dependencies(process_NSSL_mosaic.x ${BUFR_LIBRARY} ${GSILIB}) diff --git a/util/radar_process/reflectivity/makefile b/util/radar_process/reflectivity/makefile deleted file mode 100644 index 0862ada67..000000000 --- a/util/radar_process/reflectivity/makefile +++ /dev/null @@ -1,29 +0,0 @@ -# makefile for nssl_porcess -#!/bin/sh -v - -# Define the fortran compiler and options -# - 
-include ../../../configure.gsi - -FC = ${DM_FC} -FLAGS= -fp-model precise -assume byterecl -fpe0 -g -traceback - -INCLD= -I../../../include -I${NETCDF}/include -LIBS = -L../../../lib -lbufr_i4r8 -L${NETCDF}/lib -lnetcdf - -OBJS = write_bufr_ref.o process_NSSL_mosaic.o \ - read_nssl_binary.o nc_subs.o - -all: process_NSSL_mosaic - -process_NSSL_mosaic: ${OBJS} - ${FC} -o process_NSSL_mosaic.exe ${FLAGS} ${OBJS} ${LIBS} - -.SUFFIXES : .f90 .o - -.f90.o : - ${FC} ${FLAGS} ${INCLD} -c $< - -clean: - /bin/rm -f *.o *.exe