diff --git a/.gitignore b/.gitignore
index e73744aaba..869c78107c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -52,10 +52,16 @@ parm/post/nam_micro_lookup.dat
parm/post/optics_luts_DUST.dat
parm/post/gtg.config.gfs
parm/post/gtg_imprintings.txt
+parm/post/optics_luts_DUST_nasa.dat
+parm/post/optics_luts_NITR_nasa.dat
parm/post/optics_luts_SALT.dat
+parm/post/optics_luts_SALT_nasa.dat
parm/post/optics_luts_SOOT.dat
+parm/post/optics_luts_SOOT_nasa.dat
parm/post/optics_luts_SUSO.dat
+parm/post/optics_luts_SUSO_nasa.dat
parm/post/optics_luts_WASO.dat
+parm/post/optics_luts_WASO_nasa.dat
parm/post/params_grib2_tbl_new
parm/post/post_tag_gfs128
parm/post/post_tag_gfs65
@@ -77,6 +83,9 @@ parm/post/postcntrl_gfs_wafs.xml
parm/post/postcntrl_gfs_wafs_anl.xml
parm/post/postxconfig-NT-GEFS-ANL.txt
parm/post/postxconfig-NT-GEFS-F00.txt
+parm/post/postxconfig-NT-GEFS-F00-aerosol.txt
+parm/post/postxconfig-NT-GEFS-WAFS.txt
+parm/post/postxconfig-NT-GEFS-aerosol.txt
parm/post/postxconfig-NT-GEFS.txt
parm/post/postxconfig-NT-GFS-ANL.txt
parm/post/postxconfig-NT-GFS-F00-TWO.txt
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 0000000000..c591aae70f
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,188 @@
+def Machine = 'none'
+def machine = 'none'
+def HOME = 'none'
+def localworkspace = 'none'
+def commonworkspace = 'none'
+
+pipeline {
+ agent { label 'built-in' }
+
+ options {
+ skipDefaultCheckout()
+ buildDiscarder(logRotator(numToKeepStr: '2'))
+ }
+
+ stages { // This initial stage is used to get the Machine name from the GitHub labels on the PR
+ // which is used to designate the Nodes in the Jenkins Controller by the agent label
+ // Each Jenkins Node is connected to said machine via a Java agent via an ssh tunnel
+
+ stage('Get Machine') {
+ agent { label 'built-in' }
+ steps {
+ script {
+ localworkspace = env.WORKSPACE
+ machine = 'none'
+ for (label in pullRequest.labels) {
+ echo "Label: ${label}"
+ if ((label.matches("CI-Hera-Ready"))) {
+ machine = 'hera'
+ } else if ((label.matches("CI-Orion-Ready"))) {
+ machine = 'orion'
+ } else if ((label.matches("CI-Hercules-Ready"))) {
+ machine = 'hercules'
+ }
+ } // creating a second machine variable with first letter capital
+ // because the first letter of the machine name is capitalized in the GitHub labels
+ Machine = machine[0].toUpperCase() + machine.substring(1)
+ }
+ }
+ }
+
+ stage('Get Common Workspace') {
+ agent { label "${machine}-emc" }
+ steps {
+ script {
+ properties([parameters([[$class: 'NodeParameterDefinition', allowedSlaves: ['built-in','Hera-EMC','Orion-EMC'], defaultSlaves: ['built-in'], name: '', nodeEligibility: [$class: 'AllNodeEligibility'], triggerIfResult: 'allCases']])])
+ HOME = "${WORKSPACE}/TESTDIR"
+ commonworkspace = "${WORKSPACE}"
+ sh( script: "mkdir -p ${HOME}/RUNTESTS", returnStatus: true)
+ pullRequest.addLabel("CI-${Machine}-Building")
+ if ( pullRequest.labels.any{ value -> value.matches("CI-${Machine}-Ready") } ) {
+ pullRequest.removeLabel("CI-${Machine}-Ready")
+ }
+ }
+ }
+ }
+
+ stage('Build System') {
+ matrix {
+ agent { label "${machine}-emc" }
+ //options {
+ // throttle(['global_matrix_build'])
+ //}
+ axes {
+ axis {
+ name "system"
+ values "gfs", "gefs"
+ }
+ }
+ stages {
+ stage("build system") {
+ steps {
+ script {
+ def HOMEgfs = "${HOME}/${system}" // local HOMEgfs is used to build the system on per system basis under the common workspace HOME
+ sh( script: "mkdir -p ${HOMEgfs}", returnStatus: true)
+ ws(HOMEgfs) {
+ env.MACHINE_ID = machine // MACHINE_ID is used in the build scripts to determine the machine and is added to the shell environment
+ if (fileExists("${HOMEgfs}/sorc/BUILT_semaphor")) { // if the system is already built, skip the build in the case of re-runs
+ sh( script: "cat ${HOMEgfs}/sorc/BUILT_semaphor", returnStdout: true).trim() // TODO: add user-configurable control to manage build semaphore
+ ws(commonworkspace) { pullRequest.comment("Cloned PR already built (or build skipped) on ${machine} in directory ${HOMEgfs}") }
+ } else {
+ checkout scm
+ sh( script: "source workflow/gw_setup.sh;which git;git --version;git submodule update --init --recursive", returnStatus: true)
+ def builds_file = readYaml file: "ci/cases/yamls/build.yaml"
+ def build_args_list = builds_file['builds']
+ def build_args = build_args_list[system].join(" ").trim().replaceAll("null", "")
+ dir("${HOMEgfs}/sorc") {
+ sh( script: "${build_args}", returnStatus: true)
+ sh( script: "./link_workflow.sh", returnStatus: true)
+ sh( script: "echo ${HOMEgfs} > BUILT_semaphor", returnStatus: true)
+ }
+ }
+ if ( pullRequest.labels.any{ value -> value.matches("CI-${Machine}-Building") } ) {
+ pullRequest.removeLabel("CI-${Machine}-Building")
+ }
+ pullRequest.addLabel("CI-${Machine}-Running")
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ stage('Run Tests') {
+ matrix {
+ agent { label "${machine}-emc" }
+ axes {
+ axis {
+ name "Case"
+ values "C48_ATM", "C48_S2SWA_gefs", "C48_S2SW", "C96_atm3DVar" // TODO add dynamic list of cases from env vars (needs additional plugins)
+ }
+ }
+ stages {
+ stage('Create Experiment') {
+ steps {
+ script {
+ sh( script: "sed -n '/{.*}/!p' ${HOME}/gfs/ci/cases/pr/${Case}.yaml > ${HOME}/gfs/ci/cases/pr/${Case}.yaml.tmp", returnStatus: true)
+ def yaml_case = readYaml file: "${HOME}/gfs/ci/cases/pr/${Case}.yaml.tmp"
+ system = yaml_case.experiment.system
+ def HOMEgfs = "${HOME}/${system}" // local HOMEgfs is used to populate the XML on per system basis
+ env.RUNTESTS = "${HOME}/RUNTESTS"
+ sh( script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh create_experiment ${HOMEgfs}/ci/cases/pr/${Case}.yaml", returnStatus: true)
+ }
+ }
+ }
+ stage('Run Experiments') {
+ steps {
+ script {
+ HOMEgfs = "${HOME}/gfs" // common HOMEgfs is used to launch the scripts that run the experiments
+ ws(HOMEgfs) {
+ pslot = sh( script: "${HOMEgfs}/ci/scripts/utils/ci_utils_wrapper.sh get_pslot ${HOME}/RUNTESTS ${Case}", returnStdout: true ).trim()
+ pullRequest.comment("**Running experiments: ${Case} on ${Machine}**
Built against system **${system}** in directory:
`${HOMEgfs}`
With the experiment in directory:
`${HOME}/RUNTESTS/${pslot}`")
+ try {
+ sh( script: "${HOMEgfs}/ci/scripts/run-check_ci.sh ${HOME} ${pslot}", returnStatus: true)
+ } catch (Exception e) {
+ pullRequest.comment("**FAILURE** running experiments: ${Case} on ${Machine}")
+ error("Failed to run experiments ${Case} on ${Machine}")
+ }
+ pullRequest.comment("**SUCCESS** running experiments: ${Case} on ${Machine}")
+ }
+ }
+ }
+ post {
+ always {
+ script {
+ ws (HOMEgfs) {
+ for (label in pullRequest.labels) {
+ if (label.contains("${Machine}")) {
+ pullRequest.removeLabel(label)
+ }
+ }
+ }
+ }
+ }
+ success {
+ script {
+ ws (HOMEgfs) {
+ pullRequest.addLabel("CI-${Machine}-Passed")
+ def timestamp = new Date().format("MM dd HH:mm:ss", TimeZone.getTimeZone('America/New_York'))
+ pullRequest.comment("**CI SUCCESS** ${Machine} at ${timestamp}\n\nBuilt and ran in directory `${HOME}`")
+ }
+ }
+ }
+ failure {
+ script {
+ ws (HOMEgfs) {
+ pullRequest.addLabel("CI-${Machine}-Failed")
+ def timestamp = new Date().format("MM dd HH:mm:ss", TimeZone.getTimeZone('America/New_York'))
+ pullRequest.comment("**CI FAILED** ${Machine} at ${timestamp}
Built and ran in directory `${HOME}`")
+ if (fileExists("${HOME}/RUNTESTS/ci.log")) {
+ def fileContent = readFile "${HOME}/RUNTESTS/ci.log"
+ fileContent.eachLine { line ->
+ if( line.contains(".log")) {
+ archiveArtifacts artifacts: "${line}", fingerprint: true
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+}
diff --git a/ci/cases/pr/C48_ATM.yaml b/ci/cases/pr/C48_ATM.yaml
index 39412e8aeb..79706556e6 100644
--- a/ci/cases/pr/C48_ATM.yaml
+++ b/ci/cases/pr/C48_ATM.yaml
@@ -10,4 +10,4 @@ arguments:
expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR
idate: 2021032312
edate: 2021032312
- yaml: {{ HOMEgfs }}/ci/platforms/gfs_defaults_ci.yaml
+ yaml: {{ HOMEgfs }}/ci/cases/yamls/gfs_defaults_ci.yaml
diff --git a/ci/cases/pr/C48_S2SW.yaml b/ci/cases/pr/C48_S2SW.yaml
index 2aba42f562..6367564514 100644
--- a/ci/cases/pr/C48_S2SW.yaml
+++ b/ci/cases/pr/C48_S2SW.yaml
@@ -11,4 +11,4 @@ arguments:
expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR
idate: 2021032312
edate: 2021032312
- yaml: {{ HOMEgfs }}/ci/platforms/gfs_defaults_ci.yaml
+ yaml: {{ HOMEgfs }}/ci/cases/yamls/gfs_defaults_ci.yaml
diff --git a/ci/cases/pr/C48_S2SWA_gefs.yaml b/ci/cases/pr/C48_S2SWA_gefs.yaml
index d68360bf44..d42f4cd15b 100644
--- a/ci/cases/pr/C48_S2SWA_gefs.yaml
+++ b/ci/cases/pr/C48_S2SWA_gefs.yaml
@@ -15,4 +15,4 @@ arguments:
expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR
idate: 2021032312
edate: 2021032312
- yaml: {{ HOMEgfs }}/ci/platforms/gefs_ci_defaults.yaml
+ yaml: {{ HOMEgfs }}/ci/cases/yamls/gefs_ci_defaults.yaml
diff --git a/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml b/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml
new file mode 100644
index 0000000000..b972d3a445
--- /dev/null
+++ b/ci/cases/pr/C48mx500_3DVarAOWCDA.yaml
@@ -0,0 +1,22 @@
+experiment:
+ system: gfs
+ mode: cycled
+
+arguments:
+ pslot: {{ 'pslot' | getenv }}
+ app: S2S
+ resdetatmos: 48
+ resdetocean: 5.0
+ comroot: {{ 'RUNTESTS' | getenv }}/COMROOT
+ expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR
+ icsdir: {{ 'ICSDIR_ROOT' | getenv }}/C48mx500
+ idate: 2021032412
+ edate: 2021032418
+ nens: 0
+ gfs_cyc: 0
+ start: warm
+ yaml: {{ HOMEgfs }}/ci/cases/yamls/soca_gfs_defaults_ci.yaml
+
+skip_ci_on_hosts:
+ - orion
+ - hercules
diff --git a/ci/cases/pr/C96C48_hybatmDA.yaml b/ci/cases/pr/C96C48_hybatmDA.yaml
index be35283cff..d08374d4e0 100644
--- a/ci/cases/pr/C96C48_hybatmDA.yaml
+++ b/ci/cases/pr/C96C48_hybatmDA.yaml
@@ -16,4 +16,4 @@ arguments:
nens: 2
gfs_cyc: 1
start: cold
- yaml: {{ HOMEgfs }}/ci/platforms/gfs_defaults_ci.yaml
+ yaml: {{ HOMEgfs }}/ci/cases/yamls/gfs_defaults_ci.yaml
diff --git a/ci/cases/pr/C96_atm3DVar.yaml b/ci/cases/pr/C96_atm3DVar.yaml
index dee1525d80..d992938f7f 100644
--- a/ci/cases/pr/C96_atm3DVar.yaml
+++ b/ci/cases/pr/C96_atm3DVar.yaml
@@ -14,4 +14,4 @@ arguments:
nens: 0
gfs_cyc: 1
start: cold
- yaml: {{ HOMEgfs }}/ci/platforms/gfs_defaults_ci.yaml
+ yaml: {{ HOMEgfs }}/ci/cases/yamls/gfs_defaults_ci.yaml
diff --git a/ci/cases/pr/C96_atmsnowDA.yaml b/ci/cases/pr/C96_atmsnowDA.yaml
new file mode 100644
index 0000000000..35fcc10fb2
--- /dev/null
+++ b/ci/cases/pr/C96_atmsnowDA.yaml
@@ -0,0 +1,21 @@
+experiment:
+ system: gfs
+ mode: cycled
+
+arguments:
+ pslot: {{ 'pslot' | getenv }}
+ app: ATM
+ resdetatmos: 96
+ comroot: {{ 'RUNTESTS' | getenv }}/COMROOT
+ expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR
+ icsdir: {{ 'ICSDIR_ROOT' | getenv }}/C96C48
+ idate: 2021122012
+ edate: 2021122100
+ nens: 0
+ gfs_cyc: 1
+ start: cold
+ yaml: {{ HOMEgfs }}/ci/cases/yamls/atmsnowDA_defaults_ci.yaml
+
+skip_ci_on_hosts:
+ - orion
+ - hercules
diff --git a/ci/cases/weekly/C384C192_hybatmda.yaml b/ci/cases/weekly/C384C192_hybatmda.yaml
index a4eae7d9a1..131ada95d5 100644
--- a/ci/cases/weekly/C384C192_hybatmda.yaml
+++ b/ci/cases/weekly/C384C192_hybatmda.yaml
@@ -16,4 +16,4 @@ arguments:
nens: 2
gfs_cyc: 1
start: cold
- yaml: {{ HOMEgfs }}/ci/platforms/gfs_defaults_ci.yaml
+ yaml: {{ HOMEgfs }}/ci/cases/yamls/gfs_defaults_ci.yaml
diff --git a/ci/cases/weekly/C384_S2SWA.yaml b/ci/cases/weekly/C384_S2SWA.yaml
index 813188015f..7bbdc44671 100644
--- a/ci/cases/weekly/C384_S2SWA.yaml
+++ b/ci/cases/weekly/C384_S2SWA.yaml
@@ -11,4 +11,4 @@ arguments:
expdir: {{ 'RUNTESTS' | getenv }}/EXPDIR
idate: 2016070100
edate: 2016070100
- yaml: {{ HOMEgfs }}/ci/platforms/gfs_defaults_ci.yaml
+ yaml: {{ HOMEgfs }}/ci/cases/yamls/gfs_defaults_ci.yaml
diff --git a/ci/cases/weekly/C384_atm3DVar.yaml b/ci/cases/weekly/C384_atm3DVar.yaml
index 479d731b25..40487f3b47 100644
--- a/ci/cases/weekly/C384_atm3DVar.yaml
+++ b/ci/cases/weekly/C384_atm3DVar.yaml
@@ -16,4 +16,4 @@ arguments:
nens: 0
gfs_cyc: 1
start: cold
- yaml: {{ HOMEgfs }}/ci/platforms/gfs_defaults_ci.yaml
+ yaml: {{ HOMEgfs }}/ci/cases/yamls/gfs_defaults_ci.yaml
diff --git a/ci/cases/yamls/atmsnowDA_defaults_ci.yaml b/ci/cases/yamls/atmsnowDA_defaults_ci.yaml
new file mode 100644
index 0000000000..f805902931
--- /dev/null
+++ b/ci/cases/yamls/atmsnowDA_defaults_ci.yaml
@@ -0,0 +1,6 @@
+defaults:
+ !INC {{ HOMEgfs }}/parm/config/gfs/yaml/defaults.yaml
+base:
+ DOIAU: "NO"
+ DO_JEDILANDDA: "YES"
+ ACCOUNT: {{ 'SLURM_ACCOUNT' | getenv }}
diff --git a/ci/cases/yamls/build.yaml b/ci/cases/yamls/build.yaml
new file mode 100644
index 0000000000..5398fa1889
--- /dev/null
+++ b/ci/cases/yamls/build.yaml
@@ -0,0 +1,3 @@
+builds:
+ - gefs: './build_all.sh'
+ - gfs: './build_all.sh -gu'
\ No newline at end of file
diff --git a/ci/platforms/gefs_ci_defaults.yaml b/ci/cases/yamls/gefs_ci_defaults.yaml
similarity index 100%
rename from ci/platforms/gefs_ci_defaults.yaml
rename to ci/cases/yamls/gefs_ci_defaults.yaml
diff --git a/ci/platforms/gfs_defaults_ci.yaml b/ci/cases/yamls/gfs_defaults_ci.yaml
similarity index 100%
rename from ci/platforms/gfs_defaults_ci.yaml
rename to ci/cases/yamls/gfs_defaults_ci.yaml
diff --git a/ci/cases/yamls/soca_gfs_defaults_ci.yaml b/ci/cases/yamls/soca_gfs_defaults_ci.yaml
new file mode 100644
index 0000000000..126637cd86
--- /dev/null
+++ b/ci/cases/yamls/soca_gfs_defaults_ci.yaml
@@ -0,0 +1,5 @@
+defaults:
+ !INC {{ HOMEgfs }}/parm/config/gfs/yaml/defaults.yaml
+base:
+ ACCOUNT: {{ 'SLURM_ACCOUNT' | getenv }}
+ DO_JEDIOCNVAR: "YES"
diff --git a/ci/scripts/run-check_ci.sh b/ci/scripts/run-check_ci.sh
index 5a909c1c64..f98f434462 100755
--- a/ci/scripts/run-check_ci.sh
+++ b/ci/scripts/run-check_ci.sh
@@ -21,7 +21,9 @@ pslot=${2:-${pslot:-?}} # Name of the experiment being tested by this scr
# │ └── ${pslot}
# └── EXPDIR
# └── ${pslot}
-HOMEgfs="${TEST_DIR}/HOMEgfs"
+# Two system build directories created at build time gfs, and gdas
+# TODO: Make this configurable (for now all scripts run from gfs for CI at runtime)
+HOMEgfs="${TEST_DIR}/gfs"
RUNTESTS="${TEST_DIR}/RUNTESTS"
# Source modules and setup logging
diff --git a/ci/scripts/utils/ci_utils.sh b/ci/scripts/utils/ci_utils.sh
index 737a3e5a86..6f2426c388 100755
--- a/ci/scripts/utils/ci_utils.sh
+++ b/ci/scripts/utils/ci_utils.sh
@@ -1,24 +1,120 @@
#!/bin/env bash
-function cancel_slurm_jobs() {
+function determine_scheduler() {
+ if command -v sbatch &> /dev/null; then
+ echo "slurm";
+ elif command -v qsub &> /dev/null; then
+ echo "torque";
+ else
+ echo "unknown"
+ fi
+}
- # Usage: cancel_slurm_jobs
- # Example: cancel_slurm_jobs "C48_ATM_3c4e7f74"
+function cancel_batch_jobs() {
+ # Usage: cancel_batch_jobs
+ # Example: cancel_batch_jobs "C48_ATM_3c4e7f74"
#
- # Cancel all Slurm jobs that have the given substring in their name
+ # Cancel all batch jobs that have the given substring in their name
# So like in the example all jobs with "C48_ATM_3c4e7f74"
# in their name will be canceled
local substring=$1
local job_ids
- job_ids=$(squeue -u "${USER}" -h -o "%i")
-
- for job_id in ${job_ids}; do
- job_name=$(sacct -j "${job_id}" --format=JobName%100 | head -3 | tail -1 | sed -r 's/\s+//g') || true
- if [[ "${job_name}" =~ ${substring} ]]; then
- echo "Canceling Slurm Job ${job_name} with: scancel ${job_id}"
- scancel "${job_id}"
- continue
- fi
- done
+
+ scheduler=$(determine_scheduler)
+
+ if [[ "${scheduler}" == "torque" ]]; then
+ job_ids=$(qstat -u "${USER}" | awk '{print $1}') || true
+
+ for job_id in ${job_ids}; do
+ job_name=$(qstat -f "${job_id}" | grep Job_Name | awk '{print $3}') || true
+ if [[ "${job_name}" =~ ${substring} ]]; then
+ echo "Canceling PBS Job ${job_name} with: qdel ${job_id}"
+ qdel "${job_id}"
+ continue
+ fi
+ done
+
+ elif [[ "${scheduler}" == "slurm" ]]; then
+
+ job_ids=$(squeue -u "${USER}" -h -o "%i")
+
+ for job_id in ${job_ids}; do
+ job_name=$(sacct -j "${job_id}" --format=JobName%100 | head -3 | tail -1 | sed -r 's/\s+//g') || true
+ if [[ "${job_name}" =~ ${substring} ]]; then
+ echo "Canceling Slurm Job ${job_name} with: scancel ${job_id}"
+ scancel "${job_id}"
+ continue
+ fi
+ done
+
+ else
+ echo "FATAL: Unknown/unsupported job scheduler"
+ exit 1
+ fi
+}
+
+
+function get_pr_case_list () {
+
+ #############################################################
+ # loop over every yaml file in the PR's ci/cases
+ # and create a run directory for each one for this PR loop
+ #############################################################
+ for yaml_config in "${HOMEgfs}/ci/cases/pr/"*.yaml; do
+ case=$(basename "${yaml_config}" .yaml) || true
+ echo "${case}"
+ done
+}
+
+function get_pslot_list () {
+
+ local RUNTESTS="${1}"
+
+ #############################################################
+ # loop over expdir directories in RUNTESTS
+ # and create list of the directory names (pslot) with the hash tag
+ #############################################################
+ for pslot_dir in "${RUNTESTS}/EXPDIR/"*; do
+ pslot=$(basename "${pslot_dir}") || true
+ echo "${pslot}"
+ done
+
+}
+
+function get_pslot () {
+
+ local RUNTESTS="${1}"
+ local case="${2}"
+
+ #############################################################
+ # loop over expdir directories in RUNTESTS
+ # and return the name of the pslot with its tag that matches the case
+ #############################################################
+ for pslot_dir in "${RUNTESTS}/EXPDIR/"*; do
+ pslot=$(basename "${pslot_dir}")
+ check_case=$(echo "${pslot}" | rev | cut -d"_" -f2- | rev) || true
+ if [[ "${check_case}" == "${case}" ]]; then
+ echo "${pslot}"
+ break
+ fi
+ done
+
+}
+
+function create_experiment () {
+
+ local yaml_config="${1}"
+ cd "${HOMEgfs}" || exit 1
+ pr_sha=$(git rev-parse --short HEAD)
+ case=$(basename "${yaml_config}" .yaml) || true
+ export pslot=${case}_${pr_sha}
+
+ source "${HOMEgfs}/ci/platforms/config.${MACHINE_ID}"
+ source "${HOMEgfs}/workflow/gw_setup.sh"
+
+ # system=$(grep "system:" "${yaml_config}" | cut -d":" -f2 | tr -d " ") || true
+
+ "${HOMEgfs}/${system}/workflow/create_experiment.py" --overwrite --yaml "${yaml_config}"
+
}
diff --git a/ci/scripts/utils/ci_utils_wrapper.sh b/ci/scripts/utils/ci_utils_wrapper.sh
new file mode 100755
index 0000000000..51c392fb99
--- /dev/null
+++ b/ci/scripts/utils/ci_utils_wrapper.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+HOMEgfs="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." >/dev/null 2>&1 && pwd )"
+source "${HOMEgfs}/ush/detect_machine.sh"
+
+utility_function="${1}"
+
+source "${HOMEgfs}/ci/scripts/utils/ci_utils.sh"
+${utility_function} "${@:2}"
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 2eb786199a..4381488078 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -10,7 +10,7 @@ Status
======
* State of develop (HEAD) branch: GFSv17+ development
-* State of operations (dev/gfs.v16 branch): GFS v16.3.12 `tag: [gfs.v16.3.12] `_
+* State of operations (dev/gfs.v16 branch): GFS v16.3.13 `tag: [gfs.v16.3.13] `_
=============
Code managers
diff --git a/env/AWSPW.env b/env/AWSPW.env
index 894cce2343..ea5002ecb9 100755
--- a/env/AWSPW.env
+++ b/env/AWSPW.env
@@ -14,7 +14,6 @@ fi
step=$1
-export npe_node_max=36
export launcher="mpiexec.hydra"
export mpmd_opt=""
diff --git a/env/CONTAINER.env b/env/CONTAINER.env
index bfeb6dd6da..b1f55a4c98 100755
--- a/env/CONTAINER.env
+++ b/env/CONTAINER.env
@@ -14,7 +14,6 @@ fi
step=$1
-export npe_node_max=40
export launcher="mpirun"
export mpmd_opt="--multi-prog"
diff --git a/env/HERA.env b/env/HERA.env
index fb156645f8..057a2313f8 100755
--- a/env/HERA.env
+++ b/env/HERA.env
@@ -14,7 +14,6 @@ fi
step=$1
-export npe_node_max=40
export launcher="srun -l --export=ALL"
export mpmd_opt="--multi-prog --output=mpmd.%j.%t.out"
@@ -94,31 +93,19 @@ elif [[ "${step}" = "ocnanalbmat" ]]; then
export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
- nth_max=$((npe_node_max / npe_node_ocnanalbmat))
-
- export NTHREADS_OCNANAL=${nth_ocnanalbmat:-${nth_max}}
- [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max}
- export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalbmat} --cpus-per-task=${NTHREADS_OCNANAL}"
+ export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalbmat}"
elif [[ "${step}" = "ocnanalrun" ]]; then
export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
- nth_max=$((npe_node_max / npe_node_ocnanalrun))
-
- export NTHREADS_OCNANAL=${nth_ocnanalrun:-${nth_max}}
- [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max}
- export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalrun} --cpus-per-task=${NTHREADS_OCNANAL}"
+ export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalrun}"
elif [[ "${step}" = "ocnanalchkpt" ]]; then
export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
- nth_max=$((npe_node_max / npe_node_ocnanalchkpt))
-
- export NTHREADS_OCNANAL=${nth_ocnanalchkpt:-${nth_max}}
- [[ ${NTHREADS_OCNANAL} -gt ${nth_max} ]] && export NTHREADS_OCNANAL=${nth_max}
- export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalchkpt} --cpus-per-task=${NTHREADS_OCNANAL}"
+ export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalchkpt}"
elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
diff --git a/env/HERCULES.env b/env/HERCULES.env
index 6a4aad7a7d..ebfa51398b 100755
--- a/env/HERCULES.env
+++ b/env/HERCULES.env
@@ -12,7 +12,6 @@ fi
step=$1
-export npe_node_max=80
export launcher="srun -l --export=ALL"
export mpmd_opt="--multi-prog --output=mpmd.%j.%t.out"
diff --git a/env/JET.env b/env/JET.env
index 7bb152c5f3..eada0b1c70 100755
--- a/env/JET.env
+++ b/env/JET.env
@@ -14,13 +14,6 @@ fi
step=$1
-if [[ "${PARTITION_BATCH}" = "xjet" ]]; then
- export npe_node_max=24
-elif [[ "${PARTITION_BATCH}" = "vjet" ]]; then
- export npe_node_max=16
-elif [[ "${PARTITION_BATCH}" = "kjet" ]]; then
- export npe_node_max=40
-fi
export launcher="srun -l --epilog=/apps/local/bin/report-mem --export=ALL"
export mpmd_opt="--multi-prog --output=mpmd.%j.%t.out"
diff --git a/env/ORION.env b/env/ORION.env
index d91fd4db03..c5e94cc559 100755
--- a/env/ORION.env
+++ b/env/ORION.env
@@ -14,7 +14,6 @@ fi
step=$1
-export npe_node_max=40
export launcher="srun -l --export=ALL"
export mpmd_opt="--multi-prog --output=mpmd.%j.%t.out"
diff --git a/env/S4.env b/env/S4.env
index 3dab3fc3e7..b103e865d3 100755
--- a/env/S4.env
+++ b/env/S4.env
@@ -13,13 +13,7 @@ if [[ $# -ne 1 ]]; then
fi
step=$1
-PARTITION_BATCH=${PARTITION_BATCH:-"s4"}
-if [[ ${PARTITION_BATCH} = "s4" ]]; then
- export npe_node_max=32
-elif [[ ${PARTITION_BATCH} = "ivy" ]]; then
- export npe_node_max=20
-fi
export launcher="srun -l --export=ALL"
export mpmd_opt="--multi-prog --output=mpmd.%j.%t.out"
diff --git a/env/WCOSS2.env b/env/WCOSS2.env
index a4fe81060d..307ad71c43 100755
--- a/env/WCOSS2.env
+++ b/env/WCOSS2.env
@@ -18,8 +18,6 @@ step=$1
export launcher="mpiexec -l"
export mpmd_opt="--cpu-bind verbose,core cfp"
-export npe_node_max=128
-
if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
nth_max=$((npe_node_max / npe_node_prep))
diff --git a/jobs/JGDAS_ENKF_FCST b/jobs/JGDAS_ENKF_FCST
deleted file mode 100755
index 53408df8cf..0000000000
--- a/jobs/JGDAS_ENKF_FCST
+++ /dev/null
@@ -1,84 +0,0 @@
-#! /usr/bin/env bash
-
-source "${HOMEgfs}/ush/preamble.sh"
-source "${HOMEgfs}/ush/jjob_header.sh" -e "efcs" -c "base fcst efcs"
-
-
-##############################################
-# Set variables used in the script
-##############################################
-export CDUMP=${RUN/enkf}
-
-##############################################
-# Begin JOB SPECIFIC work
-##############################################
-
-export CASE=${CASE_ENS}
-
-YMD=${PDY} HH=${cyc} generate_com -rx COM_TOP
-
-
-# Forecast length for EnKF forecast
-export FHMIN=${FHMIN_ENKF}
-export FHOUT=${FHOUT_ENKF}
-export FHMAX=${FHMAX_ENKF}
-
-# Get ENSBEG/ENSEND from ENSGRP and NMEM_EFCSGRP
-if [[ $CDUMP == "gfs" ]]; then
- export NMEM_EFCSGRP=${NMEM_EFCSGRP_GFS:-${NMEM_EFCSGRP:-1}}
-fi
-export ENSEND=$((NMEM_EFCSGRP * 10#${ENSGRP}))
-export ENSBEG=$((ENSEND - NMEM_EFCSGRP + 1))
-
-if [[ ${DO_WAVE} == "YES" ]]; then
- declare -rx RUNwave="${RUN}wave"
-fi
-
-###############################################################
-# Run relevant script
-
-${ENKFFCSTSH:-${SCRgfs}/exgdas_enkf_fcst.sh}
-status=$?
-[[ ${status} -ne 0 ]] && exit ${status}
-
-
-# Double check the status of members in ENSGRP
-EFCSGRP="${COM_TOP}/efcs.grp${ENSGRP}"
-npass=0
-if [ -f ${EFCSGRP} ]; then
- npass=$(grep "PASS" ${EFCSGRP} | wc -l)
-fi
-echo "${npass}/${NMEM_EFCSGRP} members successfull in efcs.grp${ENSGRP}"
-if [ ${npass} -ne ${NMEM_EFCSGRP} ]; then
- echo "FATAL ERROR: Failed members in group ${ENSGRP}, ABORT!"
- cat ${EFCSGRP}
- exit 99
-fi
-
-
-##############################################
-# Send Alerts
-##############################################
-if [ ${SENDDBN} = YES ] ; then
- ${DBNROOT}/bin/dbn_alert MODEL ENKF1_MSC_fcsstat ${job} ${EFCSGRP}
-fi
-
-
-##############################################
-# End JOB SPECIFIC work
-##############################################
-
-##############################################
-# Final processing
-##############################################
-if [ -e "${pgmout}" ] ; then
- cat ${pgmout}
-fi
-
-##########################################
-# Remove the Temporary working directory
-##########################################
-cd ${DATAROOT}
-[[ ${KEEPDATA} = "NO" ]] && rm -rf ${DATA}
-
-exit 0
diff --git a/jobs/JGLOBAL_FORECAST b/jobs/JGLOBAL_FORECAST
index b2825af54f..bfdc7e3688 100755
--- a/jobs/JGLOBAL_FORECAST
+++ b/jobs/JGLOBAL_FORECAST
@@ -1,40 +1,19 @@
#! /usr/bin/env bash
source "${HOMEgfs}/ush/preamble.sh"
-source "${HOMEgfs}/ush/jjob_header.sh" -e "fcst" -c "base fcst"
-
-##############################################
-# Set variables used in the script
-##############################################
-export CDUMP=${RUN/enkf}
+if (( 10#${ENSMEM:-0} > 0 )); then
+ source "${HOMEgfs}/ush/jjob_header.sh" -e "efcs" -c "base fcst efcs"
+else
+ source "${HOMEgfs}/ush/jjob_header.sh" -e "fcst" -c "base fcst"
+fi
##############################################
# Begin JOB SPECIFIC work
##############################################
# Restart conditions for GFS cycle come from GDAS
-rCDUMP=${CDUMP}
-[[ ${CDUMP} = "gfs" ]] && export rCDUMP="gdas"
-
-# Forecast length for GFS forecast
-case ${RUN} in
- *gfs | *gefs)
- # shellcheck disable=SC2153
- export FHMAX=${FHMAX_GFS}
- # shellcheck disable=SC2153
- export FHOUT=${FHOUT_GFS}
- export FHMAX_HF=${FHMAX_HF_GFS}
- export FHOUT_HF=${FHOUT_HF_GFS}
- ;;
- *gdas)
- export FHMAX_HF=0
- export FHOUT_HF=0
- ;;
- *)
- echo "FATAL ERROR: Unsupported RUN '${RUN}'"
- exit 1
-esac
-
+rCDUMP=${RUN}
+[[ ${RUN} == "gfs" ]] && export rCDUMP="gdas"
# Ignore possible spelling error (nothing is misspelled)
# shellcheck disable=SC2153
@@ -78,11 +57,21 @@ fi
###############################################################
# Run relevant exglobal script
-
+###############################################################
${FORECASTSH:-${SCRgfs}/exglobal_forecast.sh}
status=$?
-[[ ${status} -ne 0 ]] && exit ${status}
-
+[[ ${status} -ne 0 ]] && exit "${status}"
+
+# Send DBN alerts for EnKF
+# TODO: Should these be in post manager instead?
+if [[ "${RUN}" =~ "enkf" ]] && [[ "${SENDDBN}" = YES ]]; then
+ for (( fhr = FHOUT; fhr <= FHMAX; fhr = fhr + FHOUT )); do
+ if (( fhr % 3 == 0 )); then
+ fhr3=$(printf %03i "${fhr}")
+ "${DBNROOT}/bin/dbn_alert" MODEL GFS_ENKF "${job}" "${COM_ATMOS_HISTORY}/${RUN}.t${cyc}z.sfcf${fhr3}.nc"
+ fi
+ done
+fi
##############################################
# End JOB SPECIFIC work
@@ -91,15 +80,14 @@ status=$?
##############################################
# Final processing
##############################################
-if [ -e "${pgmout}" ] ; then
- cat ${pgmout}
+if [[ -e "${pgmout}" ]] ; then
+ cat "${pgmout}"
fi
##########################################
# Remove the Temporary working directory
##########################################
-cd ${DATAROOT}
-[[ ${KEEPDATA} = "NO" ]] && rm -rf ${DATA}
-
+cd "${DATAROOT}" || true
+[[ ${KEEPDATA} = "NO" ]] && rm -rf "${DATA}"
exit 0
diff --git a/jobs/JGLOBAL_LAND_ANALYSIS b/jobs/JGLOBAL_LAND_ANALYSIS
index 3ff7e72a35..0cef66de09 100755
--- a/jobs/JGLOBAL_LAND_ANALYSIS
+++ b/jobs/JGLOBAL_LAND_ANALYSIS
@@ -1,6 +1,7 @@
#! /usr/bin/env bash
source "${HOMEgfs}/ush/preamble.sh"
+export DATA=${DATA:-${DATAROOT}/${RUN}landanl_${cyc}}
source "${HOMEgfs}/ush/jjob_header.sh" -e "landanl" -c "base landanl"
##############################################
diff --git a/jobs/JGLOBAL_PREP_LAND_OBS b/jobs/JGLOBAL_PREP_LAND_OBS
index 025adae529..9b14451568 100755
--- a/jobs/JGLOBAL_PREP_LAND_OBS
+++ b/jobs/JGLOBAL_PREP_LAND_OBS
@@ -1,6 +1,7 @@
#! /usr/bin/env bash
source "${HOMEgfs}/ush/preamble.sh"
+export DATA=${DATA:-${DATAROOT}/${RUN}landanl_${cyc}}
source "${HOMEgfs}/ush/jjob_header.sh" -e "preplandobs" -c "base preplandobs"
##############################################
diff --git a/jobs/JGLOBAL_PREP_OCEAN_OBS b/jobs/JGLOBAL_PREP_OCEAN_OBS
index a100aca89c..a6fcf9c9b3 100755
--- a/jobs/JGLOBAL_PREP_OCEAN_OBS
+++ b/jobs/JGLOBAL_PREP_OCEAN_OBS
@@ -15,7 +15,7 @@ YMD=${PDY} HH=${cyc} generate_com -rx COMOUT_OBS:COM_OBS_TMPL
##############################################
# Add prep_marine_obs.py to PYTHONPATH
-export PYTHONPATH=${HOMEgfs}/sorc/gdas.cd/ush/soca:${PYTHONPATH}
+export PYTHONPATH=${HOMEgfs}/sorc/gdas.cd/ush:${PYTHONPATH}
###############################################################
# Run relevant script
@@ -38,7 +38,7 @@ if [[ -e "${pgmout}" ]] ; then
fi
##########################################
-# Handle the temporary working directory
+# Handle the temporary working directory
##########################################
cd "${DATAROOT}" || (echo "FATAL ERROR: ${DATAROOT} does not exist. ABORT!"; exit 1)
[[ ${KEEPDATA} = "NO" ]] && rm -rf "${DATA}"
diff --git a/jobs/rocoto/efcs.sh b/jobs/rocoto/efcs.sh
deleted file mode 100755
index c5667cb970..0000000000
--- a/jobs/rocoto/efcs.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#! /usr/bin/env bash
-
-source "${HOMEgfs}/ush/preamble.sh"
-
-###############################################################
-# Source FV3GFS workflow modules
-# TODO clean this up once ncdiag/1.1.2 is installed on WCOSS2
-source "${HOMEgfs}/ush/detect_machine.sh"
-if [[ "${MACHINE_ID}" == "wcoss2" ]]; then
- . ${HOMEgfs}/ush/load_ufswm_modules.sh
-else
- . ${HOMEgfs}/ush/load_fv3gfs_modules.sh
-fi
-status=$?
-[[ ${status} -ne 0 ]] && exit ${status}
-
-export job="efcs"
-export jobid="${job}.$$"
-
-###############################################################
-# Execute the JJOB
-"${HOMEgfs}/jobs/JGDAS_ENKF_FCST"
-status=$?
-
-exit ${status}
diff --git a/modulefiles/module-setup.csh.inc b/modulefiles/module-setup.csh.inc
deleted file mode 100644
index 7086326627..0000000000
--- a/modulefiles/module-setup.csh.inc
+++ /dev/null
@@ -1,87 +0,0 @@
-set __ms_shell=csh
-
-eval "if ( -d / ) set __ms_shell=tcsh"
-
-if ( { test -d /lfs/f1 } ) then
- # We are on NOAA Cactus or Dogwood
- if ( ! { module help >& /dev/null } ) then
- source /usr/share/lmod/lmod/init/$__ms_shell
- fi
- module reset
-else if ( { test -d /lfs3 } ) then
- if ( ! { module help >& /dev/null } ) then
- source /apps/lmod/lmod/init/$__ms_shell
- endif
- module purge
-else if ( { test -d /scratch1 } ) then
- # We are on NOAA Hera
- if ( ! { module help >& /dev/null } ) then
- source /apps/lmod/lmod/init/$__ms_shell
- endif
- module purge
-elif [[ -d /work ]] ; then
- # We are on MSU Orion or Hercules
- if [[ -d /apps/other ]] ; then
- # Hercules
- init_path="/apps/other/lmod/lmod/init/$__ms_shell"
- else
- # Orion
- init_path="/apps/lmod/lmod/init/$__ms_shell"
- fi
- if ( ! eval module help > /dev/null 2>&1 ) ; then
- source "${init_path}"
- fi
- module purge
-else if ( { test -d /data/prod } ) then
- # We are on SSEC S4
- if ( ! { module help >& /dev/null } ) then
- source /usr/share/lmod/lmod/init/$__ms_shell
- endif
- source /etc/profile
- module purge
-else if ( { test -d /glade } ) then
- # We are on NCAR Yellowstone
- if ( ! { module help >& /dev/null } ) then
- source /usr/share/Modules/init/$__ms_shell
- endif
- module purge
-else if ( { test -d /lustre -a -d /ncrc } ) then
- # We are on GAEA.
- if ( ! { module help >& /dev/null } ) then
- # We cannot simply load the module command. The GAEA
- # /etc/csh.login modifies a number of module-related variables
- # before loading the module command. Without those variables,
- # the module command fails. Hence we actually have to source
- # /etc/csh.login here.
- source /etc/csh.login
- set __ms_source_etc_csh_login=yes
- else
- set __ms_source_etc_csh_login=no
- endif
- module purge
- unsetenv _LMFILES_
- unsetenv _LMFILES_000
- unsetenv _LMFILES_001
- unsetenv LOADEDMODULES
- module load modules
- if ( { test -d /opt/cray/ari/modulefiles } ) then
- module use -a /opt/cray/ari/modulefiles
- endif
- if ( { test -d /opt/cray/pe/ari/modulefiles } ) then
- module use -a /opt/cray/pe/ari/modulefiles
- endif
- if ( { test -d /opt/cray/pe/craype/default/modulefiles } ) then
- module use -a /opt/cray/pe/craype/default/modulefiles
- endif
- setenv NCEPLIBS /lustre/f1/pdata/ncep_shared/NCEPLIBS/lib
- if ( { test -d /lustre/f1/pdata/ncep_shared/NCEPLIBS/lib } ) then
- module use $NCEPLIBS/modulefiles
- endif
- if ( "$__ms_source_etc_csh_login" == yes ) then
- source /etc/csh.login
- unset __ms_source_etc_csh_login
- endif
-else
- # Workaround for csh limitation. Use sh to print to stderr.
- sh -c 'echo WARNING: UNKNOWN PLATFORM 1>&2'
-endif
diff --git a/modulefiles/module-setup.sh.inc b/modulefiles/module-setup.sh.inc
deleted file mode 100644
index db9dabffe1..0000000000
--- a/modulefiles/module-setup.sh.inc
+++ /dev/null
@@ -1,110 +0,0 @@
-# Create a test function for sh vs. bash detection. The name is
-# randomly generated to reduce the chances of name collision.
-__ms_function_name="setup__test_function__$$"
-eval "$__ms_function_name() { /bin/true ; }"
-
-# Determine which shell we are using
-__ms_ksh_test=$( eval '__text="text" ; if [[ $__text =~ ^(t).* ]] ; then printf "%s" ${.sh.match[1]} ; fi' 2> /dev/null | cat )
-__ms_bash_test=$( eval 'if ( set | grep '$__ms_function_name' | grep -v name > /dev/null 2>&1 ) ; then echo t ; fi ' 2> /dev/null | cat )
-
-if [[ ! -z "$__ms_ksh_test" ]] ; then
- __ms_shell=ksh
-elif [[ ! -z "$__ms_bash_test" ]] ; then
- __ms_shell=bash
-else
- # Not bash or ksh, so assume sh.
- __ms_shell=sh
-fi
-
-if [[ -d /lfs/f1 ]] ; then
- # We are on NOAA Cactus or Dogwood
- if ( ! eval module help > /dev/null 2>&1 ) ; then
- source /usr/share/lmod/lmod/init/$__ms_shell
- fi
- module reset
-elif [[ -d /mnt/lfs1 ]] ; then
- # We are on NOAA Jet
- if ( ! eval module help > /dev/null 2>&1 ) ; then
- source /apps/lmod/lmod/init/$__ms_shell
- fi
- module purge
-elif [[ -d /scratch1 ]] ; then
- # We are on NOAA Hera
- if ( ! eval module help > /dev/null 2>&1 ) ; then
- source /apps/lmod/lmod/init/$__ms_shell
- fi
- module purge
-elif [[ -d /work ]] ; then
- # We are on MSU Orion or Hercules
- if [[ -d /apps/other ]] ; then
- # Hercules
- init_path="/apps/other/lmod/lmod/init/$__ms_shell"
- else
- # Orion
- init_path="/apps/lmod/lmod/init/$__ms_shell"
- fi
- if ( ! eval module help > /dev/null 2>&1 ) ; then
- source "${init_path}"
- fi
- module purge
-elif [[ -d /glade ]] ; then
- # We are on NCAR Yellowstone
- if ( ! eval module help > /dev/null 2>&1 ) ; then
- . /usr/share/Modules/init/$__ms_shell
- fi
- module purge
-elif [[ -d /lustre && -d /ncrc ]] ; then
- # We are on GAEA.
- if ( ! eval module help > /dev/null 2>&1 ) ; then
- # We cannot simply load the module command. The GAEA
- # /etc/profile modifies a number of module-related variables
- # before loading the module command. Without those variables,
- # the module command fails. Hence we actually have to source
- # /etc/profile here.
- source /etc/profile
- __ms_source_etc_profile=yes
- else
- __ms_source_etc_profile=no
- fi
- module purge
- # clean up after purge
- unset _LMFILES_
- unset _LMFILES_000
- unset _LMFILES_001
- unset LOADEDMODULES
- module load modules
- if [[ -d /opt/cray/ari/modulefiles ]] ; then
- module use -a /opt/cray/ari/modulefiles
- fi
- if [[ -d /opt/cray/pe/ari/modulefiles ]] ; then
- module use -a /opt/cray/pe/ari/modulefiles
- fi
- if [[ -d /opt/cray/pe/craype/default/modulefiles ]] ; then
- module use -a /opt/cray/pe/craype/default/modulefiles
- fi
- if [[ -s /etc/opt/cray/pe/admin-pe/site-config ]] ; then
- source /etc/opt/cray/pe/admin-pe/site-config
- fi
- export NCEPLIBS=/lustre/f1/pdata/ncep_shared/NCEPLIBS/lib
- if [[ -d "$NCEPLIBS" ]] ; then
- module use $NCEPLIBS/modulefiles
- fi
- if [[ "$__ms_source_etc_profile" == yes ]] ; then
- source /etc/profile
- unset __ms_source_etc_profile
- fi
-elif [[ -d /data/prod ]] ; then
- # We are on SSEC's S4
- if ( ! eval module help > /dev/null 2>&1 ) ; then
- source /usr/share/lmod/lmod/init/$__ms_shell
- fi
- module purge
-else
- echo WARNING: UNKNOWN PLATFORM 1>&2
-fi
-
-unset __ms_shell
-unset __ms_ksh_test
-unset __ms_bash_test
-unset $__ms_function_name
-unset __ms_function_name
diff --git a/modulefiles/module_gwsetup.hera.lua b/modulefiles/module_gwsetup.hera.lua
index 0d166d2728..6531bbe09e 100644
--- a/modulefiles/module_gwsetup.hera.lua
+++ b/modulefiles/module_gwsetup.hera.lua
@@ -14,5 +14,7 @@ load(pathJoin("python", python_ver))
load("py-jinja2")
load("py-pyyaml")
load("py-numpy")
+local git_ver=os.getenv("git_ver") or "2.40.0"
+load(pathJoin("git", git_ver))
whatis("Description: GFS run setup environment")
diff --git a/parm/config/gefs/config.base.emc.dyn b/parm/config/gefs/config.base.emc.dyn
index d6f3069baf..ff2fe3377b 100644
--- a/parm/config/gefs/config.base.emc.dyn
+++ b/parm/config/gefs/config.base.emc.dyn
@@ -219,13 +219,7 @@ export gfs_cyc=@gfs_cyc@ # 0: no GFS cycle, 1: 00Z only, 2: 00Z and 12Z only, 4:
# GFS output and frequency
export FHMIN_GFS=0
export FHMIN=${FHMIN_GFS}
-
-export FHMAX_GFS_00=120
-export FHMAX_GFS_06=120
-export FHMAX_GFS_12=120
-export FHMAX_GFS_18=120
-current_fhmax_var=FHMAX_GFS_${cyc}; declare -x FHMAX_GFS=${!current_fhmax_var}
-
+export FHMAX_GFS=@FHMAX_GFS@
export FHOUT_GFS=6 # Must be 6 for S2S until #1629 is addressed; 3 for ops
export FHMAX_HF_GFS=0
export FHOUT_HF_GFS=1
diff --git a/parm/config/gefs/config.efcs b/parm/config/gefs/config.efcs
index 9593408848..a6f34818d7 100644
--- a/parm/config/gefs/config.efcs
+++ b/parm/config/gefs/config.efcs
@@ -5,14 +5,16 @@
echo "BEGIN: config.efcs"
-# Turn off components in ensemble via _ENKF, or use setting from deterministic
-export DO_AERO=${DO_AERO_ENKF:-${DO_AERO:-"NO"}}
-export DO_OCN=${DO_OCN_ENKF:-${DO_OCN:-"NO"}}
-export DO_ICE=${DO_ICE_ENKF:-${DO_ICE:-"NO"}}
-export DO_WAVE=${DO_WAVE_ENKF:-${DO_WAVE:-"NO"}}
+# Turn off components in ensemble
+# export DO_AERO="NO"
+# export DO_OCN="NO"
+# export DO_ICE="NO"
+# export DO_WAVE="NO"
+
+export CASE="${CASE_ENS}"
# Source model specific information that is resolution dependent
-string="--fv3 ${CASE_ENS}"
+string="--fv3 ${CASE}"
# Ocean/Ice/Waves ensemble configurations are identical to deterministic member
[[ "${DO_OCN}" == "YES" ]] && string="${string} --mom6 ${OCNRES}"
[[ "${DO_ICE}" == "YES" ]] && string="${string} --cice6 ${ICERES}"
@@ -24,17 +26,22 @@ source "${EXPDIR}/config.ufs" ${string}
# Get task specific resources
source "${EXPDIR}/config.resources" efcs
+# nggps_diag_nml
+export FHOUT=${FHOUT_ENKF:-3}
+if [[ "${RUN}" == "enkfgfs" ]]; then
+ export FHOUT=${FHOUT_ENKF_GFS:-${FHOUT}}
+fi
+
+# model_configure
+export FHMAX=${FHMAX_ENKF:-9}
+if [[ "${RUN}" == "enkfgfs" ]]; then
+ export FHMAX=${FHMAX_ENKF_GFS:-${FHMAX}}
+fi
+
# Use serial I/O for ensemble (lustre?)
export OUTPUT_FILETYPE_ATM="netcdf"
export OUTPUT_FILETYPE_SFC="netcdf"
-# Number of enkf members per fcst job
-export NMEM_EFCSGRP=1
-export RERUN_EFCSGRP="NO"
-
-# Turn off inline UPP for EnKF forecast
-export WRITE_DOPOST=".true."
-
# Stochastic physics parameters (only for ensemble forecasts)
export DO_SKEB="YES"
export SKEB=0.3
@@ -54,6 +61,6 @@ export SPPT_LSCALE=500000.
export SPPT_LOGIT=".true."
export SPPT_SFCLIMIT=".true."
-export restart_interval=${restart_interval_gfs}
+export restart_interval="${restart_interval_gfs}"
echo "END: config.efcs"
diff --git a/parm/config/gefs/config.fcst b/parm/config/gefs/config.fcst
index 6f4a11f7c4..6a2a852e0b 100644
--- a/parm/config/gefs/config.fcst
+++ b/parm/config/gefs/config.fcst
@@ -21,6 +21,12 @@ string="--fv3 ${CASE}"
# shellcheck disable=SC2086
source "${EXPDIR}/config.ufs" ${string}
+# shellcheck disable=SC2153
+export FHMAX=${FHMAX_GFS}
+# shellcheck disable=SC2153
+export FHOUT=${FHOUT_GFS}
+export FHMAX_HF=${FHMAX_HF_GFS}
+export FHOUT_HF=${FHOUT_HF_GFS}
# Get task specific resources
source "${EXPDIR}/config.resources" fcst
diff --git a/parm/config/gefs/config.ufs b/parm/config/gefs/config.ufs
index 2031d0b538..866de52964 100644
--- a/parm/config/gefs/config.ufs
+++ b/parm/config/gefs/config.ufs
@@ -68,54 +68,6 @@ if [[ "${skip_mom6}" == "false" ]] || [[ "${skip_cice6}" == "false" ]] || [[ "${
skip_mediator=false
fi
-case "${machine}" in
- "WCOSS2")
- npe_node_max=128
- ;;
- "HERA" | "ORION" )
- npe_node_max=40
- ;;
- "HERCULES" )
- npe_node_max=80
- ;;
- "JET")
- case "${PARTITION_BATCH}" in
- "xjet")
- npe_node_max=24
- ;;
- "vjet" | "sjet")
- npe_node_max=16
- ;;
- "kjet")
- npe_node_max=40
- ;;
- *)
- echo "FATAL ERROR: Unsupported ${machine} PARTITION_BATCH = ${PARTITION_BATCH}, ABORT!"
- exit 1
- ;;
- esac
- ;;
- "S4")
- case "${PARTITION_BATCH}" in
- "s4")
- npe_node_max=32
- ;;
- "ivy")
- npe_node_max=20
- ;;
- *)
- echo "FATAL ERROR: Unsupported ${machine} PARTITION_BATCH = ${PARTITION_BATCH}, ABORT!"
- exit 1
- ;;
- esac
- ;;
- *)
- echo "FATAL ERROR: Unrecognized machine ${machine}"
- exit 14
- ;;
-esac
-export npe_node_max
-
# (Standard) Model resolution dependent variables
case "${fv3_res}" in
"C48")
diff --git a/parm/config/gefs/yaml/defaults.yaml b/parm/config/gefs/yaml/defaults.yaml
index ce5d8aeb3d..84dfcbf718 100644
--- a/parm/config/gefs/yaml/defaults.yaml
+++ b/parm/config/gefs/yaml/defaults.yaml
@@ -4,3 +4,4 @@ base:
DO_JEDIOCNVAR: "NO"
DO_JEDILANDDA: "NO"
DO_MERGENSST: "NO"
+ FHMAX_GFS: 120
diff --git a/parm/config/gfs/config.base.emc.dyn b/parm/config/gfs/config.base.emc.dyn
index 2ce4d937c3..d3e28eb477 100644
--- a/parm/config/gfs/config.base.emc.dyn
+++ b/parm/config/gfs/config.base.emc.dyn
@@ -256,13 +256,7 @@ export gfs_cyc=@gfs_cyc@ # 0: no GFS cycle, 1: 00Z only, 2: 00Z and 12Z only, 4:
# GFS output and frequency
export FHMIN_GFS=0
-
-export FHMAX_GFS_00=120
-export FHMAX_GFS_06=120
-export FHMAX_GFS_12=120
-export FHMAX_GFS_18=120
-current_fhmax_var=FHMAX_GFS_${cyc}; declare -x FHMAX_GFS=${!current_fhmax_var}
-
+export FHMAX_GFS=@FHMAX_GFS@
export FHOUT_GFS=6 # Must be 6 for S2S until #1629 is addressed; 3 for ops
export FHMAX_HF_GFS=0
export FHOUT_HF_GFS=1
@@ -389,6 +383,7 @@ else
export DO_METP="YES" # Run METPLUS jobs - set METPLUS settings in config.metp; not supported with spack-stack
fi
export DO_FIT2OBS="YES" # Run fit to observations package
+export DO_VRFY_OCEANDA="NO" # Run SOCA Ocean DA verification tasks
# Archiving options
export HPSSARCH="@HPSSARCH@" # save data to HPSS archive
diff --git a/parm/config/gfs/config.efcs b/parm/config/gfs/config.efcs
index 283ec3ab7e..7a48feb4ce 100644
--- a/parm/config/gfs/config.efcs
+++ b/parm/config/gfs/config.efcs
@@ -5,14 +5,16 @@
echo "BEGIN: config.efcs"
-# Turn off components in ensemble via _ENKF, or use setting from deterministic
-export DO_AERO=${DO_AERO_ENKF:-${DO_AERO:-"NO"}}
-export DO_OCN=${DO_OCN_ENKF:-${DO_OCN:-"NO"}}
-export DO_ICE=${DO_ICE_ENKF:-${DO_ICE:-"NO"}}
-export DO_WAVE=${DO_WAVE_ENKF:-${DO_WAVE:-"NO"}}
+# Turn off components in ensemble
+# export DO_AERO="NO"
+# export DO_OCN="NO"
+# export DO_ICE="NO"
+export DO_WAVE="NO"
+
+export CASE="${CASE_ENS}"
# Source model specific information that is resolution dependent
-string="--fv3 ${CASE_ENS}"
+string="--fv3 ${CASE}"
# Ocean/Ice/Waves ensemble configurations are identical to deterministic member
[[ "${DO_OCN}" == "YES" ]] && string="${string} --mom6 ${OCNRES}"
[[ "${DO_ICE}" == "YES" ]] && string="${string} --cice6 ${ICERES}"
@@ -25,15 +27,23 @@ source "${EXPDIR}/config.ufs" ${string}
# Get task specific resources
. "${EXPDIR}/config.resources" efcs
+# nggps_diag_nml
+export FHOUT=${FHOUT_ENKF:-3}
+if [[ ${RUN} == "enkfgfs" ]]; then
+ export FHOUT=${FHOUT_ENKF_GFS:-${FHOUT}}
+fi
+
+# model_configure
+export FHMIN=${FHMIN_ENKF:-3}
+export FHMAX=${FHMAX_ENKF:-9}
+if [[ ${RUN} == "enkfgfs" ]]; then
+ export FHMAX=${FHMAX_ENKF_GFS:-${FHMAX}}
+fi
+
# Use serial I/O for ensemble (lustre?)
export OUTPUT_FILETYPE_ATM="netcdf"
export OUTPUT_FILETYPE_SFC="netcdf"
-# Number of enkf members per fcst job
-export NMEM_EFCSGRP=2
-export NMEM_EFCSGRP_GFS=1
-export RERUN_EFCSGRP="NO"
-
# Turn off inline UPP for EnKF forecast
export WRITE_DOPOST=".false."
@@ -56,14 +66,33 @@ export SPPT_LSCALE=500000.
export SPPT_LOGIT=".true."
export SPPT_SFCLIMIT=".true."
-if [[ "${QUILTING}" = ".true." ]] && [[ "${OUTPUT_GRID}" = "gaussian_grid" ]]; then
+if [[ "${QUILTING}" == ".true." ]] && [[ "${OUTPUT_GRID}" == "gaussian_grid" ]]; then
export DIAG_TABLE="${HOMEgfs}/parm/ufs/fv3/diag_table_da"
else
export DIAG_TABLE="${HOMEgfs}/parm/ufs/fv3/diag_table_da_orig"
fi
+# Model config option for Ensemble
+# export TYPE=nh # choices: nh, hydro
+# export MONO=non-mono # choices: mono, non-mono
+
+# gfs_physics_nml
+export FHSWR=3600.
+export FHLWR=3600.
+export IEMS=1
+export ISOL=2
+export ICO2=2
+export dspheat=".true."
+export shal_cnv=".true."
+export FHZER=6
+
+# Set PREFIX_ATMINC to "r" when recentering is enabled
+if [[ ${RECENTER_ENKF:-"YES"} == "YES" ]]; then
+ export PREFIX_ATMINC="r"
+fi
+
# For IAU, write restarts at beginning of window also
-if [[ "${DOIAU_ENKF:-}" = "YES" ]]; then
+if [[ "${DOIAU_ENKF:-}" == "YES" ]]; then
export restart_interval="3"
else
export restart_interval="6"
diff --git a/parm/config/gfs/config.fcst b/parm/config/gfs/config.fcst
index 6c4ce639f8..d2e2664e9c 100644
--- a/parm/config/gfs/config.fcst
+++ b/parm/config/gfs/config.fcst
@@ -21,6 +21,24 @@ string="--fv3 ${CASE}"
# shellcheck disable=SC2086
source "${EXPDIR}/config.ufs" ${string}
+# Forecast length for GFS forecast
+case ${RUN} in
+ *gfs)
+ # shellcheck disable=SC2153
+ export FHMAX=${FHMAX_GFS}
+ # shellcheck disable=SC2153
+ export FHOUT=${FHOUT_GFS}
+ export FHMAX_HF=${FHMAX_HF_GFS}
+ export FHOUT_HF=${FHOUT_HF_GFS}
+ ;;
+ *gdas)
+ export FHMAX_HF=0
+ export FHOUT_HF=0
+ ;;
+ *)
+ echo "FATAL ERROR: Unsupported RUN '${RUN}'"
+ exit 1
+esac
# Get task specific resources
source "${EXPDIR}/config.resources" fcst
diff --git a/parm/config/gfs/config.ocn b/parm/config/gfs/config.ocn
index f9e6595ce9..317a76e58a 100644
--- a/parm/config/gfs/config.ocn
+++ b/parm/config/gfs/config.ocn
@@ -16,6 +16,14 @@ if [[ "${DO_JEDIOCNVAR}" == "YES" ]]; then
else
export ODA_INCUPD="False"
fi
-export ODA_INCUPD_NHOURS="3.0" # In MOM_input, this is time interval for applying increment
+
+# Time interval for applying the increment
+if [[ "${DOIAU}" == "YES" ]]; then
+ export ODA_INCUPD_NHOURS="6.0"
+else
+ export ODA_INCUPD_NHOURS="3.0"
+fi
+
+
echo "END: config.ocn"
diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources
index 26d5aa9126..b746a4b32a 100644
--- a/parm/config/gfs/config.resources
+++ b/parm/config/gfs/config.resources
@@ -60,6 +60,9 @@ case ${machine} in
export PARTITION_BATCH="compute"
npe_node_max=40
;;
+ "CONTAINER")
+ npe_node_max=1
+ ;;
*)
echo "FATAL ERROR: Unknown machine encountered by ${BASH_SOURCE[0]}"
exit 2
@@ -349,7 +352,7 @@ case ${step} in
export npe_prepoceanobs=1
export nth_prepoceanobs=1
export npe_node_prepoceanobs=$(( npe_node_max / nth_prepoceanobs ))
- export memory_prepoceanobs="24GB"
+ export memory_prepoceanobs="48GB"
;;
"ocnanalbmat")
@@ -403,13 +406,20 @@ case ${step} in
export nth_ocnanalchkpt=1
export npe_node_ocnanalchkpt=$(( npe_node_max / nth_ocnanalchkpt ))
case ${CASE} in
- "C384") memory_ocnanalchkpt="128GB";;
- "C96") memory_ocnanalchkpt="32GB";;
- "C48") memory_ocnanalchkpt="32GB";;
+ "C384")
+ memory_ocnanalchkpt="128GB"
+ npes=40;;
+ "C96")
+ memory_ocnanalchkpt="32GB"
+ npes=16;;
+ "C48")
+ memory_ocnanalchkpt="32GB"
+ npes=8;;
*)
echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}"
exit 4
esac
+ export npe_ocnanalchkpt=${npes}
export memory_ocnanalchkpt
;;
@@ -620,15 +630,15 @@ case ${step} in
case "${CASE}" in
"C48" | "C96" | "C192")
- declare -x "wtime_${step}"="00:30:00"
+ declare -x "wtime_${step}"="00:15:00"
declare -x "wtime_${step}_gfs"="03:00:00"
;;
"C384")
- declare -x "wtime_${step}"="00:20:00"
+ declare -x "wtime_${step}"="00:10:00"
declare -x "wtime_${step}_gfs"="06:00:00"
;;
"C768" | "C1152")
- declare -x "wtime_${step}"="01:00:00"
+ declare -x "wtime_${step}"="00:30:00"
declare -x "wtime_${step}_gfs"="06:00:00"
;;
*)
@@ -806,11 +816,11 @@ case ${step} in
;;
"cleanup")
- export wtime_cleanup="01:00:00"
+ export wtime_cleanup="00:15:00"
export npe_cleanup=1
export npe_node_cleanup=1
export nth_cleanup=1
- export memory_cleanu="4096M"
+ export memory_cleanup="4096M"
;;
"stage_ic")
@@ -818,7 +828,7 @@ case ${step} in
export npe_stage_ic=1
export npe_node_stage_ic=1
export nth_stage_ic=1
- export is_exclusive=Tue
+ export is_exclusive=True
;;
"atmensanlinit")
@@ -857,7 +867,7 @@ case ${step} in
"eobs" | "eomg")
export wtime_eobs="00:15:00"
- export wtime_eomg="01:00:00"
+ export wtime_eomg="00:30:00"
case ${CASE} in
"C768") export npe_eobs=200;;
"C384") export npe_eobs=100;;
@@ -923,7 +933,7 @@ case ${step} in
export nth_eupd=4
fi
;;
- *)
+ *)
echo "FATAL ERROR: Resources not defined for job ${job} at resolution ${CASE}"
exit 4
;;
@@ -953,7 +963,7 @@ case ${step} in
export npe_node_esfc=$(( npe_node_max / nth_esfc ))
export nth_cycle=${nth_esfc}
export npe_node_cycle=$(( npe_node_max / nth_cycle ))
- export memory_esfc="8GB"
+ export memory_esfc="80GB"
;;
"epos")
@@ -983,7 +993,7 @@ case ${step} in
export npe_awips=1
export npe_node_awips=1
export nth_awips=1
- export memory_awip="3GB"
+ export memory_awips="3GB"
;;
"npoess")
@@ -991,7 +1001,7 @@ case ${step} in
export npe_npoess=1
export npe_node_npoess=1
export nth_npoess=1
- export memory_npoes="3GB"
+ export memory_npoess="3GB"
;;
"gempak")
@@ -1012,7 +1022,7 @@ case ${step} in
export nth_mos_stn_prep=1
export memory_mos_stn_prep="5GB"
export NTASK="${npe_mos_stn_prep}"
- export PTILE="${npe_node_mos_stn_pep}"
+ export PTILE="${npe_node_mos_stn_prep}"
;;
"mos_grd_prep")
@@ -1022,7 +1032,7 @@ case ${step} in
export nth_mos_grd_prep=1
export memory_mos_grd_prep="16GB"
export NTASK="${npe_mos_grd_prep}"
- export PTILE="${npe_node_mos_grd_pep}"
+ export PTILE="${npe_node_mos_grd_prep}"
;;
"mos_ext_stn_prep")
@@ -1032,7 +1042,7 @@ case ${step} in
export nth_mos_ext_stn_prep=1
export memory_mos_ext_stn_prep="5GB"
export NTASK="${npe_mos_ext_stn_prep}"
- export PTILE="${npe_node_mos_ext_stn_pep}"
+ export PTILE="${npe_node_mos_ext_stn_prep}"
;;
"mos_ext_grd_prep")
@@ -1042,7 +1052,7 @@ case ${step} in
export nth_mos_ext_grd_prep=1
export memory_mos_ext_grd_prep="3GB"
export NTASK="${npe_mos_ext_grd_prep}"
- export PTILE="${npe_node_mos_ext_grd_pep}"
+ export PTILE="${npe_node_mos_ext_grd_prep}"
;;
"mos_stn_fcst")
@@ -1052,7 +1062,7 @@ case ${step} in
export nth_mos_stn_fcst=1
export memory_mos_stn_fcst="40GB"
export NTASK="${npe_mos_stn_fcst}"
- export PTILE="${npe_node_mos_stn_fst}"
+ export PTILE="${npe_node_mos_stn_fcst}"
;;
"mos_grd_fcst")
@@ -1062,7 +1072,7 @@ case ${step} in
export nth_mos_grd_fcst=1
export memory_mos_grd_fcst="50GB"
export NTASK="${npe_mos_grd_fcst}"
- export PTILE="${npe_node_mos_grd_fst}"
+ export PTILE="${npe_node_mos_grd_fcst}"
;;
"mos_ext_stn_fcst")
@@ -1083,7 +1093,7 @@ case ${step} in
export nth_mos_ext_grd_fcst=1
export memory_mos_ext_grd_fcst="50GB"
export NTASK="${npe_mos_ext_grd_fcst}"
- export PTILE="${npe_node_mos_ext_grd_fst}"
+ export PTILE="${npe_node_mos_ext_grd_fcst}"
;;
"mos_stn_prdgen")
@@ -1105,7 +1115,7 @@ case ${step} in
export memory_mos_grd_prdgen="20GB"
export NTASK="${npe_mos_grd_prdgen}"
export PTILE="${npe_node_mos_grd_prdgen}"
- export OMP_NUM_THREADS="${nth_mos_grd_prden}"
+ export OMP_NUM_THREADS="${nth_mos_grd_prdgen}"
;;
"mos_ext_stn_prdgen")
@@ -1127,7 +1137,7 @@ case ${step} in
export memory_mos_ext_grd_prdgen="30GB"
export NTASK="${npe_mos_ext_grd_prdgen}"
export PTILE="${npe_node_mos_ext_grd_prdgen}"
- export OMP_NUM_THREADS="${nth_mos_ext_grd_prden}"
+ export OMP_NUM_THREADS="${nth_mos_ext_grd_prdgen}"
;;
"mos_wx_prdgen")
@@ -1138,7 +1148,7 @@ case ${step} in
export memory_mos_wx_prdgen="10GB"
export NTASK="${npe_mos_wx_prdgen}"
export PTILE="${npe_node_mos_wx_prdgen}"
- export OMP_NUM_THREADS="${nth_mos_wx_prden}"
+ export OMP_NUM_THREADS="${nth_mos_wx_prdgen}"
;;
"mos_wx_ext_prdgen")
@@ -1159,4 +1169,4 @@ case ${step} in
esac
-echo "END: config.resources"
\ No newline at end of file
+echo "END: config.resources"
diff --git a/parm/config/gfs/config.ufs b/parm/config/gfs/config.ufs
index 0a59da47ca..c8ce216899 100644
--- a/parm/config/gfs/config.ufs
+++ b/parm/config/gfs/config.ufs
@@ -68,54 +68,6 @@ if [[ "${skip_mom6}" == "false" ]] || [[ "${skip_cice6}" == "false" ]] || [[ "${
skip_mediator=false
fi
-case "${machine}" in
- "WCOSS2")
- npe_node_max=128
- ;;
- "HERA" | "ORION" )
- npe_node_max=40
- ;;
- "HERCULES" )
- npe_node_max=80
- ;;
- "JET")
- case "${PARTITION_BATCH}" in
- "xjet")
- npe_node_max=24
- ;;
- "vjet" | "sjet")
- npe_node_max=16
- ;;
- "kjet")
- npe_node_max=40
- ;;
- *)
- echo "FATAL ERROR: Unsupported ${machine} PARTITION_BATCH = ${PARTITION_BATCH}, ABORT!"
- exit 1
- ;;
- esac
- ;;
- "S4")
- case "${PARTITION_BATCH}" in
- "s4")
- npe_node_max=32
- ;;
- "ivy")
- npe_node_max=20
- ;;
- *)
- echo "FATAL ERROR: Unsupported ${machine} PARTITION_BATCH = ${PARTITION_BATCH}, ABORT!"
- exit 1
- ;;
- esac
- ;;
- *)
- echo "FATAL ERROR: Unrecognized machine ${machine}"
- exit 14
- ;;
-esac
-export npe_node_max
-
# (Standard) Model resolution dependent variables
case "${fv3_res}" in
"C48")
diff --git a/parm/config/gfs/yaml/defaults.yaml b/parm/config/gfs/yaml/defaults.yaml
index ade83fa484..10af47de07 100644
--- a/parm/config/gfs/yaml/defaults.yaml
+++ b/parm/config/gfs/yaml/defaults.yaml
@@ -6,6 +6,7 @@ base:
DO_JEDILANDDA: "NO"
DO_MERGENSST: "NO"
DO_GOES: "NO"
+ FHMAX_GFS: 120
atmanl:
IO_LAYOUT_X: 1
@@ -24,13 +25,14 @@ landanl:
IO_LAYOUT_Y: 1
ocnanal:
- SOCA_INPUT_FIX_DIR: "/scratch2/NCEPDEV/ocean/Guillaume.Vernieres/data/static/72x35x25/soca" # TODO: These need to go to glopara fix space. @guillaumevernieres will open an issue
- CASE_ANL: "C48"
- COMIN_OBS: "/scratch2/NCEPDEV/marineda/r2d2-v2-v3" # TODO: make platform agnostic
- SOCA_OBS_LIST: "{{ HOMEgfs }}/sorc/gdas.cd/parm/soca/obs/obs_list.yaml"
+ SOCA_INPUT_FIX_DIR: "/scratch2/NCEPDEV/ocean/Guillaume.Vernieres/data/static/72x35x25/soca" # TODO: These need to go to glopara fix space.
+ CASE_ANL: "C48" # TODO: Check in gdasapp if used anywhere for SOCA
+ SOCA_OBS_LIST: "{{ HOMEgfs }}/sorc/gdas.cd/parm/soca/obs/obs_list.yaml" # TODO: This is also repeated in oceanprepobs
SOCA_NINNER: 100
- R2D2_OBS_SRC: "gdas_marine"
- R2D2_OBS_DUMP: "s2s_v1"
SABER_BLOCKS_YAML: ""
NICAS_RESOL: 1
NICAS_GRID_SIZE: 15000
+prepoceanobs:
+ SOCA_OBS_LIST: "{{ HOMEgfs }}/sorc/gdas.cd/parm/soca/obs/obs_list.yaml" # TODO: This is also repeated in ocnanal
+ OBSPREP_YAML: "{{ HOMEgfs }}/sorc/gdas.cd/parm/soca/obsprep/obsprep_config.yaml"
+ DMPDIR: "/scratch1/NCEPDEV/global/glopara/data/experimental_obs"
diff --git a/parm/post/upp.yaml b/parm/post/upp.yaml
index 651f3c12a8..ea7237dc83 100644
--- a/parm/post/upp.yaml
+++ b/parm/post/upp.yaml
@@ -28,7 +28,7 @@ analysis:
forecast:
config:
- rdaod: False
+ rdaod: True
data_in:
copy:
{% if forecast_hour == 0 %}
@@ -47,7 +47,7 @@ forecast:
goes:
config:
- rdaod: True
+ rdaod: False
data_in:
copy:
{% set crtm_coefficients = [
diff --git a/parm/ufs/gocart/ExtData.other b/parm/ufs/gocart/ExtData.other
index c58b3e9ae8..7a0d63d6ca 100644
--- a/parm/ufs/gocart/ExtData.other
+++ b/parm/ufs/gocart/ExtData.other
@@ -54,10 +54,10 @@ pSO2_OCS NA Y Y %y4-%m2-%d2t12:00:00 none none biofuel /dev/null
# ---------------
# # VOCs - OFFLINE MEGAN BIOG
-OC_ISOPRENE NA N Y %y4-%m2-%d2t12:00:00 none none isoprene ExtData/nexus/MEGAN_OFFLINE_BVOC/v2021-12/MEGAN_OFFLINE_CLIMO_2000_2022_%m2.nc
-OC_LIMO NA N Y %y4-%m2-%d2t12:00:00 none none limo ExtData/nexus/MEGAN_OFFLINE_BVOC/v2021-12/MEGAN_OFFLINE_CLIMO_2000_2022_%m2.nc
-OC_MTPA NA N Y %y4-%m2-%d2t12:00:00 none none mtpa ExtData/nexus/MEGAN_OFFLINE_BVOC/v2021-12/MEGAN_OFFLINE_CLIMO_2000_2022_%m2.nc
-OC_MTPO NA N Y %y4-%m2-%d2t12:00:00 none none mtpo ExtData/nexus/MEGAN_OFFLINE_BVOC/v2021-12/MEGAN_OFFLINE_CLIMO_2000_2022_%m2.nc
+OC_ISOPRENE NA Y Y %y4-%m2-%d2t12:00:00 none none isoprene ExtData/nexus/MEGAN_OFFLINE_BVOC/v2021-12/MEGAN_OFFLINE_CLIMO_2000_2022_%m2.nc
+OC_LIMO NA Y Y %y4-%m2-%d2t12:00:00 none none limo ExtData/nexus/MEGAN_OFFLINE_BVOC/v2021-12/MEGAN_OFFLINE_CLIMO_2000_2022_%m2.nc
+OC_MTPA NA Y Y %y4-%m2-%d2t12:00:00 none none mtpa ExtData/nexus/MEGAN_OFFLINE_BVOC/v2021-12/MEGAN_OFFLINE_CLIMO_2000_2022_%m2.nc
+OC_MTPO NA Y Y %y4-%m2-%d2t12:00:00 none none mtpo ExtData/nexus/MEGAN_OFFLINE_BVOC/v2021-12/MEGAN_OFFLINE_CLIMO_2000_2022_%m2.nc
# Biofuel Source -- Included in AeroCom anthropogenic emissions
OC_BIOFUEL NA Y Y %y4-%m2-%d2t12:00:00 none none biofuel /dev/null
diff --git a/scripts/exgdas_enkf_fcst.sh b/scripts/exgdas_enkf_fcst.sh
deleted file mode 100755
index fd6136ddd2..0000000000
--- a/scripts/exgdas_enkf_fcst.sh
+++ /dev/null
@@ -1,225 +0,0 @@
-#! /usr/bin/env bash
-
-################################################################################
-#### UNIX Script Documentation Block
-# . .
-# Script name: exgdas_enkf_fcst.sh
-# Script description: Run ensemble forecasts
-#
-# Author: Rahul Mahajan Org: NCEP/EMC Date: 2017-03-02
-#
-# Abstract: This script runs ensemble forecasts serially one-after-another
-#
-# $Id$
-#
-# Attributes:
-# Language: POSIX shell
-#
-####
-################################################################################
-
-source "${HOMEgfs}/ush/preamble.sh"
-
-# Enemble group, begin and end
-ENSGRP=${ENSGRP:-1}
-ENSBEG=${ENSBEG:-1}
-ENSEND=${ENSEND:-1}
-
-# Re-run failed members, or entire group
-RERUN_EFCSGRP=${RERUN_EFCSGRP:-"YES"}
-
-# Recenter flag and increment file prefix
-RECENTER_ENKF=${RECENTER_ENKF:-"YES"}
-export PREFIX_ATMINC=${PREFIX_ATMINC:-""}
-
-################################################################################
-# Preprocessing
-cd "${DATA}" || exit 99
-DATATOP=${DATA}
-
-################################################################################
-# Set output data
-EFCSGRP="${COM_TOP}/efcs.grp${ENSGRP}"
-if [[ -f ${EFCSGRP} ]]; then
- if [[ ${RERUN_EFCSGRP} = "YES" ]]; then
- rm -f "${EFCSGRP}"
- else
- echo "RERUN_EFCSGRP = ${RERUN_EFCSGRP}, will re-run FAILED members only!"
- ${NMV} "${EFCSGRP}" "${EFCSGRP}.fail"
- fi
-fi
-
-################################################################################
-# Set namelist/model config options common to all members once
-
-# There are many many model namelist options
-# Some are resolution (CASE) dependent, some depend on the model configuration
-# and will need to be added here before $FORECASTSH is called
-# For now assume that
-# 1. the ensemble and the deterministic are same resolution
-# 2. the ensemble runs with the same configuration as the deterministic
-
-# Model config option for Ensemble
-export TYPE=${TYPE_ENKF:-${TYPE:-nh}} # choices: nh, hydro
-export MONO=${MONO_ENKF:-${MONO:-non-mono}} # choices: mono, non-mono
-
-# fv_core_nml
-export CASE=${CASE_ENS:-${CASE:-C768}}
-export layout_x=${layout_x_ENKF:-${layout_x:-8}}
-export layout_y=${layout_y_ENKF:-${layout_y:-16}}
-export LEVS=${LEVS_ENKF:-${LEVS:-64}}
-
-# nggps_diag_nml
-export FHOUT=${FHOUT_ENKF:-3}
-if [[ ${RUN} == "enkfgfs" ]]; then
- export FHOUT=${FHOUT_ENKF_GFS:-${FHOUT_ENKF:${FHOUT:-3}}}
-fi
-# model_configure
-export DELTIM=${DELTIM_ENKF:-${DELTIM:-225}}
-export FHMAX=${FHMAX_ENKF:-9}
-if [[ ${RUN} == "enkfgfs" ]]; then
- export FHMAX=${FHMAX_ENKF_GFS:-${FHMAX_ENKF:-${FHMAX}}}
-fi
-
-# gfs_physics_nml
-export FHSWR=${FHSWR_ENKF:-${FHSWR:-3600.}}
-export FHLWR=${FHLWR_ENKF:-${FHLWR:-3600.}}
-export IEMS=${IEMS_ENKF:-${IEMS:-1}}
-export ISOL=${ISOL_ENKF:-${ISOL:-2}}
-export IAER=${IAER_ENKF:-${IAER:-111}}
-export ICO2=${ICO2_ENKF:-${ICO2:-2}}
-export cdmbgwd=${cdmbgwd_ENKF:-${cdmbgwd:-"3.5,0.25"}}
-export dspheat=${dspheat_ENKF:-${dspheat:-".true."}}
-export shal_cnv=${shal_cnv_ENKF:-${shal_cnv:-".true."}}
-export FHZER=${FHZER_ENKF:-${FHZER:-6}}
-export FHCYC=${FHCYC_ENKF:-${FHCYC:-6}}
-
-# Set PREFIX_ATMINC to r when recentering on
-if [[ ${RECENTER_ENKF} = "YES" ]]; then
- export PREFIX_ATMINC="r"
-fi
-
-# Ignore possible spelling error (nothing is misspelled)
-# shellcheck disable=SC2153
-GDATE=$(${NDATE} -"${assim_freq}" "${PDY}${cyc}")
-declare -x gPDY="${GDATE:0:8}"
-declare -x gcyc="${GDATE:8:2}"
-
-################################################################################
-# Run forecast for ensemble member
-rc=0
-for imem in $(seq "${ENSBEG}" "${ENSEND}"); do
-
- cd "${DATATOP}"
-
- ENSMEM=$(printf %03i "${imem}")
- export ENSMEM
- memchar="mem${ENSMEM}"
-
- echo "Processing MEMBER: ${ENSMEM}"
-
- ra=0
-
- skip_mem="NO"
- if [[ -f ${EFCSGRP}.fail ]]; then
- set +e
- memstat=$(grep "MEMBER ${ENSMEM}" "${EFCSGRP}.fail" | grep -c "PASS")
- set_strict
- [[ ${memstat} -eq 1 ]] && skip_mem="YES"
- fi
-
- # Construct COM variables from templates (see config.com)
- # Can't make these read-only because we are looping over members
- MEMDIR="${memchar}" YMD=${PDY} HH=${cyc} generate_com -x COM_ATMOS_RESTART COM_ATMOS_INPUT COM_ATMOS_ANALYSIS \
- COM_ATMOS_HISTORY COM_ATMOS_MASTER COM_CONF
-
- MEMDIR="${memchar}" YMD="${gPDY}" HH="${gcyc}" generate_com -x COM_ATMOS_RESTART_PREV:COM_ATMOS_RESTART_TMPL
-
- if [[ ${DO_WAVE} == "YES" ]]; then
- MEMDIR="${memchar}" YMD=${PDY} HH=${cyc} generate_com -x COM_WAVE_RESTART COM_WAVE_PREP COM_WAVE_HISTORY
- MEMDIR="${memchar}" YMD="${gPDY}" HH="${gcyc}" generate_com -x COM_WAVE_RESTART_PREV:COM_WAVE_RESTART_TMPL
- fi
-
- if [[ ${DO_OCN} == "YES" ]]; then
- MEMDIR="${memchar}" YMD=${PDY} HH=${cyc} generate_com -x COM_MED_RESTART COM_OCEAN_RESTART \
- COM_OCEAN_INPUT COM_OCEAN_HISTORY COM_OCEAN_ANALYSIS
- MEMDIR="${memchar}" YMD="${gPDY}" HH="${gcyc}" generate_com -x COM_OCEAN_RESTART_PREV:COM_OCEAN_RESTART_TMPL
- fi
-
- if [[ ${DO_ICE} == "YES" ]]; then
- MEMDIR="${memchar}" YMD=${PDY} HH=${cyc} generate_com -x COM_ICE_HISTORY COM_ICE_INPUT COM_ICE_RESTART
- MEMDIR="${memchar}" YMD="${gPDY}" HH="${gcyc}" generate_com -x COM_ICE_RESTART_PREV:COM_ICE_RESTART_TMPL
- fi
-
- if [[ ${DO_AERO} == "YES" ]]; then
- MEMDIR="${memchar}" YMD=${PDY} HH=${cyc} generate_com -x COM_CHEM_HISTORY
- fi
-
-
- if [[ ${skip_mem} = "NO" ]]; then
-
- ra=0
-
- export MEMBER=${imem}
- export DATA="${DATATOP}/${memchar}"
- if [[ -d ${DATA} ]]; then rm -rf "${DATA}"; fi
- mkdir -p "${DATA}"
- ${FORECASTSH}
- ra=$?
-
- # Notify a member forecast failed and abort
- if [[ ${ra} -ne 0 ]]; then
- err_exit "FATAL ERROR: forecast of member ${ENSMEM} FAILED. Aborting job"
- fi
-
- rc=$((rc+ra))
-
- fi
-
- if [[ ${SENDDBN} = YES ]]; then
- fhr=${FHOUT}
- while [[ ${fhr} -le ${FHMAX} ]]; do
- FH3=$(printf %03i "${fhr}")
- if (( fhr % 3 == 0 )); then
- "${DBNROOT}/bin/dbn_alert" MODEL GFS_ENKF "${job}" "${COM_ATMOS_HISTORY}/${RUN}.t${cyc}z.sfcf${FH3}.nc"
- fi
- fhr=$((fhr+FHOUT))
- done
- fi
-
- cd "${DATATOP}"
-
- if [[ -s ${EFCSGRP} ]]; then
- ${NCP} "${EFCSGRP}" log_old
- fi
- [[ -f log ]] && rm log
- [[ -f log_new ]] && rm log_new
- if [[ ${ra} -ne 0 ]]; then
- echo "MEMBER ${ENSMEM} : FAIL" > log
- else
- echo "MEMBER ${ENSMEM} : PASS" > log
- fi
- if [[ -s log_old ]] ; then
- cat log_old log > log_new
- else
- cat log > log_new
- fi
- ${NCP} log_new "${EFCSGRP}"
-
-done
-
-################################################################################
-# Echo status of ensemble group
-cd "${DATATOP}"
-echo "Status of ensemble members in group ${ENSGRP}:"
-cat "${EFCSGRP}"
-[[ -f ${EFCSGRP}.fail ]] && rm "${EFCSGRP}".fail
-
-################################################################################
-# If any members failed, error out
-export err=${rc}; err_chk
-
-################################################################################
-# Postprocessing
-
-exit "${err}"
diff --git a/sorc/gdas.cd b/sorc/gdas.cd
index 9ba6bb9916..831b08a3f9 160000
--- a/sorc/gdas.cd
+++ b/sorc/gdas.cd
@@ -1 +1 @@
-Subproject commit 9ba6bb9916abf93de899d88a540904572bdbd7a7
+Subproject commit 831b08a3f947e8d743e2afbd6d38ecc4b0dec3b1
diff --git a/sorc/link_workflow.sh b/sorc/link_workflow.sh
index 2e190e3285..6d5d40a354 100755
--- a/sorc/link_workflow.sh
+++ b/sorc/link_workflow.sh
@@ -130,15 +130,21 @@ cd "${HOMEgfs}/parm/ufs" || exit 1
${LINK_OR_COPY} "${HOMEgfs}/sorc/ufs_model.fd/tests/parm/noahmptable.tbl" .
cd "${HOMEgfs}/parm/post" || exit 1
-for file in postxconfig-NT-GEFS-ANL.txt postxconfig-NT-GEFS-F00.txt postxconfig-NT-GEFS.txt postxconfig-NT-GFS-ANL.txt \
- postxconfig-NT-GFS-F00-TWO.txt postxconfig-NT-GFS-F00.txt postxconfig-NT-GFS-FLUX-F00.txt postxconfig-NT-GFS-FLUX.txt \
- postxconfig-NT-GFS-GOES.txt postxconfig-NT-GFS-TWO.txt \
- postxconfig-NT-GFS.txt postxconfig-NT-gefs-aerosol.txt postxconfig-NT-gefs-chem.txt params_grib2_tbl_new \
- post_tag_gfs128 post_tag_gfs65 nam_micro_lookup.dat \
- AEROSOL_LUTS.dat optics_luts_DUST.dat optics_luts_SALT.dat optics_luts_SOOT.dat optics_luts_SUSO.dat optics_luts_WASO.dat
+for file in postxconfig-NT-GEFS-F00.txt postxconfig-NT-GEFS.txt postxconfig-NT-GEFS-WAFS.txt \
+ postxconfig-NT-GEFS-F00-aerosol.txt postxconfig-NT-GEFS-aerosol.txt \
+ postxconfig-NT-GFS-ANL.txt postxconfig-NT-GFS-F00.txt postxconfig-NT-GFS-FLUX-F00.txt \
+ postxconfig-NT-GFS.txt postxconfig-NT-GFS-FLUX.txt postxconfig-NT-GFS-GOES.txt \
+ postxconfig-NT-GFS-F00-TWO.txt postxconfig-NT-GFS-TWO.txt \
+ params_grib2_tbl_new post_tag_gfs128 post_tag_gfs65 nam_micro_lookup.dat
do
${LINK_OR_COPY} "${HOMEgfs}/sorc/upp.fd/parm/${file}" .
done
+for file in optics_luts_DUST.dat optics_luts_DUST_nasa.dat optics_luts_NITR_nasa.dat \
+ optics_luts_SALT.dat optics_luts_SALT_nasa.dat optics_luts_SOOT.dat optics_luts_SOOT_nasa.dat \
+ optics_luts_SUSO.dat optics_luts_SUSO_nasa.dat optics_luts_WASO.dat optics_luts_WASO_nasa.dat
+do
+ ${LINK_OR_COPY} "${HOMEgfs}/sorc/upp.fd/fix/chem/${file}" .
+done
cd "${HOMEgfs}/scripts" || exit 8
${LINK_OR_COPY} "${HOMEgfs}/sorc/ufs_utils.fd/scripts/exemcsfc_global_sfc_prep.sh" .
diff --git a/ush/detect_machine.sh b/ush/detect_machine.sh
index 01ae66a02d..8a719c10d9 100755
--- a/ush/detect_machine.sh
+++ b/ush/detect_machine.sh
@@ -1,10 +1,21 @@
#!/bin/bash
+# The authoritative copy of this script lives in the ufs-weather-model at:
+# https://github.com/ufs-community/ufs-weather-model/blob/develop/tests/detect_machine.sh
+# If any local modifications are made or new platform support added,
+# please consider opening an issue and a PR to the ufs-weather-model
+# so that this copy remains in sync with its authoritative source
+#
+# Thank you for your contribution
+
+# If the MACHINE_ID variable is set, skip this script.
+[[ -n ${MACHINE_ID:-} ]] && return
+
# First detect w/ hostname
case $(hostname -f) in
- adecflow0[12].acorn.wcoss2.ncep.noaa.gov) MACHINE_ID=wcoss2 ;; ### acorn
- alogin0[12].acorn.wcoss2.ncep.noaa.gov) MACHINE_ID=wcoss2 ;; ### acorn
+ adecflow0[12].acorn.wcoss2.ncep.noaa.gov) MACHINE_ID=acorn ;; ### acorn
+ alogin0[12].acorn.wcoss2.ncep.noaa.gov) MACHINE_ID=acorn ;; ### acorn
clogin0[1-9].cactus.wcoss2.ncep.noaa.gov) MACHINE_ID=wcoss2 ;; ### cactus01-9
clogin10.cactus.wcoss2.ncep.noaa.gov) MACHINE_ID=wcoss2 ;; ### cactus10
dlogin0[1-9].dogwood.wcoss2.ncep.noaa.gov) MACHINE_ID=wcoss2 ;; ### dogwood01-9
@@ -15,7 +26,7 @@ case $(hostname -f) in
gaea9.ncrc.gov) MACHINE_ID=gaea ;; ### gaea9
gaea1[0-6].ncrc.gov) MACHINE_ID=gaea ;; ### gaea10-16
- hfe0[1-9]) MACHINE_ID=hera ;; ### hera01-9
+ hfe0[1-9]) MACHINE_ID=hera ;; ### hera01-09
hfe1[0-2]) MACHINE_ID=hera ;; ### hera10-12
hecflow01) MACHINE_ID=hera ;; ### heraecflow01
@@ -28,10 +39,6 @@ case $(hostname -f) in
[Hh]ercules-login-[1-4].[Hh][Pp][Cc].[Mm]s[Ss]tate.[Ee]du) MACHINE_ID=hercules ;; ### hercules1-4
- cheyenne[1-6].cheyenne.ucar.edu) MACHINE_ID=cheyenne ;; ### cheyenne1-6
- cheyenne[1-6].ib0.cheyenne.ucar.edu) MACHINE_ID=cheyenne ;; ### cheyenne1-6
- chadmin[1-6].ib0.cheyenne.ucar.edu) MACHINE_ID=cheyenne ;; ### cheyenne1-6
-
login[1-4].stampede2.tacc.utexas.edu) MACHINE_ID=stampede ;; ### stampede1-4
login0[1-2].expanse.sdsc.edu) MACHINE_ID=expanse ;; ### expanse1-2
@@ -56,30 +63,30 @@ if [[ "${MACHINE_ID}" != "UNKNOWN" ]]; then
fi
# Try searching based on paths since hostname may not match on compute nodes
-if [[ -d /lfs/f1 ]] ; then
+if [[ -d /lfs/h3 ]]; then
# We are on NOAA Cactus or Dogwood
MACHINE_ID=wcoss2
-elif [[ -d /mnt/lfs1 ]] ; then
+elif [[ -d /lfs/h1 && ! -d /lfs/h3 ]]; then
+ # We are on NOAA TDS Acorn
+ MACHINE_ID=acorn
+elif [[ -d /mnt/lfs1 ]]; then
# We are on NOAA Jet
MACHINE_ID=jet
-elif [[ -d /scratch1 ]] ; then
+elif [[ -d /scratch1 ]]; then
# We are on NOAA Hera
MACHINE_ID=hera
-elif [[ -d /work ]] ; then
+elif [[ -d /work ]]; then
# We are on MSU Orion or Hercules
- if [[ -d /apps/other ]] ; then
+ if [[ -d /apps/other ]]; then
# We are on Hercules
MACHINE_ID=hercules
else
MACHINE_ID=orion
fi
-elif [[ -d /glade ]] ; then
- # We are on NCAR Yellowstone
- MACHINE_ID=cheyenne
-elif [[ -d /lustre && -d /ncrc ]] ; then
+elif [[ -d /gpfs && -d /ncrc ]]; then
# We are on GAEA.
MACHINE_ID=gaea
-elif [[ -d /data/prod ]] ; then
+elif [[ -d /data/prod ]]; then
# We are on SSEC's S4
MACHINE_ID=s4
else
diff --git a/ush/forecast_postdet.sh b/ush/forecast_postdet.sh
index 02f69c63ed..8e40d6c881 100755
--- a/ush/forecast_postdet.sh
+++ b/ush/forecast_postdet.sh
@@ -33,16 +33,14 @@ FV3_postdet(){
done
# Replace sfc_data with sfcanl_data restart files from current cycle (if found)
- if [[ "${MODE}" = "cycled" ]] && [[ "${CCPP_SUITE}" = "FV3_GFS_v16" ]]; then # TODO: remove if statement when global_cycle can handle NOAHMP
- for file in "${COM_ATMOS_RESTART}/${sPDY}.${scyc}0000."*.nc; do
- file2=$(basename "${file}")
- file2=$(echo "${file2}" | cut -d. -f3-) # remove the date from file
- fsufanl=$(echo "${file2}" | cut -d. -f1)
- file2=$(echo "${file2}" | sed -e "s/sfcanl_data/sfc_data/g")
- rm -f "${DATA}/INPUT/${file2}"
- ${NLN} "${file}" "${DATA}/INPUT/${file2}"
- done
- fi
+ for file in "${COM_ATMOS_RESTART}/${sPDY}.${scyc}0000."*.nc; do
+ file2=$(basename "${file}")
+ file2=$(echo "${file2}" | cut -d. -f3-) # remove the date from file
+ fsufanl=$(echo "${file2}" | cut -d. -f1)
+ file2=$(echo "${file2}" | sed -e "s/sfcanl_data/sfc_data/g")
+ rm -f "${DATA}/INPUT/${file2}"
+ ${NLN} "${file}" "${DATA}/INPUT/${file2}"
+ done
# Need a coupler.res when doing IAU # FIXME: This is needed for warm_start, regardless of IAU.
if [[ ${DOIAU} = "YES" ]]; then
diff --git a/ush/forecast_predet.sh b/ush/forecast_predet.sh
index 9bb565919a..8f46ed6ea0 100755
--- a/ush/forecast_predet.sh
+++ b/ush/forecast_predet.sh
@@ -117,15 +117,10 @@ FV3_predet(){
FV3_OUTPUT_FH=""
local fhr=${FHMIN}
if (( FHOUT_HF > 0 && FHMAX_HF > 0 )); then
- for (( fh = FHMIN; fh < FHMAX_HF; fh = fh + FHOUT_HF )); do
- FV3_OUTPUT_FH="${FV3_OUTPUT_FH} ${fh}"
- done
+ FV3_OUTPUT_FH="${FV3_OUTPUT_FH} $(seq -s ' ' "${FHMIN}" "${FHOUT_HF}" "${FHMAX_HF}")"
fhr=${FHMAX_HF}
fi
- for (( fh = fhr; fh <= FHMAX; fh = fh + FHOUT )); do
- FV3_OUTPUT_FH="${FV3_OUTPUT_FH} ${fh}"
- done
-
+ FV3_OUTPUT_FH="${FV3_OUTPUT_FH} $(seq -s ' ' "${fhr}" "${FHOUT}" "${FHMAX}")"
# Model resolution specific parameters
DELTIM=${DELTIM:-225}
@@ -134,8 +129,9 @@ FV3_predet(){
LEVS=${LEVS:-65}
# Other options
- MEMBER=${MEMBER:-"-1"} # -1: control, 0: ensemble mean, >0: ensemble member $MEMBER
- ENS_NUM=${ENS_NUM:-1} # Single executable runs multiple members (e.g. GEFS)
+ # ignore errors that variable isn't used
+ # shellcheck disable=SC2034
+ MEMBER=$(( 10#${ENSMEM:-"-1"} )) # -1: control, 0: ensemble mean, >0: ensemble member $MEMBER
PREFIX_ATMINC=${PREFIX_ATMINC:-""} # allow ensemble to use recentered increment
# IAU options
diff --git a/ush/hpssarch_gen.sh b/ush/hpssarch_gen.sh
index db39c2eac5..f1beb9469d 100755
--- a/ush/hpssarch_gen.sh
+++ b/ush/hpssarch_gen.sh
@@ -618,8 +618,8 @@ if [[ ${type} == "enkfgdas" || ${type} == "enkfgfs" ]]; then
fi
fi
done # loop over FHR
- for fstep in efcs epos ; do
- echo "logs/${PDY}${cyc}/${RUN}${fstep}*.log"
+ for fstep in fcst epos ; do
+ echo "logs/${PDY}${cyc}/${RUN}${fstep}*.log"
done
# eobs, ecen, esfc, and eupd are not run on the first cycle
diff --git a/ush/load_fv3gfs_modules.sh b/ush/load_fv3gfs_modules.sh
index b4f23fa331..48885c62e4 100755
--- a/ush/load_fv3gfs_modules.sh
+++ b/ush/load_fv3gfs_modules.sh
@@ -10,7 +10,7 @@ fi
ulimit_s=$( ulimit -S -s )
# Find module command and purge:
-source "${HOMEgfs}/modulefiles/module-setup.sh.inc"
+source "${HOMEgfs}/ush/module-setup.sh"
# Source versions file for runtime
source "${HOMEgfs}/versions/run.ver"
diff --git a/ush/load_ufsda_modules.sh b/ush/load_ufsda_modules.sh
index da8e2d8096..f15ae5666c 100755
--- a/ush/load_ufsda_modules.sh
+++ b/ush/load_ufsda_modules.sh
@@ -27,7 +27,7 @@ fi
ulimit_s=$( ulimit -S -s )
# Find module command and purge:
-source "${HOMEgfs}/modulefiles/module-setup.sh.inc"
+source "${HOMEgfs}/ush/module-setup.sh"
# Load our modules:
module use "${HOMEgfs}/sorc/gdas.cd/modulefiles"
@@ -44,20 +44,15 @@ elif [[ -d /scratch1 ]] ; then
# set NETCDF variable based on ncdump location
NETCDF=$( which ncdump )
export NETCDF
- # prod_util stuff, find a better solution later...
- module use /scratch2/NCEPDEV/nwprod/hpc-stack/libs/hpc-stack/modulefiles/compiler/intel/2022.1.2/
- module load prod_util
elif [[ -d /work ]] ; then
- # We are on MSU Orion
- # prod_util stuff, find a better solution later...
- #module use /apps/contrib/NCEP/hpc-stack/libs/hpc-stack/modulefiles/compiler/intel/2022.1.2/
- #module load prod_util
- export UTILROOT=/work2/noaa/da/python/opt/intel-2022.1.2/prod_util/1.2.2
- export MDATE=/work2/noaa/da/python/opt/intel-2022.1.2/prod_util/1.2.2/bin/mdate
- export NDATE=/work2/noaa/da/python/opt/intel-2022.1.2/prod_util/1.2.2/bin/ndate
- export NHOUR=/work2/noaa/da/python/opt/intel-2022.1.2/prod_util/1.2.2/bin/nhour
- export FSYNC=/work2/noaa/da/python/opt/intel-2022.1.2/prod_util/1.2.2/bin/fsync_file
- module load "${MODS}/orion"
+ # We are on MSU Orion or Hercules
+ if [[ -d /apps/other ]] ; then
+ # Hercules
+ module load "${MODS}/hercules"
+ else
+ # Orion
+ module load "${MODS}/orion"
+ fi
# set NETCDF variable based on ncdump location
ncdump=$( which ncdump )
NETCDF=$( echo "${ncdump}" | cut -d " " -f 3 )
diff --git a/ush/module-setup.sh b/ush/module-setup.sh
index fd656966bf..e204bae8a2 100755
--- a/ush/module-setup.sh
+++ b/ush/module-setup.sh
@@ -1,6 +1,8 @@
#!/bin/bash
set -u
+source "${HOMEgfs}/ush/detect_machine.sh"
+
if [[ ${MACHINE_ID} = jet* ]] ; then
# We are on NOAA Jet
if ( ! eval module help > /dev/null 2>&1 ) ; then
diff --git a/ush/python/pygfs/task/land_analysis.py b/ush/python/pygfs/task/land_analysis.py
index 307e875183..821caf2305 100644
--- a/ush/python/pygfs/task/land_analysis.py
+++ b/ush/python/pygfs/task/land_analysis.py
@@ -26,7 +26,7 @@ class LandAnalysis(Analysis):
Class for global land analysis tasks
"""
- NMEM_LANDENS = 2 # The size of the land ensemble is fixed at 2. Does this need to be a variable?
+ NMEM_LANDENS = 2
@logit(logger, name="LandAnalysis")
def __init__(self, config):
@@ -74,7 +74,7 @@ def prepare_GTS(self) -> None:
# create a temporary dict of all keys needed in this method
localconf = AttrDict()
keys = ['HOMEgfs', 'DATA', 'current_cycle', 'COM_OBS', 'COM_ATMOS_RESTART_PREV',
- 'OPREFIX', 'CASE', 'ntiles']
+ 'OPREFIX', 'CASE', 'OCNRES', 'ntiles']
for key in keys:
localconf[key] = self.task_config[key]
@@ -198,7 +198,7 @@ def prepare_IMS(self) -> None:
raise WorkflowException(f"An error occured during execution of {exe}")
# Ensure the snow depth IMS file is produced by the above executable
- input_file = f"IMSscf.{to_YMD(localconf.current_cycle)}.{localconf.CASE}.mx{localconf.OCNRES}_oro_data.nc"
+ input_file = f"IMSscf.{to_YMD(localconf.current_cycle)}.{localconf.CASE}_oro_data.nc"
if not os.path.isfile(f"{os.path.join(localconf.DATA, input_file)}"):
logger.exception(f"{self.task_config.CALCFIMSEXE} failed to produce {input_file}")
raise FileNotFoundError(f"{os.path.join(localconf.DATA, input_file)}")
@@ -249,7 +249,7 @@ def initialize(self) -> None:
# create a temporary dict of all keys needed in this method
localconf = AttrDict()
keys = ['DATA', 'current_cycle', 'COM_OBS', 'COM_ATMOS_RESTART_PREV',
- 'OPREFIX', 'CASE', 'ntiles']
+ 'OPREFIX', 'CASE', 'OCNRES', 'ntiles']
for key in keys:
localconf[key] = self.task_config[key]
@@ -301,7 +301,7 @@ def execute(self) -> None:
localconf = AttrDict()
keys = ['HOMEgfs', 'DATA', 'current_cycle',
'COM_ATMOS_RESTART_PREV', 'COM_LAND_ANALYSIS', 'APREFIX',
- 'SNOWDEPTHVAR', 'BESTDDEV', 'CASE', 'ntiles',
+ 'SNOWDEPTHVAR', 'BESTDDEV', 'CASE', 'OCNRES', 'ntiles',
'APRUN_LANDANL', 'JEDIEXE', 'jedi_yaml',
'APPLY_INCR_NML_TMPL', 'APPLY_INCR_EXE', 'APRUN_APPLY_INCR']
for key in keys:
@@ -530,6 +530,7 @@ def add_increments(config: Dict) -> None:
DATA
current_cycle
CASE
+ OCNRES
ntiles
APPLY_INCR_NML_TMPL
APPLY_INCR_EXE
diff --git a/workflow/applications/gfs_cycled.py b/workflow/applications/gfs_cycled.py
index 1ff6cc3723..6dd0342a78 100644
--- a/workflow/applications/gfs_cycled.py
+++ b/workflow/applications/gfs_cycled.py
@@ -18,6 +18,7 @@ def __init__(self, conf: Configuration):
self.do_jediocnvar = self._base.get('DO_JEDIOCNVAR', False)
self.do_jedilandda = self._base.get('DO_JEDILANDDA', False)
self.do_mergensst = self._base.get('DO_MERGENSST', False)
+ self.do_vrfy_oceanda = self._base.get('DO_VRFY_OCEANDA', False)
self.lobsdiag_forenkf = False
self.eupd_cdumps = None
@@ -43,8 +44,9 @@ def _get_app_configs(self):
if self.do_jediocnvar:
configs += ['prepoceanobs', 'ocnanalprep', 'ocnanalbmat',
- 'ocnanalrun', 'ocnanalchkpt', 'ocnanalpost',
- 'ocnanalvrfy']
+ 'ocnanalrun', 'ocnanalchkpt', 'ocnanalpost']
+ if self.do_vrfy_oceanda:
+ configs += ['ocnanalvrfy']
if self.do_ocean:
configs += ['ocnpost']
@@ -137,8 +139,9 @@ def get_task_names(self):
if self.do_jediocnvar:
gdas_gfs_common_tasks_before_fcst += ['prepoceanobs', 'ocnanalprep',
'ocnanalbmat', 'ocnanalrun',
- 'ocnanalchkpt', 'ocnanalpost',
- 'ocnanalvrfy']
+ 'ocnanalchkpt', 'ocnanalpost']
+ if self.do_vrfy_oceanda:
+ gdas_gfs_common_tasks_before_fcst += ['ocnanalvrfy']
gdas_gfs_common_tasks_before_fcst += ['sfcanl', 'analcalc']
@@ -321,9 +324,4 @@ def get_gfs_cyc_dates(base: Dict[str, Any]) -> Dict[str, Any]:
base_out['EDATE_GFS'] = edate_gfs
base_out['INTERVAL_GFS'] = interval_gfs
- fhmax_gfs = {}
- for hh in ['00', '06', '12', '18']:
- fhmax_gfs[hh] = base.get(f'FHMAX_GFS_{hh}', base.get('FHMAX_GFS_00', 120))
- base_out['FHMAX_GFS'] = fhmax_gfs
-
return base_out
diff --git a/workflow/create_experiment.py b/workflow/create_experiment.py
index 7e0f350c0f..708cf432bf 100755
--- a/workflow/create_experiment.py
+++ b/workflow/create_experiment.py
@@ -63,7 +63,9 @@ def input_args():
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument(
- '--yaml', help='full path to yaml file describing the experiment configuration', type=Path, required=True)
+ '-y', '--yaml', help='full path to yaml file describing the experiment configuration', type=Path, required=True)
+ parser.add_argument(
+ '-o', '--overwrite', help='overwrite previously created experiment', action="store_true", required=False)
return parser.parse_args()
@@ -89,6 +91,9 @@ def input_args():
setup_expt_args.append(f"--{kk}")
setup_expt_args.append(str(vv))
+ if user_inputs.overwrite:
+ setup_expt_args.append("--overwrite")
+
logger.info(f"Call: setup_expt.main()")
logger.debug(f"setup_expt.py {' '.join(setup_expt_args)}")
setup_expt.main(setup_expt_args)
diff --git a/workflow/rocoto/gefs_tasks.py b/workflow/rocoto/gefs_tasks.py
index 154383f627..a72753eb90 100644
--- a/workflow/rocoto/gefs_tasks.py
+++ b/workflow/rocoto/gefs_tasks.py
@@ -7,7 +7,6 @@ class GEFSTasks(Tasks):
def __init__(self, app_config: AppConfig, cdump: str) -> None:
super().__init__(app_config, cdump)
- self.nmem = self._base['NMEM_ENS']
def stage_ic(self):
@@ -104,7 +103,7 @@ def fcst(self):
dependencies = rocoto.create_dependency(dep_condition='and', dep=dependencies)
resources = self.get_resource('fcst')
- task_name = f'fcst'
+ task_name = f'fcst_mem000'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
@@ -131,27 +130,29 @@ def efcs(self):
dependencies = rocoto.create_dependency(dep_condition='and', dep=dependencies)
efcsenvars = self.envars.copy()
- efcsenvars.append(rocoto.create_envar(name='ENSGRP', value='#grp#'))
-
- groups = self._get_hybgroups(self.nmem, self._configs['efcs']['NMEM_EFCSGRP'])
- var_dict = {'grp': groups}
+ efcsenvars_dict = {'ENSMEM': '#member#',
+ 'MEMDIR': 'mem#member#'
+ }
+ for key, value in efcsenvars_dict.items():
+ efcsenvars.append(rocoto.create_envar(name=key, value=str(value)))
resources = self.get_resource('efcs')
- task_name = f'efcs#grp#'
+ task_name = f'fcst_mem#member#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': efcsenvars,
'cycledef': 'gefs',
- 'command': f'{self.HOMEgfs}/jobs/rocoto/efcs.sh',
+ 'command': f'{self.HOMEgfs}/jobs/rocoto/fcst.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
'maxtries': '&MAXTRIES;'
}
- metatask_dict = {'task_name': 'efmn',
- 'var_dict': var_dict,
+ member_var_dict = {'member': ' '.join([str(mem).zfill(3) for mem in range(1, self.nmem + 1)])}
+ metatask_dict = {'task_name': 'fcst_ens',
+ 'var_dict': member_var_dict,
'task_dict': task_dict
}
diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py
index 36d5ab0def..9102c74e35 100644
--- a/workflow/rocoto/gfs_tasks.py
+++ b/workflow/rocoto/gfs_tasks.py
@@ -1330,8 +1330,7 @@ def _get_awipsgroups(cdump, config):
if cdump in ['gdas']:
fhrs = range(fhmin, fhmax + fhout, fhout)
elif cdump in ['gfs']:
- fhmax = np.max(
- [config['FHMAX_GFS_00'], config['FHMAX_GFS_06'], config['FHMAX_GFS_12'], config['FHMAX_GFS_18']])
+ fhmax = config['FHMAX_GFS']
fhout = config['FHOUT_GFS']
fhmax_hf = config['FHMAX_HF_GFS']
fhout_hf = config['FHOUT_HF_GFS']
@@ -2276,14 +2275,14 @@ def eomg(self):
dependencies = rocoto.create_dependency(dep=deps)
eomgenvars = self.envars.copy()
- eomgenvars.append(rocoto.create_envar(name='ENSGRP', value='#grp#'))
-
- groups = self._get_hybgroups(self._base['NMEM_ENS'], self._configs['eobs']['NMEM_EOMGGRP'])
-
- var_dict = {'grp': groups}
+ eomgenvars_dict = {'ENSMEM': '#member#',
+ 'MEMDIR': 'mem#member#'
+ }
+ for key, value in eomgenvars_dict.items():
+ eomgenvars.append(rocoto.create_envar(name=key, value=str(value)))
resources = self.get_resource('eomg')
- task_name = f'{self.cdump}eomg#grp#'
+ task_name = f'{self.cdump}eomg_mem#member#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
@@ -2295,8 +2294,9 @@ def eomg(self):
'maxtries': '&MAXTRIES;'
}
- metatask_dict = {'task_name': f'{self.cdump}eomn',
- 'var_dict': var_dict,
+ member_var_dict = {'member': ' '.join([str(mem).zfill(3) for mem in range(1, self.nmem + 1)])}
+ metatask_dict = {'task_name': f'{self.cdump}eomg',
+ 'var_dict': member_var_dict,
'task_dict': task_dict,
}
@@ -2332,7 +2332,7 @@ def eupd(self):
if self.app_config.lobsdiag_forenkf:
dep_dict = {'type': 'task', 'name': f'{self.cdump}ediag'}
else:
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}eomn'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.cdump}eomg'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
@@ -2540,31 +2540,30 @@ def efcs(self):
dependencies = rocoto.create_dependency(dep_condition='or', dep=dependencies)
efcsenvars = self.envars.copy()
- efcsenvars.append(rocoto.create_envar(name='ENSGRP', value='#grp#'))
-
- groups = self._get_hybgroups(self._base['NMEM_ENS'], self._configs['efcs']['NMEM_EFCSGRP'])
+ efcsenvars_dict = {'ENSMEM': '#member#',
+ 'MEMDIR': 'mem#member#'
+ }
+ for key, value in efcsenvars_dict.items():
+ efcsenvars.append(rocoto.create_envar(name=key, value=str(value)))
- if self.cdump == "enkfgfs":
- groups = self._get_hybgroups(self._base['NMEM_ENS_GFS'], self._configs['efcs']['NMEM_EFCSGRP_GFS'])
cycledef = 'gdas_half,gdas' if self.cdump in ['enkfgdas'] else self.cdump.replace('enkf', '')
resources = self.get_resource('efcs')
- var_dict = {'grp': groups}
-
- task_name = f'{self.cdump}efcs#grp#'
+ task_name = f'{self.cdump}fcst_mem#member#'
task_dict = {'task_name': task_name,
'resources': resources,
'dependency': dependencies,
'envars': efcsenvars,
'cycledef': cycledef,
- 'command': f'{self.HOMEgfs}/jobs/rocoto/efcs.sh',
+ 'command': f'{self.HOMEgfs}/jobs/rocoto/fcst.sh',
'job_name': f'{self.pslot}_{task_name}_@H',
'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
'maxtries': '&MAXTRIES;'
}
- metatask_dict = {'task_name': f'{self.cdump}efmn',
- 'var_dict': var_dict,
+ member_var_dict = {'member': ' '.join([str(mem).zfill(3) for mem in range(1, self.nmem + 1)])}
+ metatask_dict = {'task_name': f'{self.cdump}fcst',
+ 'var_dict': member_var_dict,
'task_dict': task_dict
}
@@ -2579,7 +2578,7 @@ def echgres(self):
deps = []
dep_dict = {'type': 'task', 'name': f'{self.cdump.replace("enkf","")}fcst'}
deps.append(rocoto.add_dependency(dep_dict))
- dep_dict = {'type': 'task', 'name': f'{self.cdump}efcs01'}
+ dep_dict = {'type': 'task', 'name': f'{self.cdump}fcst_mem001'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
@@ -2627,7 +2626,7 @@ def _get_eposgroups(epos):
return grp, dep, lst
deps = []
- dep_dict = {'type': 'metatask', 'name': f'{self.cdump}efmn'}
+ dep_dict = {'type': 'metatask', 'name': f'{self.cdump}fcst'}
deps.append(rocoto.add_dependency(dep_dict))
dependencies = rocoto.create_dependency(dep=deps)
@@ -2676,7 +2675,9 @@ def earc(self):
earcenvars = self.envars.copy()
earcenvars.append(rocoto.create_envar(name='ENSGRP', value='#grp#'))
- groups = self._get_hybgroups(self._base['NMEM_ENS'], self._configs['earc']['NMEM_EARCGRP'], start_index=0)
+ # Integer division is floor division, but we need ceiling division
+ n_groups = -(self.nmem // -self._configs['earc']['NMEM_EARCGRP'])
+ groups = ' '.join([f'{grp:02d}' for grp in range(0, n_groups)])
cycledef = 'gdas_half,gdas' if self.cdump in ['enkfgdas'] else self.cdump.replace('enkf', '')
diff --git a/workflow/rocoto/tasks.py b/workflow/rocoto/tasks.py
index d16a009eff..110dc286b5 100644
--- a/workflow/rocoto/tasks.py
+++ b/workflow/rocoto/tasks.py
@@ -47,6 +47,7 @@ def __init__(self, app_config: AppConfig, cdump: str) -> None:
self.HOMEgfs = self._base['HOMEgfs']
self.rotdir = self._base['ROTDIR']
self.pslot = self._base['PSLOT']
+ self.nmem = int(self._base['NMEM_ENS'])
self._base['cycle_interval'] = to_timedelta(f'{self._base["assim_freq"]}H')
self.n_tiles = 6 # TODO - this needs to be elsewhere
@@ -73,12 +74,6 @@ def _set_envars(envar_dict) -> list:
return envars
- @staticmethod
- def _get_hybgroups(nens: int, nmem_per_group: int, start_index: int = 1):
- ngrps = nens / nmem_per_group
- groups = ' '.join([f'{x:02d}' for x in range(start_index, int(ngrps) + 1)])
- return groups
-
def _template_to_rocoto_cycstring(self, template: str, subs_dict: dict = {}) -> str:
'''
Takes a string templated with ${ } and converts it into a string suitable
@@ -135,8 +130,7 @@ def _get_forecast_hours(cdump, config) -> List[str]:
if cdump in ['gdas']:
fhrs = range(fhmin, fhmax + fhout, fhout)
elif cdump in ['gfs', 'gefs']:
- fhmax = np.max(
- [config['FHMAX_GFS_00'], config['FHMAX_GFS_06'], config['FHMAX_GFS_12'], config['FHMAX_GFS_18']])
+ fhmax = config['FHMAX_GFS']
fhout = config['FHOUT_GFS']
fhmax_hf = config['FHMAX_HF_GFS']
fhout_hf = config['FHOUT_HF_GFS']
diff --git a/workflow/setup_expt.py b/workflow/setup_expt.py
index 7d7ac84aad..3eeb584f46 100755
--- a/workflow/setup_expt.py
+++ b/workflow/setup_expt.py
@@ -224,6 +224,13 @@ def link_files_from_src_to_dst(src_dir, dst_dir):
src_file = os.path.join(src_dir, fname)
if os.path.exists(src_file):
os.symlink(src_file, os.path.join(dst_dir, fname))
+ # First 1/2 cycle also needs an atmos increment if doing warm start
+ if inputs.start in ['warm']:
+ for ftype in ['atmi003.nc', 'atminc.nc', 'atmi009.nc']:
+ fname = f'{inputs.cdump}.t{idatestr[8:]}z.{ftype}'
+ src_file = os.path.join(src_dir, fname)
+ if os.path.exists(src_file):
+ os.symlink(src_file, os.path.join(dst_dir, fname))
return
@@ -316,7 +323,8 @@ def edit_baseconfig(host, inputs, yaml_dict):
"@EXP_WARM_START@": is_warm_start,
"@MODE@": inputs.mode,
"@gfs_cyc@": inputs.gfs_cyc,
- "@APP@": inputs.app
+ "@APP@": inputs.app,
+ "@NMEM_ENS@": getattr(inputs, 'nens', 0)
}
tmpl_dict = dict(tmpl_dict, **extend_dict)
@@ -324,7 +332,6 @@ def edit_baseconfig(host, inputs, yaml_dict):
if getattr(inputs, 'nens', 0) > 0:
extend_dict = {
"@CASEENS@": f'C{inputs.resensatmos}',
- "@NMEM_ENS@": inputs.nens,
}
tmpl_dict = dict(tmpl_dict, **extend_dict)
@@ -399,6 +406,8 @@ def _common_args(parser):
parser.add_argument('--idate', help='starting date of experiment, initial conditions must exist!',
required=True, type=lambda dd: to_datetime(dd))
parser.add_argument('--edate', help='end date experiment', required=True, type=lambda dd: to_datetime(dd))
+ parser.add_argument('--overwrite', help='overwrite previously created experiment (if it exists)',
+ action='store_true', required=False)
return parser
def _gfs_args(parser):
@@ -493,17 +502,19 @@ def _gefs_args(parser):
return parser.parse_args(list(*argv) if len(argv) else None)
-def query_and_clean(dirname):
+def query_and_clean(dirname, force_clean=False):
"""
Method to query if a directory exists and gather user input for further action
"""
create_dir = True
if os.path.exists(dirname):
- print()
- print(f'directory already exists in {dirname}')
- print()
- overwrite = input('Do you wish to over-write [y/N]: ')
+ print(f'\ndirectory already exists in {dirname}')
+ if force_clean:
+ overwrite = True
+ print(f'removing directory ........ {dirname}\n')
+ else:
+ overwrite = input('Do you wish to over-write [y/N]: ')
create_dir = True if overwrite in [
'y', 'yes', 'Y', 'YES'] else False
if create_dir:
@@ -553,8 +564,8 @@ def main(*argv):
rotdir = os.path.join(user_inputs.comroot, user_inputs.pslot)
expdir = os.path.join(user_inputs.expdir, user_inputs.pslot)
- create_rotdir = query_and_clean(rotdir)
- create_expdir = query_and_clean(expdir)
+ create_rotdir = query_and_clean(rotdir, force_clean=user_inputs.overwrite)
+ create_expdir = query_and_clean(expdir, force_clean=user_inputs.overwrite)
if create_rotdir:
makedirs_if_missing(rotdir)
@@ -565,6 +576,11 @@ def main(*argv):
fill_EXPDIR(user_inputs)
update_configs(host, user_inputs)
+ print(f"*" * 100)
+ print(f'EXPDIR: {expdir}')
+ print(f'ROTDIR: {rotdir}')
+ print(f"*" * 100)
+
if __name__ == '__main__':