[Ncep.list.nems.announce] nems r94669: Emergency bug fixes and documentation updates. All Th...

Samuel.Trahan at noaa.gov Samuel.Trahan at noaa.gov
Thu Jun 22 14:53:28 UTC 2017


Friendly NEMS developers,

This is an automated email about a NEMS commit.

Project: nems
URL: https://svnemc.ncep.noaa.gov/projects/nems/trunk
Revision: 94669
Author:   samuel.trahan at noaa.gov
Date:     2017-06-19T21:29:45.810572Z
Message:
Emergency bug fixes and documentation updates.  All Theia and WCOSS bugs thus far should be fixed.


See attached file for full differences.


-------------- next part --------------
Index: checkout/tests/rtgen
===================================================================
--- checkout/tests/rtgen	(revision 93212)
+++ checkout/tests/rtgen	(revision 94669)
@@ -182,13 +182,12 @@
     altpath=os.path.join(os.path.dirname(os.path.realpath(__file__)),'produtil/ush')
     if not os.path.isdir(altpath):
         fail('%s is missing and a valid produtil is not in PYTHONPATH.  Is your produtil external missing?'%(altpath,))
-        sys.path.append(altpath)
+    sys.path.append(altpath)
     try:
         import produtil.testing
         import produtil.setup
     except ImportError as ie2:
-        if not os.path.isdir(altpath):
-            fail('%s is missing and a valid produtil is not in PYTHONPATH.  Is your produtil external missing?'%(altpath,))
+        fail('%s is missing and a valid produtil is not in PYTHONPATH.  Is your produtil external missing?'%(altpath,))
 
 import produtil.run, produtil.cluster, produtil.fileop
 
@@ -195,7 +194,7 @@
 from produtil.log import jlogger
 from produtil.run import runstr, ExitStatusException, checkrun, batchexe, run
 from produtil.testing.testgen import TestGen
-from produtil.testing.utilities import BASELINE, EXECUTION, bashify_string
+from produtil.testing.utilities import BASELINE, EXECUTION, bashify_string, PTParserError
 from produtil.testing.rocoto import RocotoRunner
 from produtil.testing.setarith import ArithKeyError
 
@@ -306,7 +305,7 @@
     nothing went wrong"""
     sys.stderr.write(RTGEN_SHORT_USAGE_MESSAGE)
     if reason:
-        sys.stderr.write('\nSCRIPT IS ABORTING BECAUSE: %s\n'%(reason,))
+        sys.stderr.write('\nScript is aborting because:\n%s\n'%(reason,))
         exit(1)
     exit(0)
 
@@ -373,7 +372,7 @@
 def rtsh_usage(reason):
     sys.stderr.write(RTSH_USAGE_SIMPLE)
     if reason:
-        sys.stderr.write('\nSCRIPT IS ABORTING BECAUSE: %s\n'%(reason,))
+        sys.stderr.write('\nScript is aborting because:\n%s\n'%(reason,))
         exit(1)
     exit(0)
 
@@ -785,7 +784,7 @@
             scope.override_local([scope],'plat%CPU_ACCOUNT',self.project)
             scope.override_local([scope],'plat%ACCOUNT',self.project)
         else:
-            raise Exception('no project')
+            raise PTParserError('no project')
     @property
     def new_baseline(self):
         return self._new_baseline
@@ -990,7 +989,7 @@
         optval,arglist=getopt.getopt(sys.argv[1:],"c:fst:n:hr:p:b",[
                 'project=', 'mode=', 'baseline-dir=', 'baseline',
                 'dry-run', 'verbose', 'unique-id=', 'temp-dir=', 
-                'resume='])
+                'resume=','compset='])
     except getopt.GetoptError as ge:
         rtsh_usage(str(ge))
 
@@ -1010,8 +1009,10 @@
     resume_sets=None
 
     for opt,val in optval:
-        if opt in ['-f','-s','-c','-t'] and sets is not None:
-            rtsh_usage('Only one of -c, -s, -t, or -f can be used.')
+        if opt in ['--compset','-f','-s','-c','-t'] and sets is not None:
+            rtsh_usage('Only one of --compset, -c, -s, -t, or -f can be used.\n'
+                       'For multiple compset groups, use the set notation.\n'
+                       'Run with -h for more information')
         if opt=='--verbose':
             verbose+=1
         elif opt in [ '-h', '--help' ]:
@@ -1026,9 +1027,14 @@
             sets=str(val)
         elif opt=='-c':
             if val in [ 'ompset', 'ompsets' ]:
-                rtsh_usage('The -compset argument is no longer recognized.  Use "{compset1,compset2,compset3}" instead.  Run with -h for more information.')
+                rtsh_usage('The -compset argument is no longer recognized.\n'
+                           'Use --compset compsetname\n'
+                           ' or "{compset1,compset2,compset3}"\n'
+                           'Run with -h for more information.')
             sets=str(val)
             baseline=True
+        elif opt == '--compset':
+            sets='{'+str(val)+'}'
         elif opt in ['-n', '--baseline-dir']:
             baseline_dir=val
         elif opt in ['-p', '--project']:
@@ -1257,13 +1263,6 @@
     except ArithKeyError as ake:
         # User specified an invalid set or test.  Give the usage message.
         usage(str(ake))
-    except Exception as e:
-        # Complain about errors in the jlogfile and exit with status 1.
-
-        # Note that this will not catch high-level errors such as
-        # signals or exit.  That is deliberate, for safety reasons.
-        produtil.log.jlogger.error(str(e),exc_info=True)
-        exit(1)
     # We get here if everything works.
     jlogger.info('Requested test has been generated.')
     if script_mode:
@@ -1307,7 +1306,8 @@
         trydir=os.path.join(here,rel)
         if os.path.exists(os.path.join(trydir,'NEMS/src/conf')):
             return trydir
-    raise Exception("Cannot find app directory (parent of NEMS).  Looked for NEMS/src/conf relative to ., .., ../.., and ../../.. but found none.")
+    sys.stderr.write("Cannot find app directory (parent of NEMS).  Looked for NEMS/src/conf relative to ., .., ../.., and ../../.. but found none.\n")
+    sys.exit(1)
 
 def run_rtreport(run_dir,app_dir,platform,logger):
     jlogger.info('generate report')
@@ -1356,7 +1356,16 @@
 ########################################################################
 
 def main():
+    try:
+        main_impl()
+    except PTParserError as p:
+        name=type(p).__name__
+        if name.find('KeyError')>=0:
+            name='undefined value'
+        sys.stderr.write('Error in compset: %s %s\nSee earlier lines for details.\n'%(name,str(p)))
 
+def main_impl():
+
     ## Ensure we're in the NEMS/tests directory:
     if not os.path.isdir('produtil') or not os.path.exists('rtgen'):
         os.chdir(os.path.dirname(os.path.realpath(__file__)))
Index: checkout/tests/rtrewindimpl
===================================================================
--- checkout/tests/rtrewindimpl	(revision 93212)
+++ checkout/tests/rtrewindimpl	(revision 94669)
@@ -23,6 +23,8 @@
 else:
     joblist=sys.argv[1:]
 
+jobfound=[False]*len(joblist)
+
 command="rocotorewind -w workflow.xml -d workflow.db"
 
 sent_cycle=False
@@ -30,7 +32,8 @@
 def process_line(line,m):
     match=False
     if joblist is not None:
-        for job in joblist:
+        for ijob in xrange(len(joblist)):
+            job=joblist[ijob]
             task=m.group(2)
             if task=="test_"+job:
                 match="test_"+job
@@ -39,6 +42,7 @@
             if task==job:
                 match=job
             if match:
+                jobfound[ijob]=True
                 break
 
         if not match: 
@@ -52,11 +56,23 @@
     if match:
         command+=" -t "+match
 
-for line in sys.stdin:
+for line in sys.stdin.readlines():
     m=re.match('^(\d+)\s+(\S+)',line)
     if m:
         process_line(line,m)
 
+bad=True
+for ijob in xrange(len(joblist)):
+    if not jobfound[ijob]:
+        sys.stderr.write(' ===> no such job: %s <==\n'%(joblist[ijob],))
+    else:
+        bad=False
+
+if bad:
+    sys.stderr.write(' ===> No jobs found <===\n')
+    print '/bin/false'
+    sys.exit(1)
+
 if joblist is None:
     command+=' -a'
 
Index: checkout/tests
===================================================================
--- checkout/tests	(revision 93212)
+++ checkout/tests	(revision 94669)

Property changes on: checkout/tests
___________________________________________________________________
Modified: svn:externals
## -1 +1 ##
-produtil -r93052    https://svnemc.ncep.noaa.gov/projects/nceplibs/produtil/branches/regtests-run
+produtil -r93653    https://svnemc.ncep.noaa.gov/projects/nceplibs/produtil/branches/regtests-run
Modified: svn:mergeinfo
## -0,0 +0,5 ##
   Merged /nems/branches/update-docs/tests:r93396-94104
   Merged /nems/branches/gocart-fix/tests:r93814-94233
   Merged /nems/branches/merge-fixes/tests:r94234-94668
   Merged /nems/branches/NEMSUpdate/UGCSWeather/tests:r93382-94235,94237-94655
   Merged /nems/branches/regtests/tests:r92938-93574
Index: checkout/doc/NEMS.md
===================================================================
--- checkout/doc/NEMS.md	(revision 93212)
+++ checkout/doc/NEMS.md	(nonexistent)
@@ -1,44 +0,0 @@
-NEMS Directory Structure
-========================
-
-The NEMS directory contains the source code and test scripts for the
-NEMS.  Most of the documentation is in the `doc` subdirectory or in
-the `../doc/` directory.  Most of the files that were in the NEMS have
-been moved to the application layer, discussed below.  Further
-documentation, specific to the app, is also at the app level.
-
-Within NEMS resides:
-
-* `exe` - NEMS.x and other executables built from `src`
-* `src` - main program for NEMS
- * `ENS_Cpl` - The Ensemble coupler directory.
- * `conf` - various compilation specifications
-* `doc` - documentation.
-* `NEMSAppBuilder` - a script to build NEMS, as discussed elsewhere in the
-  documentation
-* `NEMSCompsetRun` - script to run NEMS, identical to the regression test runner
-* `OldCompsetRun` - prior version of the compset runner
-* `tests` - test execution logic
-  * `rtgen` - front-end to the regression test runner
-  * `rt.sh` - wrapper around rtgen for users familiar with the old system
-
-At the application level resides these files:
-
-* `doc` - application-specific documentation
-
-* `oldtests` - application-specific, old, test suite which is
-   deprecated but retained for backward compatibility
-
-* `compsets` - configuration for the NEMSCompsetRun and regression
-   test runner
-
-* `oldcompsets` - configuration for the old compset system available
-   via OldCompsetRunner
-
-* `modulefiles` - module loading information for each platform
-  * `theia` - NOAA Theia modulefiles
-  * `wcoss.phase1` - WCOSS Phase 1 modulefiles
-  * ... other directories for other computers ...
-* `conf` - configuration for NEMS/src/configure
-* `parm` - parameter files for the test suites
-* `log` - log directory for the NEMSAppBuilder and NEMSCompsetRun
\ No newline at end of file
Index: checkout/doc/markdown.md
===================================================================
--- checkout/doc/markdown.md	(revision 93212)
+++ checkout/doc/markdown.md	(nonexistent)
@@ -1,20 +0,0 @@
-Markdown
---------
-
-The webpage is generated from documentation in the NEMS repository.
-Markdown is a human-readable, wiki-like syntax that can easily be
-converted to other formats.  The Markdown files are converted to HTML
-via Trent Mick's markdown2.py tool.  For details on Markdown, see:
-
-* [Markdown website](https://daringfireball.net/projects/markdown/)
-* [markdown2.py website](https://github.com/trentm/python-markdown2/)
-
-This documentation is stored in the NEMSLegacy doc directory.  The
-website can be regenerated by doing this:
-
-    svn co https://svnemc.ncep.noaa.gov/projects/nems/apps/(appname)/trunk
-    cd trunk/NEMS/doc
-    make
-
-That creates the `README.html` and `README.css` that you view on your
-browser.
Index: checkout/doc/README.NMM.md
===================================================================
--- checkout/doc/README.NMM.md	(revision 93212)
+++ checkout/doc/README.NMM.md	(nonexistent)
@@ -1,354 +0,0 @@
-NMM Instructions
-----------------
-
-### How to use restart:
-
-1. Keep the same end time (nhours_fcst: in the config file) as in the original run.
-
-2. Change the restart: argument in the config file from false to true.
-   That's the only change in the config file.
-
-3. The only difference from the original run script is that you don't use
-   main_input_filename.  Instead, you use restart_file_XX_nemsio, which
-   you get from the original restart output file
-   nmmb_rst_XX_nio_HHHHh_00m_00.00s, where XX is the domain ID and HHHH
-   is the forecast hour of the restart time.
-
-Limitations:
-
-1. In order to keep bit-identical results, the restart must be written
-   (used) on a full hour
-
-2. Restart cannot be more frequent than history output and must be a
-   multiple of the history output interval, i.e. if history is written
-   every 3 hours, the model can be restarted at 3, 6, 9, ... hours (to
-   be fixed later)
-
-TODO:
-
-1. Allow writing the restart file at any time in the forecast
-
-
-### How to use time series output in NMMB:
-
-Time series output in NMMB is optional output that is turned on by
-providing an appropriate namelist file in the run directory.  The name
-of that file must be ts_locations.nml, and the contents of the file are
-as follows:
-
-    &TS_LOCATIONS
-    NPOINTS=2,
-    POINTS_LON=-106.0, -110.0,
-    POINTS_LAT=54.0, 50.0
-    /
-
-where NPOINTS defines the number of locations and POINTS_LON, POINTS_LAT
-are arrays of longitudes and latitudes of the selected points in degrees
-(-180.0 to 180.0).
-
-The output filenames are ts_p01_d01.bin, ts_p02_d01.bin,
-ts_p01_d02.bin, ts_p02_d02.bin, etc.  The p01 indicates the point
-number from 1 to NPOINTS and d01, d02 indicate the domain number.
-
-The ncarg program tsplot, which can be used to plot time series, is
-located in `/u/wx20du/plot_timeseries`.  It requires a control file as a
-command-line argument.  For example, if the control file is named
-tsplotsetup_nmm, you will need to run:
-
-    $ ./tsplot tsplotsetup_nmm
-
-which will create a gmeta file.  A sample control file (tsplotsetup_nmm)
-is also located in the `/u/wx20du/plot_timeseries` directory.
-
-
-Nesting
--------
-
-The NMM-B has telescoping static and moving nest capability.  All
-domains, whether the uppermost parent or any nest, are functionally
-equivalent and thus each needs its own configure file.  Both 1-way and
-2-way interaction between parent and child domains are available.
-
-### For 1-way nesting:
-
-1. Set 'nest_mode' to '1-way' in all configure files.  The value of
-   'generation' is not relevant and can be ignored.
- 
-2. The uppermost parent's configure file:
-
-    a. The variable 'num_domains_total' must be set in this domain's
-       configure file.  This is the total number of domains in the run
-       which includes the upper parent plus all nests.  This variable
-       does not need to be set in any other configure files (if it is
-       set in others it is not read).
-
-    b. Set the value for 'my_domain_id' which must always be 1 for the
-       uppermost parent.
-
-    c. Set the value for 'my_parent_id' to -999.
-
-    d. Set 'n_children' to the number of child nests associated with
-       the uppermost parent.  This does not include any nests inside
-       the first generation of child nests because all interactions
-       with nesting involve only a parent and its first generation of
-       children.
-
-    e. Set 'my_domain_moves' to false.
-
-3. Static nest configure files:
-
-    a. In each nest's configure file set 'my_domain_id' to a unique
-       integer greater than 1.  The user is free to choose these
-       integer identifiers in any way desired except that all domain
-       IDs must ultimately form a monotonic sequence.  In other words
-       if the run contains 2 first generation nests and one of those
-       nests contains a nest then the three nests may use any integer
-       value between 2 and 4 as their domain ID so that the final IDs
-       are 1,2,3, and 4 but never something like 1,2,4,5 which is not
-       a monotonic sequence.
-
-    b. Set the value for 'my_parent_id' to the integer ID that was
-       given to this nest's parent domain.
-
-    c. Set 'n_children' to the number of child nests inside of this
-       nest but not counting any deeper nests inside of those children.
-
-    d. Set 'i_parent_start' and 'j_parent_start' to the I and J
-       indices of the H point on the nest's parent's grid that
-       coincide with the nest's SW corner H point.  This implies that
-       any nest's SW corner must lie directly on a parent grid H
-       point.
-
-    e. Set 'parent_child_space_ratio' to the ratio of the size of the
-       parent's grid increment to the child's.  Make this an integer.
-
-    f. Set 'input_ready' to true if input data has already been
-       produced for this nest.  Set it to false if input data has not
-       been produced and the user wants the parent to generate the
-       nest's input data.  NPS-generated input data is naturally
-       preferable.
-
-    g. Set 'my_domain_moves' to false.
-
-4. Moving nest configure files.  See regression test examples: `tests/nmm_conf/nmm_mnests*conf_*`
-
-    a. Follow all instructions in 3(a)-(f).  
-
-    b. Set 'my_domain_moves' to true.
-
-    c. Set 'ratio_sfc_files' to the ratio of the uppermost parent's
-       grid increment to this moving nest's.  Again this should be an
-       integer.  The use of moving nests requires the user to generate
-       eight different surface-related static datafiles for each
-       different resolution of nest in the run.  If there are two
-       moving nests with parent_child_space_ratio=3 then a set of the
-       following eight files must be pre-generated: ALBASE_ij_3,
-       FIS_ij_3, ISLTYP_ij_3, IVGTYP_ij_3, MXSNAL_ij_3, SM_ij_3,
-       TG_ij_3, and VEGFRC_ij_3.  These are the base albedo, sfc
-       geopotential, soil type, vegetation type, maximum snow albedo,
-       sea mask, deep underground temperature, and vegetation
-       fraction, respectively, at the 3x nests' resolution but which
-       span the entire upper parent domain.
-
-       This data must be present as the nests move across the parent's
-       domain.  Then assume one of the 3x moving nests contains a 3x
-       moving nest inside it.  In the configure file for the inner
-       nest the value of ratio_sfc_files would be 9 and the eight sfc
-       datafiles would contain 9x data that spans the entire upper
-       parent's domain.  Note that the final integer in these files'
-       names must be the value of ratio_sfc_files.
-
-    d. Set the values of 'nrows_p_upd_w', 'nrows_p_upd_e',
-       'nrows_p_upd_s', and 'nrows_p_upd_n' to 2.  This is the number
-       of rows around the edge of the nest domain that must be updated
-       by the parent after the nest moves.  The nest does not use its
-       own data in these rows for updating itself because V is not
-       present on the north or east sides and some variables needed in
-       the integration part of the domain are not computed in these
-       rows.
-
-    e. If a moving nest has a child moving nest then for the outer
-       nest set the value of 'centers_distance'.  This is the distance
-       in units of the outer nest's grid increments that the inner
-       nest's center can move from the outer nest's center before the
-       outer nest shifts so as to bring their centers very near to
-       each other again.
-
-    f. If the uppermost parent domain is global then set the value of
-       'latitude_limit'.  If a nest domain (or the outermost nest in a
-       telescoping complex of nests) reaches this latitude in either
-       hemisphere then the nest stops and never moves again.  Thus the
-       nest's delta X cannot become too small due to converging
-       meridians which would result in violation of the CFL criterion.
-
-    g. The file called nests.txt must be present in the working
-       directory.  The file's 2nd column holds critical specifications
-       regarding variables in the Solver internal state when nests
-       move.  An explanation is given at the beginning of that file.
-
-5. Task assignment: When 1-way nesting is used then the user assigns
-   forecast (compute) tasks and write (quilt) tasks uniquely for each
-   domain in that domain's configure file.  The I by J layout of
-   forecast tasks is specified with the configure variables inpes and
-   jnpes, respectively.  Any number of groups of write tasks can be
-   assigned with the variable called write_groups.  More than one
-   write group should be used if the integration might move from one
-   output time to the next before write tasks have finished with the
-   earlier output.
-
-   The number of tasks in each write group is assigned with the
-   variable called write_tasks_per_group.  The sum of
-   `inpes*jnpes+write_groups*write_tasks_per_group` for all domains must
-   equal the number of tasks that are assigned to the run in the
-   runscript.  This task assignment lets the user fine-tune the
-   balance of work being done on all domains to minimize the time that
-   any parent or child waits for the other thus leading to all compute
-   tasks being busy virtually all the time as all domains integrate
-   their forecasts simultaneously.
-
-6. Configure file names: The run script will copy each configure file
-   to configure_file_01, configure_file_02, etc. where the final
-   integers on the filenames form a consecutive sequence.  The uppermost
-   parent's configure file must be used for configure_file_01 but the
-   user is not required to make the remaining files' names contain the
-   same integer as their corresponding configure files' domain IDs.
-
-### For 2-way nesting
-
-1. Set 'nest_mode' to '2-way' in all configure files.  The integer
-   value of each domain's generation must be given to the variable
-   called 'generation'.  The generation variable is ignored in 1-way
-   mode.
-
-2. The nests.txt file must be present in the working directory.  The
-   file's 3rd column specifies which of the Solver's internal state
-   variables will be used in the 2-way exchange from child to parent.
-   Currently 2-D and 3-D real variables may be selected.  As stated in
-   that file's instructions, an H is used to specify that the given
-   H-pt variable is to be part of the 2-way exchange while a V
-   indicates that the given V-pt variable is to be part of the 2-way
-   exchange.
-
-3. The same rules apply for running static or moving nests in 2-way
-   nesting as in 1-way nesting described above.
- 
-4. Task assignments for 2-way interaction cannot be done in the same
-   way as they are for 1-way because that would leave too many
-   processors idle at any given time as children and parents wait on
-   each other to send internal update values and vice versa.
-   Therefore the integration in 2-way nesting will continually cycle
-   through the generations sequentially but within each generation all
-   domains will execute their forecasts concurrently.  To maximize
-   overall efficiency the user first decides which generation of
-   domains will be the most computationally expensive.  Then ALL
-   available compute and forecast tasks in the run are assigned uniquely
-   to the domains in that generation where they can be balanced so
-   that when this generation executes then all compute tasks will be
-   busy.
-
-   As many of the total number of available compute tasks are assigned
-   to each of the remaining generations as can be used efficiently,
-   i.e., assigning more compute tasks to the generation would not
-   decrease runtime or would increase it due to a large amount of halo
-   exchanges for task subdomains that are too small.  So that the
-   writing of history and restart output remains asynchronous all
-   write and quilt tasks must still be assigned uniquely to each
-   individual domain and cannot be shared among different domains as
-   the compute tasks are.  Therefore the sum of `inpes*jnpes` for all
-   domains in the most expensive generation plus the sum of
-   write_groups times write_tasks_per_group for all domains must equal
-   the total number of tasks assigned to the run in the runscript.
-
-### Specifying nest boundary variables
-
-The boundary variables in single-domain regional runs and for the
-(regional) upper parent in nested runs are hardwired to PD,T,Q,CW,U,V.
-However the user specifies which variables are desired for the nests'
-domain boundaries.  This is done through the external nests.txt file.
-A column labeled 'BC' in that file is used for this purpose.  The user
-places an 'H' or a 'V' in that column for the 2-D, 3-D, or 4-D Solver
-internal state H-pt or V-pt variables that will be updated by the
-parent(s) on the nest boundaries.  If the desired Solver internal
-state variable is not listed in nests.txt then simply add it.  If the
-desired variable is not yet in the Solver internal state then see the
-section below called 'How to add a new variable'.  The copy of
-nests.txt in job and regression_tests currently specifies PD,T,Q,CW,U,V as
-the nest boundary variables.
-
-### How to add a new variable:
-
-1. Go to `~/src/atmos/nmm` directory
-
-2. In the file module_SOLVER_INTERNAL_STATE.F90 search for a similar
-   variable; in this case let's use the 2D variable ALBEDO as an example.
-
-    a. The first command is the declaration of the pointer; add your
-       variable to that command.
-
-    b. Searching further, the next call allocates the size of your new variable:
-
-            CALL SET_VAR_PTR(int_state%VARS,NV,AF,'ALBEDO',int_state%ALBEDO,(/IMS,JMS/),(/IME,JME/))
-
-    In most cases you'll just need to copy this line and change ALBEDO
-       to the name of your variable.
-
-3. Now your variable is placed in the internal state, allocated, and
-   given an initial value of NaN.  If you want to initialize a physics
-   variable with a different number, open module_SOLVER_GRID_COMP.F90
-   in the same directory, go to subroutine PHYSICS_INITIALIZE, search
-   for the string "Initialize allocated arrays", and add your variable
-   to the appropriate place or to the call to the appropriate physics
-   scheme's initialization subroutine, initializing it with the
-   desired value.
-
-The same procedure applies to 3D and 4D arrays.
-
-
-### Adding variable to the history and/or restart file:
-
-1. If this is a new (non-existent) variable in the internal state, go
-   through steps 1-3 in the previous section.
-
-2. Once the variable exists in the internal state, go to the
-   `~/job/regression_tests` directory, and find the file called
-   solver_state.txt.
-
-3. Again using ALBEDO as an example, open the file solver_state.txt and
-   search for ALBEDO.
-
-    a. copy and paste line:
-
-            'ALBEDO'     H       R       O      -      -   T  'Dynamic albedo'
-
-    b. rename ALBEDO to the name of the variable you used in step 2b
-      when you added the variable, then give a short description in the
-      8th column.
-
-4. There are 8 columns in the file: Name History Restart Owned Import
-   eXport Time_series Description
-
-    a. If you want your variable in the History file, leave the letter
-       H in the second column; if not, just leave a dash.
-
-    b. If you want your variable in the Restart file, leave the letter
-       R in the third column; if not, just leave a dash.
-
-    c. If you want your variable as part of a Time series, leave the
-       letter T in the seventh column; if not, just leave a dash.
-
-5. Columns 4, 5 and 6 are part of the "ownership suite" and are intended
-   to be used for exchange between dynamics and physics without
-   unnecessary (duplicate) allocations.
-
-       - O is for owned
-       - X is for export
-       - I is for import
-
-   Designate 'O' for most new variables which will tell the code to
-   allocate memory for it.  Only if you know the variable will be used
-   as an unallocated pointer into another variable that has been
-   allocated will you designate a blank ('-').  X/I are used to
-   specify which variables must be exported/imported between the
-   Solver and the component that handles boundaries and motion of
-   nests.  Specify blanks ('-') unless you are certain the new
-   variable is required for nests' boundaries and/or motion.
Index: checkout/doc/README.css
===================================================================
--- checkout/doc/README.css	(revision 93212)
+++ checkout/doc/README.css	(nonexistent)
@@ -1,100 +0,0 @@
-body {
-    margin: auto;
-    padding-right: 1em;
-    padding-left: 1em;
-    max-width: 44em; 
-    border-left: 1px solid black;
-    border-right: 1px solid black;
-    color: black;
-    font-family: Verdana, sans-serif;
-    font-size: 100%;
-    line-height: 140%;
-    color: #333; 
-}
-pre {
-    border: 1px dotted gray;
-    background-color: #ececec;
-    color: #111111;
-    padding: 0.5em;
-}
-code {
-    font-family: monospace;
-}
-table {
-    border-collapse: collapse;
-}
-table, th, td {
-  border: 1px solid black;
-}
-th, td {
-  padding: 0.5em;
-}
-h1 a, h2 a, h3 a, h4 a, h5 a { 
-    text-decoration: none;
-    color: #4a5ada;
-}
-h1 code, h2 code, h3 code, h4 code, h5 code { 
-    font-size: 120%;
-}
-h1, h2, h3, h4, h5 {
-    font-family: verdana;
-    font-weight: bold;
-    border-bottom: 1px dotted black;
-    color: #4a5ada;
-}
-h1 {
-        font-size: 150%;
-        text-align: center ;
-        border: 2px solid black;
-        padding: 2em;
-}
-h1:first-of-type{
-        font-size: 150%;
-        text-align: center ;
-        border: 2px solid black;
-        background-color: #ddddff;
-}
-
-h2 {
-    font-size: 140%;
-    border: 1px dotted black;
-    text-align: center;
-    padding: 0.5em;
-}
-
-h3 {
-        font-size: 130%;
-}
-
-h4 {
-        font-size: 120%;
-        font-style: italic;
-}
-
-h5 {
-        font-size: 110%;
-        font-style: italic;
-}
-
-h1.title {
-        font-size: 200%;
-        font-weight: bold;
-        padding-top: 0.2em;
-        padding-bottom: 0.2em;
-        text-align: left;
-        border: none;
-}
-
-dt code {
-        font-weight: bold;
-}
-dd p {
-        margin-top: 0;
-}
-
-#footer {
-        padding-top: 1em;
-        font-size: 70%;
-        color: gray;
-        text-align: center;
-        }
Index: checkout/doc/modeldoc.md
===================================================================
--- checkout/doc/modeldoc.md	(revision 93212)
+++ checkout/doc/modeldoc.md	(nonexistent)
@@ -1,2 +0,0 @@
-Component-Specific Documentation
-================================
\ No newline at end of file
Index: checkout/doc/NEWTEST.md
===================================================================
--- checkout/doc/NEWTEST.md	(revision 93212)
+++ checkout/doc/NEWTEST.md	(nonexistent)
@@ -1,876 +0,0 @@
-<a name="new-system"></a>New Test System
-========================================
-
-The old regression test system has been replaced by a new system.  It
-has a different design that the old one.  It has a superset of the
-capabilities of the old system, but the different design leads to
-advantages and disadvantages.
-
-Presently, that implementation is available via the NEMS/tests/rtgen
-script, and two scripts it generates (rtrun, rtreport).  For backward
-compatibility, there is a wrapper "rt.sh" script to prevent users from
-having to learn a new system if they are only running the regression
-tests (not modifying them).
-
-<a name="design"></a>Design and Capabilities
---------------------------------------------
-
-This system works on a different principle than the older one.  The
-old system ran shell scripts specific to each model or test which
-copied files from outside the NEMS test area and ran external programs
-to generate some inputs.
-
-The new system has a directory of prepared inputs, has no external
-dependencies, and simply runs the NEMS executable without any
-test-specific scripts.  In other words, scripts like the
-`exglobal_fcst_nems.sh` are no longer used.  This makes porting and
-workflow changes simpler, but has the disadvantage of not testing
-model workflow scripts.  That disadvantage is intentional; the purpose
-of the NEMS regression tests is to test the NEMS, not model workflow
-scripts.
-
-<a name="running"></a>Running the System
-----------------------------------------
-
-This section explains how to run the system in its simplest form.
-Later sections discuss [running subsets of the tests](#run-sub),
-[dependency resolution](#dep-res), and [available tests](#list-avail).
-We provide two methods: a simple way using the rt.sh wrapper, and a
-more complex way that provides complete control and flexibility.
-
-### <a name="new-rtsh"></a>Simple Method: rt.sh
-
-For backward compatibility, there is an rt.sh script that acts
-similarly to the old rt.sh.  Some aspects are different to give extra
-flexibility.
-
-To execute in an sh-family shell (sh, bash, ksh, etc.)
-
-    cd NEMS/tests
-    ./rt.sh (options) > rt.log 2>&1 &
-
-To execute in a csh-family shell (csh, tcsh):
-
-    cd NEMS/tests
-    ./rt.sh (options) >& rt.log &
-
-This will run rt.sh in the background and send all output to the
-`rt.log` file.  To see the success or failure information, look in the
-`rt.log` file.
-
-The `(options)` specify what is to be run.  Common needs are:
-
-* `-f` = run the full test suite
-* `-s` = only run the "standard" tests
-* `-t setname` = run the specified set of tests.  See
-  `compsets/all.input` for the full list.  Common names are `standard`,
-  `gfs`, and `nmm`
-* `-b setname` = regenerate the baseline.
-* `-n /path/to/baseline` = specifies the location of the baseline
-  when running the suite in verification or baseline generation modes.
-* `-r PLATFORM:/path/to/rtgen.###` - used by the full test method.
-  See below.
-* `-p project` = set the project or account to use for CPU hours.
-  If unspecified, one will be automatically picked based on
-  CPU availability.
-
-### Full Test Method
-
-The process of running is:
-
-    ./NEMS/tests/rtgen   # generates rtrun and rtreport commands
-    /path/to/USERNAME/rtgen.(ID)/rtrun (options)
-    /path/to/USERNAME/rtgen.(ID)/rtreport
-
-To use this for a commit to the trunk, one must copy the results to
-the NEMS/tests directory.  This could be done manually, or one could
-run rt.sh and tell it to skip the rtgen step.  To do this, use the
-`rt.sh -r` option:
-
-    ./rt.sh -r (PLATFORM):/path/to/USERNAME/rtgen.(ID)
-
-where `(PLATFORM)` is "theia" or "wcoss.phase1"
-
-The rest of this section explains the purpose and function of rtgen,
-rtrun and rtreport.
-
-### Step 1: Generate Test Scripts (rtgen)
-
-The first step is to run `rtgen`.  This will generate a set of scripts
-to run the requested tests.  If you do not request any tests, it will
-run all tests.
-
-    ./NEMS/tests/rtgen
-
-That command will give you instructions and will log the more
-important parts of its execution:
-
-    11/17 18:42:38Z rtgen-INFO:  Will run all known tests.
-    11/17 18:42:50Z rtgen-INFO:  Auto-chosen project for job submission is 'cmp'
-    11/17 18:42:51Z rtgen-INFO:  Auto-chosen ptmp is '/path/to/USERNAME'
-    11/17 18:42:51Z rtgen-INFO:  Generating workflow with id 23768.
-    11/17 18:42:55Z rtgen-INFO:  Requested test has been generated.
-    You need to run the test now.   You have three options:
-    OPTION 1: Put this in your cron:
-      */3 * * * * /path/to/USERNAME/rtgen.23768/rtrun --step --zero-exit \
-         > /path/to/USERNAME/rtgen.23768/rtrun-cron.log 2>&1
-
-    OPTION 2: Run this program:
-      /path/to/USERNAME/rtgen.23768/rtrun --loop
-
-    OPTION 3: Verbose mode: run this program:
-      /path/to/USERNAME/rtgen.23768/rtrun -v --loop
-    Adding -n to that command will disable colors.
-
-### Step 2: Run the Test (rtrun)
-
-The rtrun command runs the tests until all have succeeded or failed.
-You have three options for how to run this.  The easiest execution
-option is number 3, which runs on the command line and reports the
-queue status every few minutes.  The path to rtrun will vary, but the
-command will look something like this:
-
-    /path/to/USERNAME/rtgen.23768/rtrun -v --loop
-
-If the colors annoy you, add the `-n` switch, and if you don't want
-the queue state, remove the `-v` switch.
-
-The components of that path are:
-
-* `/path/to` - a scrub area, such as /scratch4/NCEPDEV/stmp4 or /ptmpp1
-* `USERNAME` - your username, such as `emc.nemspara` or `Samuel.Trahan`
-
-The `rtrun` command will generate output like this:
-
-    11/17 00:19:21Z rtrun INFO: check dependencies and submit jobs...
-    11/17 00:19:22Z rtrun INFO: check status...
-    11/17 00:19:22Z rtrun INFO: workflow is still running and no jobs have failed.
-    11/17 00:19:22Z rtrun INFO: sleep 2
-    11/17 00:19:24Z rtrun INFO: get queue information
-     Job ID  Reserv   Queue   Procs ST Queue Time  Stdout Location
-    -------- ------ --------- ----- -- ----------- ------------------------------------
-      573626        dev          64 R  11/17 00:14 /.../tmp/log/test_gfs_gocart_nemsio.log
-    From bjobs -l  -u Samuel.Trahan (age 0 sec.)
-    11/17 00:19:24Z rtrun INFO: sleep 100
-
-It will keep looping until all jobs have succeeded or failed.  If all
-goes well, the tests will all pass and you will see this message:
-
-    11/17 00:21:04Z rtrun INFO: check dependencies and submit jobs...
-    11/17 00:21:05Z rtrun INFO: check status...
-    11/17 00:21:05Z rtrun INFO: workflow is complete and all jobs succeeded.
-
-### Step 3: Report Results (rtreport)
-
-At that point, you can run rtreport to get a report of the tests.
-Actually, you can run rtreport at any time.  If the tests are not yet
-complete, it will tell you which ones are complete.  It will report
-all it knows about failed tests too.  There are two output formats:
-
-To run:
-
-    /path/to/USERNAME/rtgen.23768/rtreport [mode]
-
-Where the optional `mode` is one of:
-
-  * `status` - short output that only lists failed tests and counts
-    the number of failed, complete, and unfinished tests.
-
-  * `txt` - full text output of all information (the default).
-
-The output of `txt` mode (the default) looks something like this
-
-    BUILD nmm.x: SUCCEEDED
-    BUILD nmm.debug.x: SUCCEEDED
-    BUILD gsm.x: SUCCEEDED
-    BUILD gsm_gocart.x: SUCCEEDED
-    TEST #1: PASS
-      Test nmm_cntrl starting.
-      Wed Nov 16 22:51:23 UTC 2016
-      .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_bin_0000h_00m_00.00s: bit-for-bit identical
-      .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_bin_0024h_00m_00.00s: bit-for-bit identical
-      .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_bin_0048h_00m_00.00s: bit-for-bit identical
-      .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_nio_0000h_00m_00.00s: bit-for-bit identical
-      .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_nio_0024h_00m_00.00s: bit-for-bit identical
-      .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_nio_0048h_00m_00.00s: bit-for-bit identical
-      .../REGRESSION_TEST/NMMB_glob/nmmb_rst_01_bin_0024h_00m_00.00s: bit-for-bit identical
-      .../REGRESSION_TEST/NMMB_glob/nmmb_rst_01_nio_0024h_00m_00.00s: bit-for-bit identical
-      TEST PASSED
-    TEST #2: PASS
-      Test nmm_nemsio starting.
-    ... information about more tests ...
-
-
-### <a name="rerun"></a>Rerunning Failed Tests
-
-If a test fails, you can request that it be rerun via the `rtrewind`
-command.  The command is located in the same directory as `rtrun`
-and can be called in two different ways:
-
-    /path/to/USERNAME/rtgen.23768/rtrewind -a
-
-    /path/to/USERNAME/rtgen.23768/rtrewind job1 [job2 [...]]
-
-The first method requests a rerun of ALL tests and builds while the
-second requests only certain ones be rerun.
-
-The jobs (`job1`, `job2`, ...) are the names from the test suite such
-as `gsm.x` or `nmm_cntrl`.  You can optionally include `test_` or
-`build_` before the name, as it is printed by the `rtreport` command.
-
-### <a name="run-sub"></a>Running Subsets of the Test Suite
-
-The test suite, as of this writing, has 48 tests and 5 build options.
-Frequently, you only want to run a few of them.  The `rtgen` script
-has a simple set arithmetic language for specifying what to run.  The
-subsetting is done on the command line.  For example, to run all
-standard nmm tests, you need to take the intersection of those two
-sets of tests:
-
-    ./NEMS/tests/rtgen 'inter(nmm,standard)'
-
-The `rtgen` will generate a workflow to run just those tests.  
-
-Other subsetting operations:
-
-    union(nmm,wam)   # run all nmm and wam tests
-    minus(gfs,wam)   # run all gsm (gfs) tests that are not wam tests
-    {gfs_slg,nmm_cntrl}  # run the gfs_slg and nmm_cntrl tests
-
-You can combine multiple operations:
-
-    minus(inter(union(gfs,nmm),standard),{gfs_slg,nmm_cntrl})
-
-That will ask rtgen to run all gsm (gfs) and nmm tests that are
-standard tests, except for `gfs_slg` and `nmm_cntrl`.
-
-Despite that, the rtgen will still run the gfs_slg test.  Why?
-Dependency resolution.
-
-### <a name="dep-res"></a>Dependency Resolution
-
-Some tests have dependencies, and `rtgen` will resolve those
-dependencies automatically, similar to how `make` works.  For example,
-the `gfs_slg_rsthst` requires the `gfs_slg` to run first.  Output from
-`gfs_slg` is used as input to `gfs_slg_rsthst`.  If you ask `rtgen` to
-run `gfs_slg_rsthst` without running `gfs_slg`, it will see the
-dependency and add `gfs_slg` to your list of tests.  The builds are
-handled the same way.  The `gfs_slg` has a dependency on the build
-`gsm.x`, and so `rtgen` will always add the `gsm.x` build if you
-select the `gfs_slg` test.
-
-
-### <a name="list-avail"></a>List of Available Tests and Sets
-
-The configuration for `rtgen` is stored in the compsets/all.input file
-in the app level repository.  This is where you specify the available
-tests and sets of tests.
-
-The top few lines of that file look like this
-
-    load 'gsm.input'
-    load 'nmm.input'
-    run nmm_cntrl              @ nmm, standard, baseline, nmmglob
-    run nmm_nemsio             @ nmm,                     nmmglob
-    run nmm_rest               @ nmm,                     nmmglob
-    ... many more "run" statements ...
-
-The first two lines import the details of the test from other files.
-The lines beginning with `run` specify a test to run and the sets it
-belongs to.  The test must be one declared in the other file,
-as discussed later in this document.
-
-The sets listed after the @ sign are the ones recognized by the
-[subsetting functionality of rtgen](#run-sub). 
-
-Note that you can enable tests on only certain platforms by including
-a comparison operator in the list of subsets:
-
-    run gfs_slg_2thread        @ gfs, standard, baseline, slg, plat==wcoss.phase1
-
-This line ensures the `gfs_slg_2thread` is only available on WCOSS Phase 1.
-
-
-
-<a name="work-area"></a>Work Area Contents
-------------------------------------------
-
-Running the `rtgen` creates a directory in a scrub area which will
-contain the generated scripting system, input and output files, logs,
-and resource usage information.  This section documents those files
-and directories.
-
-Recall that running `rtgen` creates a directory with a name like this:
-
-    /path/to/USERNAME/rtgen.23768
-
-That directory contains the following:
-
-* rtrun script
-
-* rtreport script
-
-* jobs directory
-
-* scripts directory
-
-* ush directory
-
-* src directory
-
-  * install.sh
-
-  * uninstall.sh
-
-* exec directory
-
-* include directory
-
-* rocoto directory
-
-* com directory
-
-* tmp directory
-
-  * tmp/log directory
-
-### Jobs, Scripts and Ush
-
-These are the three-tier NCEP workflow directories and have the usual
-meanings:
-
-* jobs - sets up the environment and passes control to the "scripts" level
-
-* scripts - high-level logic for each test
-
-* ush - low-level utility functions
-
-For each test, there is one "jobs" directory file and one "scripts"
-directory file.  The "scripts" directory and "jobs" directory are
-populated by the test blocks which will be discussed in great detail
-in the [Test Description Language](#desc-lang) section.  They are
-generated from the [test blocks](#new-tests).
-
-### Src, Exec, and Include
-
-The `src` directory does not contain source code.  Instead, it
-contains two scripts that describe how to build or uninstall the
-`NEMS.x`:
-
-* install.sh - knows how to build the NEMS.x based on the instructions
-  in the [build blocks](#new-build) as explained in the [Test
-  Description Language](#desc-lang) section in great detail.
-
-* uninstall.sh - deletes the copies of `NEMS.x` and `modules.nems`
-  created by install.sh.
-
-The `install.sh` creates executables and modulefiles which are copied
-into the `exec` and `include` directories.
-
-* exec - one executable for each NEMS build
-
-* include - one file for each NEMS build containing a sequence of
-  of "module load" commands.  These commands will be run before
-  executing the NEMS.x
-
-### Rocoto Directory
-
-The `rtgen` makes one file in the `rocoto` directory.  The `rtrun`
-will create a second file.
-
-* workflow.xml - the definition of the workflow generated by `rtgen`.
-  This includes dependencies and resource requirements.  There is one
-  shell command for each test or build.
-
-* workflow.db - created by `rtrun`, this contains the Rocoto internal
-  state information.
-
-### Tmp and Logs
-
-The `tmp` directory contains all logs and all execution directories
-for each test.
-
-* tmp/log/rocoto.log - log file from Rocoto.  Contains information about
-  batch system events, such as job failures or job submissions.
-
-* tmp/log/*.log - all other files contain logs about a test or build
-
-* tmp/* - all other directories are work areas for tests.  They
-  contain inputs and outputs from the NEMS.x
-
-### Scripts rtrun and rtreport
-
-These are discussed in earlier sections.  The scripts are generated
-automatically by `rtgen`.  The `rtrun` runs Rocoto and the `rtreport`
-scans the reports, combining them into one text file.
-
-### COM directory
-
-This directory contains one subdirectory for each test with all
-verified files as described in a test's [criteria](#criteria) block.
-It also contains the "report.txt" file with the report of the test
-success or failure.
-
-<a name="desc-lang"></a>Test Description Language
--------------------------------------------------
-
-This chapter discusses the language used by the `rtgen` tool to
-describe regression tests and compsets.  The language consists of
-"modules" which are simply a collection of variables and functions. A
-module has a type: build, test, hash, etc.  A set of `run` commands
-list which runnable modules should be executed.
-
-### <a name="vardef"></a>Variable Definitions and Modules
-
-The simplest type of module is a hash, which looks like this:
-
-    nems_vars={
-        atm_model='none'
-        atm_petlist_bounds="-1 -1"
-        ocn_model='none'
-        ocn_petlist_bounds="-1 -1"
-        ice_model='none'
-        ice_petlist_bounds="-1 -1"
-        med_model='nems'
-        med_petlist_bounds="-1 -1"
-        med_atm_coupling_interval_sec='-1'
-        med_ocn_coupling_interval_sec='-1'
-    }
-
-In this example, we have declared a hash called `nems_vars` which
-contains several variables, such as `atm_model` and
-`atm_petlist_bounds`.  Later on, another module declaration can "use"
-this module, to import its variables:
-
-    nmm_vars_global={
-        use plat%nmm_dflt
-        use nems_vars
-        use common_vars
-        use nmm_vars
-        use nmm_aliases
-        use nmm_uncoupled
-        GBRG="glob"
-        CNTL_NAME='NMMB_glob'
-    }
-
-Values can include variable substitution, which uses a syntax similar
-to shell, but with different escape characters:
-
-    common_vars={
-        THRD=1
-        WLCLK=15
-        GEFS_ENSEMBLE=0
-        GEN_ENSEMBLE=0
-        WRITE_DOPOST='.false.'
-        POST_GRIBVERSION='grib1'
-        CONF="@[plat%PARMnems]"
-    }
-
-Here, the `CONF` variable in the `common_vars` module has the value of
-the `PARMnems` variable in the `plat` module.
-
-### Strings
-
-There are three ways of specifying a string:
-
-* Double quotes: "... text here with @[VARIABLE] expansion ..."
-* Single quotes: '... text here with no variable expansion ...'
-* Block string:
-
-        [[[multi-line string
-        with @[VARIABLE] expansion ]]]
-
-If you need to insert a literal @ into the string, you have three
-options.  In these examples, we'll use the multi-line string format:
-
-* [[[  @['this text is not expanded']   ]]]
-* [[[  @["this text is not expanded"]  ]]]
-* [[[ Simple literal @[@] ]]]
-
-### <a name="embedscript"></a> Embedded Scripts
-
-Most of the scripts required to run the tests are automatically
-generated, but there are occasional instances when you need to specify
-specific code.  This is done via `embed` blocks:
-
-    embed bash nems_regtest_prep(RUNDIR,modules,CNTL) [[[
-            mkdir -p "$RUNDIR" "$CNTL"
-            cd @[RUNDIR]
-            source "$modules"
-            export MPI_TYPE_DEPTH=20
-            export ESMF_RUNTIME_COMPLIANCECHECK=OFF:depth=4
-    ]]]
-
-In this example, we have embedded a bash script called
-`nems_regtest_prep`.  
-
-#### Embedded Script Variables: $ vs. @
-
-In the example script, there are two methods of doing variable substitution:
-
-* `@[RUNDIR]`
-* `"$RUNDIR"`
-
-They have slightly different meanings.  In the case of `@[RUNDIR]`,
-the value of the `RUNDIR` variable is substituted directly in the
-generated script.  If the variable contained any shell metacharacters,
-those would be copied verbatim.  In the case of `$RUNDIR`, the bash
-variable is used instead.  That variable's value is set before the
-code in `nems_regtest_prep` is run.
-
-Either approach is valid.  It is up to the user to decide which one to use.
-
-### Platform Detection
-
-The test suite needs to reconfigure certain aspects based on platform:
-WCOSS vs. Theia vs. GAEA, etc.  This is done with `platform` blocks.
-These are simply modules with a `detect` function.  After all
-platforms are defined, an `autodetect` block selects between them.
-
-Here is an example of a platform.  This is the one for Phase 1 of WCOSS.
-
-    platform wcoss.phase1 {
-        use wcoss.common
-        CPU_ACCOUNT='NAM-T2O'
-        pex='1'
-        cores_per_node=32
-        MPI='LSF'
-        SHORT_TEST_QUEUE='&SHORTQ;'
-        LONG_TEST_QUEUE='&LONGQ;'
-        BUILD_QUEUE='&BUILDQ;'
-    
-        embed bash detect [[[
-            # This function is used at PARSE TIME to detect whether we are
-            # on WCOSS Phase 1.  It must be very fast and low resource
-            # usage since the parser runs it.
-            if [[ -d /usrx && -d /global && -e /etc/redhat-release && \
-                  -e /etc/prod ]] ; then
-                # We are on WCOSS Phase 1 or 2.
-                if ( ! cat /proc/cpuinfo |grep 'processor.*32' ) ; then
-                    # Fewer than 32 fake (hyperthreading) cpus, so Phase 1.
-                    exit 0
-                fi
-            fi
-            exit 1
-        ]]]
-        ... more wcoss stuff ...
-    }
-
-Note the `embed bash` block called `detect`.  This is the bash
-function that is run to detect whether the script is running on WCOSS
-Phase 1.
-
-Once all platforms are defined, there is an autodetect block:
-
-    autodetect plat (/ wcoss.phase1, theia /)
-
-This will define the `plat` variable, which is a duplicate of either
-`wcoss.phase1` or `theia`.
-
-### <a name="new-build"></a> Build Definition
-
-The `build` blocks define a method of building an executable.  They
-must define three variables and a function:
-
-* `NEMS.x` = path to the NEMS executable created by this build
-
-* `modules.nems` = list of "module load" commands to execute before
-   running the executable
-
-* `target` = file to check to ensure the build succeeded; should be
-   the same as the `NEMS.x` variable
-
-* `build` = an `embed bash` function that builds the program.
-
-Here is an example.  This builds the GOCART-capable standalone GSM in
-the NEMSLegacy branch:
-
-    build gsm_gocart.x {
-        use plat
-        NEMS.x="@[plat%EXECrt]/NEMS_gocart.x"
-        modules.nems="@[plat%INCrt]/NEMS_gocart.x.modules"
-        target="@[NEMS.x]"
-        build=NEMSAppBuilder(NEMS.x="@[NEMS.x]",modules.nems="@[modules.nems]",
-                             OPTS="app=GSM-GOCART")
-    }
-
-The NEMSAppBuilder function is declared elsewhere.  It is used by most
-of the `build` definitions to avoid duplication.  That function looks
-like this:
-
-    embed bash NEMSAppBuilder(NEMS.x,modules.nems,OPTS)
-    [[[
-            mkdir -p "@[plat%EXECrt]" "@[plat%INCrt]"
-            rm -f "@[NEMS.x]" "@[modules.nems]"
-            cd @[plat%HOMEnems]
-    
-            # NOTE: Replace "rebuild" with "norebuild" to disable "gmake clean"
-            ./NEMS/NEMSAppBuilder rebuild $OPTS
-    
-            cd @[plat%SRCnems]
-            cp -fp ../exe/NEMS.x "@[NEMS.x]"
-            cp -fp conf/modules.nems "@[modules.nems]"
-    ]]]
-
-Notice that the three variables we're passing from gsm_gocart.x%build
-are in the definition line of NEMSAppBuilder:
-
-    embed bash NEMSAppBuilder(NEMS.x,modules.nems,OPTS)
-    ...
-    build gsm_gocart.x {
-        ...
-        build=NEMSAppBuilder(NEMS.x="@[NEMS.x]",modules.nems="@[modules.nems]",
-                             OPTS="app=GSM-GOCART")
-
-### <a name="new-tests"></a>Tests
-
-A test is a module that defines the following:
-
-* dependencies - any other tests or builds that have to run first
-
-* `prep` - a preparation step to run before anything else.  This
-  generally consists of `mkdir`, `module`, or `cd` commands.
-
-* `input` - a `filters` block that provides a list of input files or
-  directories and instructions on how to copy or filter them.  This is
-  described below.
-
-* `execute` - a `spawn` block that describes how to run the `NEMS.x`.
-  This is also used to generate job cards to request the needed
-  resources.
-
-* `output` - criteria for validating the test output.  These are
-  usually `criteria` blocks, described below.
-
-This is the `test` block for the global NMM control.  The text below
-describes the meaning of each part:
-
-    # nmm_cntrl test
-    test nmm_cntrl: nmm.x {
-        use nmm_vars_global
-    
-        # Convenience variables:
-        RUNDIR_ROOT="@[plat%TMPrt]"
-        RUNDIR="@[RUNDIR_ROOT]/@[TEST_NAME]"
-        TEST_DESCR="Compare NMMB-global results with previous trunk version"
-        CNTL="@[plat%BASELINE]/@[CNTL_NAME]"      # Control baseline area
-        TEST_IN="@[plat%INPUTS]/@[CNTL_NAME]"   # Test-specific input data
-        COM="@[plat%COMrt]/@[TEST_NAME]"
-    
-        criteria output {
-            #    NEMS.x output file --------- comparison - control file or dir
-            "nmmb_hst_01_bin_0000h_00m_00.00s" .bitcmp. "@[CNTL]"
-            "nmmb_hst_01_bin_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
-            "nmmb_hst_01_bin_0048h_00m_00.00s" .bitcmp. "@[CNTL]"
-            "nmmb_hst_01_nio_0000h_00m_00.00s" .bitcmp. "@[CNTL]"
-            "nmmb_hst_01_nio_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
-            "nmmb_hst_01_nio_0048h_00m_00.00s" .bitcmp. "@[CNTL]"
-            "nmmb_rst_01_bin_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
-            "nmmb_rst_01_nio_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
-        }
-    
-        # The prep is run at the top of any job.  It should do things
-        # like making directories and loading modules.
-        prep=nems_regtest_prep(
-            RUNDIR="@[RUNDIR]",modules="@[nmm.x%modules.nems]",
-            CNTL="@[CNTL]")
-    
-        # The execute step runs the program:
-        spawn execute {
-            { "@[nmm.x%NEMS.x]", ranks="@[TASKS]", threads="@[OpenMPThreads]" }
-        }
-    
-        filters input {
-            # work file         operation   input file
-         "input_domain_01"        .copy. "@[TEST_IN]/test_input_nmmb_global"
-         "input_domain_01_nemsio" .copy. "@[TEST_IN]/test_input_nmmb_global.nemsio"
-         "GWD_bin_01"             .copy. "@[TEST_IN]/GWD_bin_01"
-    
-         "nems.configure"      .atparse. "@[CONF]/nems.configure.@[nems_configure].IN"
-         "atmos.configure"     .atparse. "@[CONF]/atmos.configure_nmm"
-    
-         "configure_file_01"   .atparse. "@[CONF]/nmm_conf/nmm_@[GBRG]_conf.IN"
-         "model_configure"        .copy. "configure_file_01"
-    
-         "*"                   .copydir. "@[plat%NMM_DATA]"
-    
-         "VEGPARM.TBL"            .copy. "IGBP_VEGPARM.TBL"
-         "LANDUSE.TBL"            .copy. "IGBP_LANDUSE.TBL"
-         "ETAMPNEW_DATA"          .copy. "ETAMPNEW_DATA.expanded_rain"
-         "fort.28"                .link. "global_o3prdlos.f77"
-         "fort.48"                .link. "global_o3clim.txt"
-    
-         "solver_state.txt"       .copy. "@[plat%PARMnems]/solver_state.txt"
-         "nests.txt"              .copy. "@[plat%PARMnems]/nests.txt"
-        }
-    }
-    
-#### Test Dependencies
-
-The first line (after the comment) is this:
-
-    test nmm_cntrl: nmm.x {
-
-The `: nmm.x` indicates that the `nmm.x` build has to run before the
-`nmm_cntrl` can start.  The test suite will include that dependency in
-its Rocoto or ecFlow automation system.
-
-#### Test Prep
-
-The prep step is a simple script that prepares the environment.  In
-this case, it just runs `nems_regtest_prep`, which we discussed
-earlier:
-
-        # The prep is run at the top of any job.  It should do things
-        # like making directories and loading modules.
-        prep=nems_regtest_prep(
-            RUNDIR="@[RUNDIR]",modules="@[nmm.x%modules.nems]",
-            CNTL="@[CNTL]")
-
-Note that it refers to `@[RUNDIR]` and `@[CNTL]`.  Those variables are
-defined earlier in the same test:
-
-        # Convenience variables:
-        RUNDIR_ROOT="@[plat%TMPrt]"
-        RUNDIR="@[RUNDIR_ROOT]/@[TEST_NAME]"
-        TEST_DESCR="Compare NMMB-global results with previous trunk version"
-        CNTL="@[plat%BASELINE]/@[CNTL_NAME]"      # Control baseline area
-        TEST_IN="@[plat%INPUTS]/@[CNTL_NAME]"   # Test-specific input data
-        COM="@[plat%COMrt]/@[TEST_NAME]"
-
-#### Test Input Filter
-
-This block specifies the input files and how to prepare them.  It
-declares an `input` variable inside the `nmm_cntrl` test, which is of
-type `filters`:
-
-        filters input {
-            # work file         operation   input file
-         "input_domain_01"        .copy. "@[TEST_IN]/test_input_nmmb_global"
-         "input_domain_01_nemsio" .copy. "@[TEST_IN]/test_input_nmmb_global.nemsio"
-         "GWD_bin_01"             .copy. "@[TEST_IN]/GWD_bin_01"
-    
-         "nems.configure"      .atparse. "@[CONF]/nems.configure.@[nems_configure].IN"
-         "atmos.configure"     .atparse. "@[CONF]/atmos.configure_nmm"
-    
-         "configure_file_01"   .atparse. "@[CONF]/nmm_conf/nmm_@[GBRG]_conf.IN"
-         "model_configure"        .copy. "configure_file_01"
-    
-         "*"                   .copydir. "@[plat%NMM_DATA]"
-    
-         "VEGPARM.TBL"            .copy. "IGBP_VEGPARM.TBL"
-         "LANDUSE.TBL"            .copy. "IGBP_LANDUSE.TBL"
-         "ETAMPNEW_DATA"          .copy. "ETAMPNEW_DATA.expanded_rain"
-         "fort.28"                .link. "global_o3prdlos.f77"
-         "fort.48"                .link. "global_o3clim.txt"
-    
-         "solver_state.txt"       .copy. "@[plat%PARMnems]/solver_state.txt"
-         "nests.txt"              .copy. "@[plat%PARMnems]/nests.txt"
-        }
-
-Notice that there are four different operations in the middle column:
-
-| Local file          | Operation   | Remote file or directory        |  
-| ------------------- | ----------- | ------------------------------- |
-| `"GWD_bin_01"`      | `.copy.`    | `"@[TEST_IN]/GWD_bin_01"`       |
-| `"*"`               | `.copydir.` | `"@[plat%NMM_DATA]"`            |
-| `"fort.28"`         | `.link.`    | `"global_o3prdlos.f77"`         |
-| `"atmos.configure"` | `.atparse.` | `"@[CONF]/atmos.configure_nmm"` |
-
-* `.copy.` - copies the remote file (third column) to the local file
-  (first column).  
-
-        cp -p "$third_column" "$first_column"
-
-* `.link.` - makes a symbolic link to the remote file (third column)
-  from the local file (first column)
-
-        ln -s "$third_column" "$first_column"
-
-* `.copydir.` - copies from the remote file or directory (third
-  column) all files that match the glob (first column) into the local
-  directory.
-
-        cp -rp "$third_column"/$first_column .
-
-* `.atparse.` - runs the remote file (third column) through a filter
-  to create the local file (first column).  The filter will replace
-  text like `@[varname]` with the corresponding variable.  
-
-  In the `.atparse.` variable replacement, only variables from the
-  test's module are replaced.  Hence, if you want many variables
-  accessible to `.atparse.`d files, you need to either declare or
-  `use` them.  The `nmm_cntrl` test does that at the top of its
-  declaration:
-
-        test nmm_cntrl: nmm.x {
-            use nmm_vars_global
-        
-            # Convenience variables:
-            RUNDIR_ROOT="@[plat%TMPrt]"
-            RUNDIR="@[RUNDIR_ROOT]/@[TEST_NAME]"
-            TEST_DESCR="Compare NMMB-global results with previous trunk version"
-            CNTL="@[plat%BASELINE]/@[CNTL_NAME]"      # Control baseline area
-            TEST_IN="@[plat%INPUTS]/@[CNTL_NAME]"   # Test-specific input data
-            COM="@[plat%COMrt]/@[TEST_NAME]"
-
-  Everything in the `nmm_vars_global` module will be available, plus
-  all six of the declared "convenience variables".
-
-  Note that variables with a period (".") or percent ("%") in their
-  name are not yet available.  That will be fixed in a later release.
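-
-  As a purely illustrative sketch (the template line and the value of
-  `TASKS` are invented), an `.atparse.`d template is expanded like this:
-
-        # a line in the remote template (third column):
-        ntasks = @[TASKS]
-        # the same line in the local file (first column), if TASKS is 32:
-        ntasks = 32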
-
-#### Test Execution
-
-The next step is to actually run the `NEMS.x`:
-
-        # The execute step runs the program:
-        spawn execute {
-            { "@[nmm.x%NEMS.x]", ranks="@[TASKS]", threads="@[OpenMPThreads]" }
-        }
-
-The columns inside the `execute` block have these meanings:
-
-* `"@[nmm.x%NEMS.x]"` - the program to run
-
-* `ranks="@[TASKS]"` - number of MPI ranks
-
-* `threads="@[OpenMPThreads]"` - optional; number of threads per rank.
-  Default is 1.
-
-* `ppn=8` - not used here; optional.  Specifies the number of MPI
-  ranks per node.  The GSM needs this due to memory limits.  The
-  default is calculated automatically by the system, and is the
-  largest possible number of MPI ranks per node.
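-
-As a rough sketch only (the launcher and the shell variable names
-here are illustrative and platform-dependent, not what the suite
-necessarily emits), the `execute` block corresponds to something like:
-
-    # threads per rank -> OpenMP; ranks -> MPI process count
-    export OMP_NUM_THREADS="$OpenMPThreads"
-    mpirun -np "$TASKS" "$NEMS_X"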
-
-#### <a name="criteria"></a> Test Verification or Baseline Generation
-
-The last step is to either verify the results or generate the
-baseline.  Both cases are handled by the output criteria block:
-
-    criteria output {
-        #    NEMS.x output file --------- comparison - control file or dir
-        "nmmb_hst_01_bin_0000h_00m_00.00s" .bitcmp. "@[CNTL]"
-        "nmmb_hst_01_bin_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
-        "nmmb_hst_01_bin_0048h_00m_00.00s" .bitcmp. "@[CNTL]"
-        "nmmb_hst_01_nio_0000h_00m_00.00s" .bitcmp. "@[CNTL]"
-        "nmmb_hst_01_nio_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
-        "nmmb_hst_01_nio_0048h_00m_00.00s" .bitcmp. "@[CNTL]"
-        "nmmb_rst_01_bin_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
-        "nmmb_rst_01_nio_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
-    }
-
-The columns have this meaning:
-
-* `"nmmb_hst_01_bin_0000h_00m_00.00s"` - local directory file
-
-* `.bitcmp.` - verification method.  Only `.bitcmp.` is supported for now.
-
-* `"@[CNTL]"` - remote directory file or remote directory that
-  contains the baseline.  If it is a remote directory, the file is
-  assumed to have the same name.
-
-In verification mode, the comparisons are performed after running NEMS.x.
-
-In baseline generation mode, the local file (first column) is copied
-to the remote location (third column).
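-
-A minimal bash sketch of what a single criteria line amounts to in
-the two modes (the variable names are illustrative):
-
-    if [[ "$mode" == "baseline" ]] ; then
-        cp -p "$local_file" "$CNTL/"            # baseline generation
-    else
-        cmp "$local_file" "$CNTL/$local_file"   # .bitcmp. verification
-    fi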
\ No newline at end of file
Index: checkout/doc/README.GFS.md
===================================================================
--- checkout/doc/README.GFS.md	(revision 93212)
+++ checkout/doc/README.GFS.md	(nonexistent)
@@ -1,169 +0,0 @@
-GSM Modification Instructions
------------------------------
-
-### How to add new variable to sigf file:
-
-1. Go to `~/src/atmos/gfs/dyn` directory
-
-2. Search for the new variable:
-
-    a. If it is in a module, add that module in subroutine
-    POINT_DYNAMICS_OUTPUT_GFS in gfs_dynamics_output.f
-
-    b. If it is not in any module, add the new variable to the
-    dynamics internal state, so that it can be passed into
-    POINT_DYNAMICS_OUTPUT_GFS
-
-      1. declare the variable in gfs_dynamics_internal_state_mod.f
-
-      2. allocate the variable in gfs_dynamics_initialize_mod.f
-
-    c. If the new variable is a 2D or 3D field in grid_gr, do nothing
-    (grid_gr has already been declared and allocated)
-
-3. In gfs_dynamics_output.f, add the variable name to the
-   corresponding output list, set the pointer in subroutine
-   POINT_DYNAMICS_OUTPUT_GFS for the new variable. (see example below)
-
-4. In gfs_dynamics_initialize_mod.f, increase the dimension of the
-   output full grid buffer, `buff_mult_pieceg`, by increasing
-   `ngrids_gg`: `ngrids_gg=ngrids_gg+1` (2D) or
-   `ngrids_gg=ngrids_gg+fld_levs` (3D)
-
-5. If the new variable is a 2D or 3D array, pass it into
-   wrtout_dynamics and add the variable to subroutine grid_collect.  In
-   grid_collect.f, interpolate the field onto the full grid and save
-   the data in buff_mult_pieceg.
-
-With these changes, a field (2D or 3D array) or an attribute (1D
-int, real, log) will be added into the sigma field bundle in the import
-write state, and it will then be written out in the write grid component.
-
-E.g., to add dpdt (pressure tendency, ptend) to the sigf file:
-
-1. dpdt is in grid_gr
-2. in gfs_dynamics_output.f, in DYN_INT_STATE_3D_R_ADIAB, add
-
-        ,'ptend       ', 'OGFS_SIG  ', 'levs      ' &
-
-before tracer "spfh".  If adding a new tracer, add that tracer after
-"clwmr".  Notice that in POINT_DYNAMICS_OUTPUT_GFS, the pointer for 3D
-real array output is set to buff_mult_pieceg:
-
-        R_3D(1)%NAME=>buff_mult_pieceg
-
-We will add the output field ptend to buff_mult_pieceg.
-
-3. in subroutine wrtout_dynamics in wrtout_dynamics.f, get dpdt from
-`grid_gr(:,:,g_dpdt:g_dpdt+levs-1)`, and pass dpdt to grid_collect
-
-        !
-         do k=1,levs
-           do i=1,lons_lat
-             dpdt(i,lan,k) = grid_gr(i+jlonf,g_dpdt-1+k)
-           enddo
-         enddo
-         call grid_collect (zsg,psg,uug,vvg,ttg,rqg,dpg,dpdt,
-        &          global_lats_a,lonsperlat)
-
-4. in  gfs_dynamics_initialize_mod.f, 
-
-        ngrids_gg=ngrids_gg+levs
-
-5. in grid_collect.f, interpolate the field from the reduced grid to
-the full grid, and add this field to buff_mult_pieceg before the
-tracers (rqg) start:
-
-       !
-        do k=1,levs
-          buffi(:,:) = dpdt(:,:,k)
-          CALL uninterpreg(1,kmsk,buffo,buffi,global_lats_a,lonsperlat,
-       & buff_mult_pieceg(1,1,2+5*levs+k) )
-        enddo
-
-The same procedure applies to 2D arrays.
-
-### How to add new variable to sfcf or flxf file:
-
-1. Go to `~/src/atmos/gfs/phys` directory
-
-2. Search for the new variable:
-
-    a. if it is in a module, add that module in subroutine
-    POINT_PHYSICS_OUTPUT_GFS in gfs_physics_output.f
-
-    b. if it is not in any module, and the new variable is a scalar
-    or a 1D array, add it to the physics internal state, so it can be
-    passed into POINT_PHYSICS_OUTPUT
-
-    c. if it is not in any module, and the new variable is a 2D sfc or
-    flx field, add it in gfs_physics_sfc_flx_mod.f:
-
-      1. for a sfc field, add the new variable in data type
-      Sfc_Var_Data,
-
-      2. for a flx field, add the new variable in Flx_Var_Data.
-      Allocate the new field in gfs_physics_sfc_flx_set_mod.f; for a
-      flx field, also initialize the field in subroutine flx_init
-
-3. In gfs_physics_output.f, add the variable name to the corresponding
-output list.  'OGFS_FLX' in the list is for the flx file, 'OGFS_SFC' is
-for the sfc file, and 'OGFS_PHY' is for both files.  The field name can
-be `"field_name"//"_"//"statistics property"`.  Set the pointer in
-subroutine POINT_PHYSICS_OUTPUT_GFS for the new variable.
-
-4. If the new variable is a 2D or 3D sfc or flx array:
-
-    a. increase the dimension of the sfc or flx output file buffer.  In
-    gfs_physics_initialize_mod.f, increase ngrids_sfcc (total number of
-    sfc fields), ngrids_sfc2d (total number of 2D sfc fields),
-    ngrids_sfc3d (total 2D fields for all 3D sfc fields), or
-    ngrids_flx (total 2D flx fields)
-
-    b. for a sfc field, in subroutine sfc_collect in wrtout_physics.f,
-    interpolate the field to the full grid, then put the output full
-    grid field in buff_mult_piecea2d for a 2D array, or in
-    buff_mult_piecea3d for a 3D array.
-
-    c. for a flx field, in subroutine wrtflx_a in wrtout_physics.f,
-    interpolate the field to the full grid and put the output full
-    grid field in buff_mult_piecef.
-
-With these changes, a field (2D or 3D array) or an attribute (1D
-int, real, or log) will be added into the sfc or flx field bundle in
-the import write state, and it will then be written out in the write
-grid component.
-
-E.g., to add sunshine duration time (sunsd) to the flxf file:
-
-1. in gfs_physics_sfc_flx_mod.f, in TYPE Flx_Var_Data, add:
-
-        real(kind=kind_phys),pointer:: suntim(:,:)=>null()
-
-   allocate this array in gfs_physics_sfc_flx_set_mod.f; in the
-   allocation of the flx fields, add:
-
-        flx_fld%suntim  (dim1,dim2), &
-
-2. in gfs_physics_output.f, in array PHY_INT_STATE_2D_R_FLX, add:
-
-        ,'sunsd_acc       ', 'OGFS_FLX        ', 'sfc             ' &
-
-3. add the new field to the output buffer buff_mult_piecef:
-
-    a. change the dimension of buff_mult_piecef in gfs_physics_initialize_mod.f:
-
-            ngrids_flx  = 66+43+6
-
-    b. in wrtflx_a in wrtout_physics.f, interpolate suntim to the full
-    grid and save it in buff_mult_piecef:
-
-            !
-            !    accumulated sunshine time
-            !
-                  glolal  = flx_fld%suntim
-                  ngrid2d = ngrid2d+1
-                  CALL uninterprez(2,kmsk0,buffo,glolal,global_lats_r,lonsperlar,
-            &     buff_mult_piecef(1,1,ngrid2d))
-            !     if(ierr.ne.0)print*,'wrtsfc gribit ierr=',ierr,'  ',
-            !    x '107)Accumulated sunshine duration (sec)'
-            !
-            !    end sunshine time
-
Index: checkout/doc/markdown2.py
===================================================================
--- checkout/doc/markdown2.py	(revision 93212)
+++ checkout/doc/markdown2.py	(nonexistent)
@@ -1,2610 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 Trent Mick.
-# Copyright (c) 2007-2008 ActiveState Corp.
-# License: MIT (http://www.opensource.org/licenses/mit-license.php)
-
-from __future__ import generators
-
-r"""A fast and complete Python implementation of Markdown.
-
-[from http://daringfireball.net/projects/markdown/]
-> Markdown is a text-to-HTML filter; it translates an easy-to-read /
-> easy-to-write structured text format into HTML.  Markdown's text
-> format is most similar to that of plain text email, and supports
-> features such as headers, *emphasis*, code blocks, blockquotes, and
-> links.
->
-> Markdown's syntax is designed not as a generic markup language, but
-> specifically to serve as a front-end to (X)HTML. You can use span-level
-> HTML tags anywhere in a Markdown document, and you can use block level
-> HTML tags (like <div> and <table> as well).
-
-Module usage:
-
-    >>> import markdown2
-    >>> markdown2.markdown("*boo!*")  # or use `html = markdown_path(PATH)`
-    u'<p><em>boo!</em></p>\n'
-
-    >>> markdowner = Markdown()
-    >>> markdowner.convert("*boo!*")
-    u'<p><em>boo!</em></p>\n'
-    >>> markdowner.convert("**boom!**")
-    u'<p><strong>boom!</strong></p>\n'
-
-This implementation of Markdown implements the full "core" syntax plus a
-number of extras (e.g., code syntax coloring, footnotes) as described on
-<https://github.com/trentm/python-markdown2/wiki/Extras>.
-"""
-
-cmdln_desc = """A fast and complete Python implementation of Markdown, a
-text-to-HTML conversion tool for web writers.
-
-Supported extra syntax options (see -x|--extras option below and
-see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
-
-* code-friendly: Disable _ and __ for em and strong.
-* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
-* fenced-code-blocks: Allows a code block to not have to be indented
-  by fencing it with '```' on a line before and after. Based on
-  <http://github.github.com/github-flavored-markdown/> with support for
-  syntax highlighting.
-* footnotes: Support footnotes as in use on daringfireball.net and
-  implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
-* header-ids: Adds "id" attributes to headers. The id value is a slug of
-  the header text.
-* html-classes: Takes a dict mapping html tag names (lowercase) to a
-  string to use for a "class" tag attribute. Currently only supports "img",
-  "table", "pre" and "code" tags. Add an issue if you require this for other
-  tags.
-* link-patterns: Auto-link given regex patterns in text (e.g. bug number
-  references, revision number references).
-* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
-  have markdown processing be done on its contents. Similar to
-  <http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
-  some limitations.
-* metadata: Extract metadata from a leading '---'-fenced block.
-  See <https://github.com/trentm/python-markdown2/issues/77> for details.
-* nofollow: Add `rel="nofollow"` to add `<a>` tags with an href. See
-  <http://en.wikipedia.org/wiki/Nofollow>.
-* numbering: Support of generic counters.  Non standard extension to
-  allow sequential numbering of figures, tables, equations, exhibits etc.
-* pyshell: Treats unindented Python interactive shell sessions as <code>
-  blocks.
-* smarty-pants: Replaces ' and " with curly quotation marks or curly
-  apostrophes.  Replaces --, ---, ..., and . . . with en dashes, em dashes,
-  and ellipses.
-* spoiler: A special kind of blockquote commonly hidden behind a
-  click on SO. Syntax per <http://meta.stackexchange.com/a/72878>.
-* tag-friendly: Requires atx style headers to have a space between the # and
-  the header text. Useful for applications that require twitter style tags to
-  pass through the parser.
-* tables: Tables using the same format as GFM
-  <https://help.github.com/articles/github-flavored-markdown#tables> and
-  PHP-Markdown Extra <https://michelf.ca/projects/php-markdown/extra/#table>.
-* toc: The returned HTML string gets a new "toc_html" attribute which is
-  a Table of Contents for the document. (experimental)
-* use-file-vars: Look for an Emacs-style markdown-extras file variable to turn
-  on Extras.
-* wiki-tables: Google Code Wiki-style tables. See
-  <http://code.google.com/p/support/wiki/WikiSyntax#Tables>.
-* xml: Passes one-liner processing instructions and namespaced XML tags.
-"""
-
-# Dev Notes:
-# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
-#   not yet sure if there are implications with this. Compare 'pydoc sre'
-#   and 'perldoc perlre'.
-
-__version_info__ = (2, 3, 2)
-__version__ = '.'.join(map(str, __version_info__))
-__author__ = "Trent Mick"
-
-import sys
-import re
-import logging
-try:
-    from hashlib import md5
-except ImportError:
-    from md5 import md5
-import optparse
-from random import random, randint
-import codecs
-try:
-    from urllib import quote_plus
-except ImportError:
-    from urllib.parse import quote_plus
-
-
-# ---- Python version compat
-
-if sys.version_info[:2] < (2, 4):
-    def reversed(sequence):
-        for i in sequence[::-1]:
-            yield i
-
-# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
-if sys.version_info[0] <= 2:
-    py3 = False
-    try:
-        bytes
-    except NameError:
-        bytes = str
-    base_string_type = basestring
-elif sys.version_info[0] >= 3:
-    py3 = True
-    unicode = str
-    base_string_type = str
-
-# ---- globals
-
-DEBUG = False
-log = logging.getLogger("markdown")
-
-DEFAULT_TAB_WIDTH = 4
-
-
-SECRET_SALT = bytes(randint(0, 1000000))
-def _hash_text(s):
-    return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
-
-# Table of hash values for escaped characters:
-g_escape_table = dict([(ch, _hash_text(ch))
-    for ch in '\\`*_{}[]()>#+-.!'])
-
-
-# ---- exceptions
-class MarkdownError(Exception):
-    pass
-
-
-# ---- public api
-
-def markdown_path(path, encoding="utf-8",
-                  html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
-                  safe_mode=None, extras=None, link_patterns=None,
-                  use_file_vars=False):
-    fp = codecs.open(path, 'r', encoding)
-    text = fp.read()
-    fp.close()
-    return Markdown(html4tags=html4tags, tab_width=tab_width,
-                    safe_mode=safe_mode, extras=extras,
-                    link_patterns=link_patterns,
-                    use_file_vars=use_file_vars).convert(text)
-
-
-def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
-             safe_mode=None, extras=None, link_patterns=None,
-             use_file_vars=False):
-    return Markdown(html4tags=html4tags, tab_width=tab_width,
-                    safe_mode=safe_mode, extras=extras,
-                    link_patterns=link_patterns,
-                    use_file_vars=use_file_vars).convert(text)
-
-
-class Markdown(object):
-    # The dict of "extras" to enable in processing -- a mapping of
-    # extra name to argument for the extra. Most extras do not have an
-    # argument, in which case the value is None.
-    #
-    # This can be set via (a) subclassing and (b) the constructor
-    # "extras" argument.
-    extras = None
-
-    urls = None
-    titles = None
-    html_blocks = None
-    html_spans = None
-    html_removed_text = "[HTML_REMOVED]"  # for compat with markdown.py
-
-    # Used to track when we're inside an ordered or unordered list
-    # (see _ProcessListItems() for details):
-    list_level = 0
-
-    _ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
-
-    def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
-                 extras=None, link_patterns=None, use_file_vars=False):
-        if html4tags:
-            self.empty_element_suffix = ">"
-        else:
-            self.empty_element_suffix = " />"
-        self.tab_width = tab_width
-
-        # For compatibility with earlier markdown2.py and with
-        # markdown.py's safe_mode being a boolean,
-        #   safe_mode == True -> "replace"
-        if safe_mode is True:
-            self.safe_mode = "replace"
-        else:
-            self.safe_mode = safe_mode
-
-        # Massaging and building the "extras" info.
-        if self.extras is None:
-            self.extras = {}
-        elif not isinstance(self.extras, dict):
-            self.extras = dict([(e, None) for e in self.extras])
-        if extras:
-            if not isinstance(extras, dict):
-                extras = dict([(e, None) for e in extras])
-            self.extras.update(extras)
-        assert isinstance(self.extras, dict)
-        if "toc" in self.extras and "header-ids" not in self.extras:
-            self.extras["header-ids"] = None   # "toc" implies "header-ids"
-        self._instance_extras = self.extras.copy()
-
-        self.link_patterns = link_patterns
-        self.use_file_vars = use_file_vars
-        self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
-
-        self._escape_table = g_escape_table.copy()
-        if "smarty-pants" in self.extras:
-            self._escape_table['"'] = _hash_text('"')
-            self._escape_table["'"] = _hash_text("'")
-
-    def reset(self):
-        self.urls = {}
-        self.titles = {}
-        self.html_blocks = {}
-        self.html_spans = {}
-        self.list_level = 0
-        self.extras = self._instance_extras.copy()
-        if "footnotes" in self.extras:
-            self.footnotes = {}
-            self.footnote_ids = []
-        if "header-ids" in self.extras:
-            self._count_from_header_id = {}  # no `defaultdict` in Python 2.4
-        if "metadata" in self.extras:
-            self.metadata = {}
-
-    # Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
-    # should only be used in <a> tags with an "href" attribute.
-    _a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE)
-
-    # Opens the linked document in a new window or tab;
-    # should only be used in <a> tags with a "target" attribute,
-    # same as with _a_nofollow.
-    _a_blank = _a_nofollow
-
-    def convert(self, text):
-        """Convert the given text."""
-        # Main function. The order in which other subs are called here is
-        # essential. Link and image substitutions need to happen before
-        # _EscapeSpecialChars(), so that any *'s or _'s in the <a>
-        # and <img> tags get encoded.
-
-        # Clear the global hashes. If we don't clear these, you get conflicts
-        # from other articles when generating a page which contains more than
-        # one article (e.g. an index page that shows the N most recent
-        # articles):
-        self.reset()
-
-        if not isinstance(text, unicode):
-            # TODO: perhaps shouldn't presume UTF-8 for string input?
-            text = unicode(text, 'utf-8')
-
-        if self.use_file_vars:
-            # Look for emacs-style file variable hints.
-            emacs_vars = self._get_emacs_vars(text)
-            if "markdown-extras" in emacs_vars:
-                splitter = re.compile("[ ,]+")
-                for e in splitter.split(emacs_vars["markdown-extras"]):
-                    if '=' in e:
-                        ename, earg = e.split('=', 1)
-                        try:
-                            earg = int(earg)
-                        except ValueError:
-                            pass
-                    else:
-                        ename, earg = e, None
-                    self.extras[ename] = earg
-
-        # Standardize line endings:
-        text = text.replace("\r\n", "\n")
-        text = text.replace("\r", "\n")
-
-        # Make sure $text ends with a couple of newlines:
-        text += "\n\n"
-
-        # Convert all tabs to spaces.
-        text = self._detab(text)
-
-        # Strip any lines consisting only of spaces and tabs.
-        # This makes subsequent regexen easier to write, because we can
-        # match consecutive blank lines with /\n+/ instead of something
-        # contorted like /[ \t]*\n+/ .
-        text = self._ws_only_line_re.sub("", text)
-
-        # strip metadata from head and extract
-        if "metadata" in self.extras:
-            text = self._extract_metadata(text)
-
-        text = self.preprocess(text)
-
-        if "fenced-code-blocks" in self.extras and not self.safe_mode:
-            text = self._do_fenced_code_blocks(text)
-
-        if self.safe_mode:
-            text = self._hash_html_spans(text)
-
-        # Turn block-level HTML blocks into hash entries
-        text = self._hash_html_blocks(text, raw=True)
-
-        if "fenced-code-blocks" in self.extras and self.safe_mode:
-            text = self._do_fenced_code_blocks(text)
-
-        # Because numbering references aren't links (yet?) then we can do everything associated with counters
-        # before we get started
-        if "numbering" in self.extras:
-            text = self._do_numbering(text)
-
-        # Strip link definitions, store in hashes.
-        if "footnotes" in self.extras:
-            # Must do footnotes first because an unlucky footnote defn
-            # looks like a link defn:
-            #   [^4]: this "looks like a link defn"
-            text = self._strip_footnote_definitions(text)
-        text = self._strip_link_definitions(text)
-
-        text = self._run_block_gamut(text)
-
-        if "footnotes" in self.extras:
-            text = self._add_footnotes(text)
-
-        text = self.postprocess(text)
-
-        text = self._unescape_special_chars(text)
-
-        if self.safe_mode:
-            text = self._unhash_html_spans(text)
-
-        if "nofollow" in self.extras:
-            text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)
-
-        if "target-blank-links" in self.extras:
-            text = self._a_blank.sub(r'<\1 target="_blank"\2', text)
-
-        text += "\n"
-
-        rv = UnicodeWithAttrs(text)
-        if "toc" in self.extras:
-            rv._toc = self._toc
-        if "metadata" in self.extras:
-            rv.metadata = self.metadata
-        return rv
-
-    def postprocess(self, text):
-        """A hook for subclasses to do some postprocessing of the html, if
-        desired. This is called before unescaping of special chars and
-        unhashing of raw HTML spans.
-        """
-        return text
-
-    def preprocess(self, text):
-        """A hook for subclasses to do some preprocessing of the Markdown, if
-        desired. This is called after basic formatting of the text, but prior
-        to any extras, safe mode, etc. processing.
-        """
-        return text
-
-    # Is metadata if the content starts with optional '---'-fenced `key: value`
-    # pairs. E.g. (indented for presentation):
-    #   ---
-    #   foo: bar
-    #   another-var: blah blah
-    #   ---
-    #   # header
-    # or:
-    #   foo: bar
-    #   another-var: blah blah
-    #
-    #   # header
-    _meta_data_pattern = re.compile(r'^(?:---[\ \t]*\n)?(.*:\s+>\n\s+[\S\s]+?)(?=\n\w+\s*:\s*\w+\n|\Z)|([\S\w]+\s*:(?! >)[ \t]*.*\n?)(?:---[\ \t]*\n)?', re.MULTILINE)
-    _key_val_pat = re.compile("[\S\w]+\s*:(?! >)[ \t]*.*\n?", re.MULTILINE)
-    # this allows key: >
-    #                   value
-    #                   continues over multiple lines
-    _key_val_block_pat = re.compile(
-        "(.*:\s+>\n\s+[\S\s]+?)(?=\n\w+\s*:\s*\w+\n|\Z)", re.MULTILINE)
-
-    def _extract_metadata(self, text):
-        match = re.findall(self._meta_data_pattern, text)
-
-        if not match:
-            return text
-
-        last_item = list(filter(None, match[-1]))[0]
-        end_of_metadata = text.index(last_item)+len(last_item)
-        if text.startswith("---"):
-            # add 8 characters for opening and closing
-            # and since indexing starts at 0 we add a step
-            tail = text[end_of_metadata+4:]
-        else:
-            tail = text[end_of_metadata:]
-
-        kv = re.findall(self._key_val_pat, text)
-        kvm = re.findall(self._key_val_block_pat, text)
-        kvm = [item.replace(": >\n", ":", 1) for item in kvm]
-
-        for item in kv + kvm:
-            k, v = item.split(":", 1)
-            self.metadata[k.strip()] = v.strip()
-
-        return tail
-
-    _emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
-    # This regular expression is intended to match blocks like this:
-    #    PREFIX Local Variables: SUFFIX
-    #    PREFIX mode: Tcl SUFFIX
-    #    PREFIX End: SUFFIX
-    # Some notes:
-    # - "[ \t]" is used instead of "\s" to specifically exclude newlines
-    # - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
-    #   not like anything other than Unix-style line terminators.
-    _emacs_local_vars_pat = re.compile(r"""^
-        (?P<prefix>(?:[^\r\n|\n|\r])*?)
-        [\ \t]*Local\ Variables:[\ \t]*
-        (?P<suffix>.*?)(?:\r\n|\n|\r)
-        (?P<content>.*?\1End:)
-        """, re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
-
-    def _get_emacs_vars(self, text):
-        """Return a dictionary of emacs-style local variables.
-
-        Parsing is done loosely according to this spec (and according to
-        some in-practice deviations from this):
-        http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
-        """
-        emacs_vars = {}
-        SIZE = pow(2, 13)  # 8kB
-
-        # Search near the start for a '-*-'-style one-liner of variables.
-        head = text[:SIZE]
-        if "-*-" in head:
-            match = self._emacs_oneliner_vars_pat.search(head)
-            if match:
-                emacs_vars_str = match.group(1)
-                assert '\n' not in emacs_vars_str
-                emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
-                                  if s.strip()]
-                if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
-                    # While not in the spec, this form is allowed by emacs:
-                    #   -*- Tcl -*-
-                    # where the implied "variable" is "mode". This form
-                    # is only allowed if there are no other variables.
-                    emacs_vars["mode"] = emacs_var_strs[0].strip()
-                else:
-                    for emacs_var_str in emacs_var_strs:
-                        try:
-                            variable, value = emacs_var_str.strip().split(':', 1)
-                        except ValueError:
-                            log.debug("emacs variables error: malformed -*- "
-                                      "line: %r", emacs_var_str)
-                            continue
-                        # Lowercase the variable name because Emacs allows "Mode"
-                        # or "mode" or "MoDe", etc.
-                        emacs_vars[variable.lower()] = value.strip()
-
-        tail = text[-SIZE:]
-        if "Local Variables" in tail:
-            match = self._emacs_local_vars_pat.search(tail)
-            if match:
-                prefix = match.group("prefix")
-                suffix = match.group("suffix")
-                lines = match.group("content").splitlines(0)
-                # print "prefix=%r, suffix=%r, content=%r, lines: %s"\
-                #      % (prefix, suffix, match.group("content"), lines)
-
-                # Validate the Local Variables block: proper prefix and suffix
-                # usage.
-                for i, line in enumerate(lines):
-                    if not line.startswith(prefix):
-                        log.debug("emacs variables error: line '%s' "
-                                  "does not use proper prefix '%s'"
-                                  % (line, prefix))
-                        return {}
-                    # Don't validate suffix on last line. Emacs doesn't care,
-                    # neither should we.
-                    if i != len(lines)-1 and not line.endswith(suffix):
-                        log.debug("emacs variables error: line '%s' "
-                                  "does not use proper suffix '%s'"
-                                  % (line, suffix))
-                        return {}
-
-                # Parse out one emacs var per line.
-                continued_for = None
-                for line in lines[:-1]:  # no var on the last line ("PREFIX End:")
-                    if prefix: line = line[len(prefix):]  # strip prefix
-                    if suffix: line = line[:-len(suffix)]  # strip suffix
-                    line = line.strip()
-                    if continued_for:
-                        variable = continued_for
-                        if line.endswith('\\'):
-                            line = line[:-1].rstrip()
-                        else:
-                            continued_for = None
-                        emacs_vars[variable] += ' ' + line
-                    else:
-                        try:
-                            variable, value = line.split(':', 1)
-                        except ValueError:
-                            log.debug("local variables error: missing colon "
-                                      "in local variables entry: '%s'" % line)
-                            continue
-                        # Do NOT lowercase the variable name, because Emacs only
-                        # allows "mode" (and not "Mode", "MoDe", etc.) in this block.
-                        value = value.strip()
-                        if value.endswith('\\'):
-                            value = value[:-1].rstrip()
-                            continued_for = variable
-                        else:
-                            continued_for = None
-                        emacs_vars[variable] = value
-
-        # Unquote values.
-        for var, val in list(emacs_vars.items()):
-            if len(val) > 1 and (val.startswith('"') and val.endswith('"')
-               or val.startswith("'") and val.endswith("'")):
-                emacs_vars[var] = val[1:-1]
-
-        return emacs_vars
-
-    def _detab_line(self, line):
-        r"""Recusively convert tabs to spaces in a single line.
-
-        Called from _detab()."""
-        if '\t' not in line:
-            return line
-        chunk1, chunk2 = line.split('\t', 1)
-        chunk1 += (' ' * (self.tab_width - len(chunk1) % self.tab_width))
-        output = chunk1 + chunk2
-        return self._detab_line(output)
-
-    def _detab(self, text):
-        r"""Iterate text line by line and convert tabs to spaces.
-
-            >>> m = Markdown()
-            >>> m._detab("\tfoo")
-            '    foo'
-            >>> m._detab("  \tfoo")
-            '    foo'
-            >>> m._detab("\t  foo")
-            '      foo'
-            >>> m._detab("  foo")
-            '  foo'
-            >>> m._detab("  foo\n\tbar\tblam")
-            '  foo\n    bar blam'
-        """
-        if '\t' not in text:
-            return text
-        output = []
-        for line in text.splitlines():
-            output.append(self._detab_line(line))
-        return '\n'.join(output)
-
-    # I broke out the html5 tags here and add them to _block_tags_a and
-    # _block_tags_b.  This way html5 tags are easy to keep track of.
-    _html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'
-
-    _block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
-    _block_tags_a += _html5tags
-
-    _strict_tag_block_re = re.compile(r"""
-        (                       # save in \1
-            ^                   # start of line  (with re.M)
-            <(%s)               # start tag = \2
-            \b                  # word break
-            (.*\n)*?            # any number of lines, minimally matching
-            </\2>               # the matching end tag
-            [ \t]*              # trailing spaces/tabs
-            (?=\n+|\Z)          # followed by a newline or end of document
-        )
-        """ % _block_tags_a,
-        re.X | re.M)
-
-    _block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
-    _block_tags_b += _html5tags
-
-    _liberal_tag_block_re = re.compile(r"""
-        (                       # save in \1
-            ^                   # start of line  (with re.M)
-            <(%s)               # start tag = \2
-            \b                  # word break
-            (.*\n)*?            # any number of lines, minimally matching
-            .*</\2>             # the matching end tag
-            [ \t]*              # trailing spaces/tabs
-            (?=\n+|\Z)          # followed by a newline or end of document
-        )
-        """ % _block_tags_b,
-        re.X | re.M)
-
-    _html_markdown_attr_re = re.compile(
-        r'''\s+markdown=("1"|'1')''')
-    def _hash_html_block_sub(self, match, raw=False):
-        html = match.group(1)
-        if raw and self.safe_mode:
-            html = self._sanitize_html(html)
-        elif 'markdown-in-html' in self.extras and 'markdown=' in html:
-            first_line = html.split('\n', 1)[0]
-            m = self._html_markdown_attr_re.search(first_line)
-            if m:
-                lines = html.split('\n')
-                middle = '\n'.join(lines[1:-1])
-                last_line = lines[-1]
-                first_line = first_line[:m.start()] + first_line[m.end():]
-                f_key = _hash_text(first_line)
-                self.html_blocks[f_key] = first_line
-                l_key = _hash_text(last_line)
-                self.html_blocks[l_key] = last_line
-                return ''.join(["\n\n", f_key,
-                    "\n\n", middle, "\n\n",
-                    l_key, "\n\n"])
-        key = _hash_text(html)
-        self.html_blocks[key] = html
-        return "\n\n" + key + "\n\n"
-
-    def _hash_html_blocks(self, text, raw=False):
-        """Hashify HTML blocks
-
-        We only want to do this for block-level HTML tags, such as headers,
-        lists, and tables. That's because we still want to wrap <p>s around
-        "paragraphs" that are wrapped in non-block-level tags, such as anchors,
-        phrase emphasis, and spans. The list of tags we're looking for is
-        hard-coded.
-
-        @param raw {boolean} indicates if these are raw HTML blocks in
-            the original source. It makes a difference in "safe" mode.
-        """
-        if '<' not in text:
-            return text
-
-        # Pass `raw` value into our calls to self._hash_html_block_sub.
-        hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
-
-        # First, look for nested blocks, e.g.:
-        #   <div>
-        #       <div>
-        #       tags for inner block must be indented.
-        #       </div>
-        #   </div>
-        #
-        # The outermost tags must start at the left margin for this to match, and
-        # the inner nested divs must be indented.
-        # We need to do this before the next, more liberal match, because the next
-        # match will start at the first `<div>` and stop at the first `</div>`.
-        text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
-
-        # Now match more liberally, simply from `\n<tag>` to `</tag>\n`
-        text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
-
-        # Special case just for <hr />. It was easier to make a special
-        # case than to make the other regex more complicated.
-        if "<hr" in text:
-            _hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
-            text = _hr_tag_re.sub(hash_html_block_sub, text)
-
-        # Special case for standalone HTML comments:
-        if "<!--" in text:
-            start = 0
-            while True:
-                # Delimiters for next comment block.
-                try:
-                    start_idx = text.index("<!--", start)
-                except ValueError:
-                    break
-                try:
-                    end_idx = text.index("-->", start_idx) + 3
-                except ValueError:
-                    break
-
-                # Start position for next comment block search.
-                start = end_idx
-
-                # Validate whitespace before comment.
-                if start_idx:
-                    # - Up to `tab_width - 1` spaces before start_idx.
-                    for i in range(self.tab_width - 1):
-                        if text[start_idx - 1] != ' ':
-                            break
-                        start_idx -= 1
-                        if start_idx == 0:
-                            break
-                    # - Must be preceded by 2 newlines or hit the start of
-                    #   the document.
-                    if start_idx == 0:
-                        pass
-                    elif start_idx == 1 and text[0] == '\n':
-                        start_idx = 0  # to match minute detail of Markdown.pl regex
-                    elif text[start_idx-2:start_idx] == '\n\n':
-                        pass
-                    else:
-                        break
-
-                # Validate whitespace after comment.
-                # - Any number of spaces and tabs.
-                while end_idx < len(text):
-                    if text[end_idx] not in ' \t':
-                        break
-                    end_idx += 1
-                # - Must be followed by 2 newlines or hit end of text.
-                if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
-                    continue
-
-                # Escape and hash (must match `_hash_html_block_sub`).
-                html = text[start_idx:end_idx]
-                if raw and self.safe_mode:
-                    html = self._sanitize_html(html)
-                key = _hash_text(html)
-                self.html_blocks[key] = html
-                text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
-
-        if "xml" in self.extras:
-            # Treat XML processing instructions and namespaced one-liner
-            # tags as if they were block HTML tags. E.g., if standalone
-            # (i.e. are their own paragraph), the following do not get
-            # wrapped in a <p> tag:
-            #    <?foo bar?>
-            #
-            #    <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
-            _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
-            text = _xml_oneliner_re.sub(hash_html_block_sub, text)
-
-        return text
-
-    def _strip_link_definitions(self, text):
-        # Strips link definitions from text, stores the URLs and titles in
-        # hash references.
-        less_than_tab = self.tab_width - 1
-
-        # Link defs are in the form:
-        #   [id]: url "optional title"
-        _link_def_re = re.compile(r"""
-            ^[ ]{0,%d}\[(.+)\]: # id = \1
-              [ \t]*
-              \n?               # maybe *one* newline
-              [ \t]*
-            <?(.+?)>?           # url = \2
-              [ \t]*
-            (?:
-                \n?             # maybe one newline
-                [ \t]*
-                (?<=\s)         # lookbehind for whitespace
-                ['"(]
-                ([^\n]*)        # title = \3
-                ['")]
-                [ \t]*
-            )?  # title is optional
-            (?:\n+|\Z)
-            """ % less_than_tab, re.X | re.M | re.U)
-        return _link_def_re.sub(self._extract_link_def_sub, text)
-
-    def _extract_link_def_sub(self, match):
-        id, url, title = match.groups()
-        key = id.lower()    # Link IDs are case-insensitive
-        self.urls[key] = self._encode_amps_and_angles(url)
-        if title:
-            self.titles[key] = title
-        return ""
-
-    def _do_numbering(self, text):
-        ''' We handle the special extension for generic numbering for
-            tables, figures etc.
-        '''
-        # First pass to define all the references
-        self.regex_defns = re.compile(r'''
-            \[\#(\w+)\s* # the counter.  Open square plus hash plus a word \1
-            ([^@]*)\s*   # Some optional characters, that aren't an @. \2
-            @(\w+)       # the id.  Should this be normed? \3
-            ([^\]]*)\]   # The rest of the text up to the terminating ] \4
-            ''', re.VERBOSE)
-        self.regex_subs = re.compile(r"\[@(\w+)\s*\]")  # [@ref_id]
-        counters = {}
-        references = {}
-        replacements = []
-        definition_html = '<figcaption class="{}" id="counter-ref-{}">{}{}{}</figcaption>'
-        reference_html = '<a class="{}" href="#counter-ref-{}">{}</a>'
-        for match in self.regex_defns.finditer(text):
-            # We must have four match groups otherwise this isn't a numbering reference
-            if len(match.groups()) != 4:
-                continue
-            counter = match.group(1)
-            text_before = match.group(2)
-            ref_id = match.group(3)
-            text_after = match.group(4)
-            number = counters.get(counter, 1)
-            references[ref_id] = (number, counter)
-            replacements.append((match.start(0),
-                                 definition_html.format(counter,
-                                                        ref_id,
-                                                        text_before,
-                                                        number,
-                                                        text_after),
-                                 match.end(0)))
-            counters[counter] = number + 1
-        for repl in reversed(replacements):
-            text = text[:repl[0]] + repl[1] + text[repl[2]:]
-
-        # Second pass to replace the references with the right
-        # value of the counter
-        # Fwiw, it's vaguely annoying to have to turn the iterator into
-        # a list and then reverse it but I can't think of a better thing to do.
-        for match in reversed(list(self.regex_subs.finditer(text))):
-            number, counter = references.get(match.group(1), (None, None))
-            if number is not None:
-                repl = reference_html.format(counter,
-                                             match.group(1),
-                                             number)
-            else:
-                repl = reference_html.format(match.group(1),
-                                             'countererror',
-                                             '?' + match.group(1) + '?')
-            if "smarty-pants" in self.extras:
-                repl = repl.replace('"', self._escape_table['"'])
-
-            text = text[:match.start()] + repl + text[match.end():]
-        return text
-
-    def _extract_footnote_def_sub(self, match):
-        id, text = match.groups()
-        text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
-        normed_id = re.sub(r'\W', '-', id)
-        # Ensure footnote text ends with a couple newlines (for some
-        # block gamut matches).
-        self.footnotes[normed_id] = text + "\n\n"
-        return ""
-
-    def _strip_footnote_definitions(self, text):
-        """A footnote definition looks like this:
-
-            [^note-id]: Text of the note.
-
-                May include one or more indented paragraphs.
-
-        Where,
-        - The 'note-id' can be pretty much anything, though typically it
-          is the number of the footnote.
-        - The first paragraph may start on the next line, like so:
-
-            [^note-id]:
-                Text of the note.
-        """
-        less_than_tab = self.tab_width - 1
-        footnote_def_re = re.compile(r'''
-            ^[ ]{0,%d}\[\^(.+)\]:   # id = \1
-            [ \t]*
-            (                       # footnote text = \2
-              # First line need not start with the spaces.
-              (?:\s*.*\n+)
-              (?:
-                (?:[ ]{%d} | \t)  # Subsequent lines must be indented.
-                .*\n+
-              )*
-            )
-            # Lookahead for non-space at line-start, or end of doc.
-            (?:(?=^[ ]{0,%d}\S)|\Z)
-            ''' % (less_than_tab, self.tab_width, self.tab_width),
-            re.X | re.M)
-        return footnote_def_re.sub(self._extract_footnote_def_sub, text)
-
-    _hr_re = re.compile(r'^[ ]{0,3}([-_*][ ]{0,2}){3,}$', re.M)
-
-    def _run_block_gamut(self, text):
-        # These are all the transformations that form block-level
-        # tags like paragraphs, headers, and list items.
-
-        if "fenced-code-blocks" in self.extras:
-            text = self._do_fenced_code_blocks(text)
-
-        text = self._do_headers(text)
-
-        # Do Horizontal Rules:
-        # On the number of spaces in horizontal rules: The spec is fuzzy: "If
-        # you wish, you may use spaces between the hyphens or asterisks."
-        # Markdown.pl 1.0.1's hr regexes limit the number of spaces between the
-        # hr chars to one or two. We'll reproduce that limit here.
-        hr = "\n<hr"+self.empty_element_suffix+"\n"
-        text = re.sub(self._hr_re, hr, text)
-
-        text = self._do_lists(text)
-
-        if "pyshell" in self.extras:
-            text = self._prepare_pyshell_blocks(text)
-        if "wiki-tables" in self.extras:
-            text = self._do_wiki_tables(text)
-        if "tables" in self.extras:
-            text = self._do_tables(text)
-
-        text = self._do_code_blocks(text)
-
-        text = self._do_block_quotes(text)
-
-        # We already ran _HashHTMLBlocks() before, in Markdown(), but that
-        # was to escape raw HTML in the original Markdown source. This time,
-        # we're escaping the markup we've just created, so that we don't wrap
-        # <p> tags around block-level tags.
-        text = self._hash_html_blocks(text)
-
-        text = self._form_paragraphs(text)
-
-        return text
-
-    def _pyshell_block_sub(self, match):
-        lines = match.group(0).splitlines(0)
-        _dedentlines(lines)
-        indent = ' ' * self.tab_width
-        s = ('\n'  # separate from possible cuddled paragraph
-             + indent + ('\n'+indent).join(lines)
-             + '\n\n')
-        return s
-
-    def _prepare_pyshell_blocks(self, text):
-        """Ensure that Python interactive shell sessions are put in
-        code blocks -- even if not properly indented.
-        """
-        if ">>>" not in text:
-            return text
-
-        less_than_tab = self.tab_width - 1
-        _pyshell_block_re = re.compile(r"""
-            ^([ ]{0,%d})>>>[ ].*\n   # first line
-            ^(\1.*\S+.*\n)*         # any number of subsequent lines
-            ^\n                     # ends with a blank line
-            """ % less_than_tab, re.M | re.X)
-
-        return _pyshell_block_re.sub(self._pyshell_block_sub, text)
-
-    def _table_sub(self, match):
-        trim_space_re = '^[ \t\n]+|[ \t\n]+$'
-        trim_bar_re = '^\||\|$'
-
-        head, underline, body = match.groups()
-
-        # Determine aligns for columns.
-        cols = [cell.strip() for cell in re.sub(trim_bar_re, "", re.sub(trim_space_re, "", underline)).split('|')]
-        align_from_col_idx = {}
-        for col_idx, col in enumerate(cols):
-            if col[0] == ':' and col[-1] == ':':
-                align_from_col_idx[col_idx] = ' align="center"'
-            elif col[0] == ':':
-                align_from_col_idx[col_idx] = ' align="left"'
-            elif col[-1] == ':':
-                align_from_col_idx[col_idx] = ' align="right"'
-
-        # thead
-        hlines = ['<table%s>' % self._html_class_str_from_tag('table'), '<thead>', '<tr>']
-        cols = [cell.strip() for cell in re.sub(trim_bar_re, "", re.sub(trim_space_re, "", head)).split('|')]
-        for col_idx, col in enumerate(cols):
-            hlines.append('  <th%s>%s</th>' % (
-                align_from_col_idx.get(col_idx, ''),
-                self._run_span_gamut(col)
-            ))
-        hlines.append('</tr>')
-        hlines.append('</thead>')
-
-        # tbody
-        hlines.append('<tbody>')
-        for line in body.strip('\n').split('\n'):
-            hlines.append('<tr>')
-            cols = [cell.strip() for cell in re.sub(trim_bar_re, "", re.sub(trim_space_re, "", line)).split('|')]
-            for col_idx, col in enumerate(cols):
-                hlines.append('  <td%s>%s</td>' % (
-                    align_from_col_idx.get(col_idx, ''),
-                    self._run_span_gamut(col)
-                ))
-            hlines.append('</tr>')
-        hlines.append('</tbody>')
-        hlines.append('</table>')
-
-        return '\n'.join(hlines) + '\n'
-
-    def _do_tables(self, text):
-        """Copying PHP-Markdown and GFM table syntax. Some regex borrowed from
-        https://github.com/michelf/php-markdown/blob/lib/Michelf/Markdown.php#L2538
-        """
-        less_than_tab = self.tab_width - 1
-        table_re = re.compile(r'''
-                (?:(?<=\n\n)|\A\n?)             # leading blank line
-
-                ^[ ]{0,%d}                      # allowed whitespace
-                (.*[|].*)  \n                   # $1: header row (at least one pipe)
-
-                ^[ ]{0,%d}                      # allowed whitespace
-                (                               # $2: underline row
-                    # underline row with leading bar
-                    (?:  \|\ *:?-+:?\ *  )+  \|?  \n
-                    |
-                    # or, underline row without leading bar
-                    (?:  \ *:?-+:?\ *\|  )+  (?:  \ *:?-+:?\ *  )?  \n
-                )
-
-                (                               # $3: data rows
-                    (?:
-                        ^[ ]{0,%d}(?!\ )         # ensure line begins with 0 to less_than_tab spaces
-                        .*\|.*  \n
-                    )+
-                )
-            ''' % (less_than_tab, less_than_tab, less_than_tab), re.M | re.X)
-        return table_re.sub(self._table_sub, text)
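-
-    # Illustrative example: with the 'tables' extra,
-    #     | Name  | Value |
-    #     | ----- | ----: |
-    #     | alpha |     1 |
-    # becomes roughly
-    #     <table><thead><tr><th>Name</th><th align="right">Value</th>
-    #     </tr></thead><tbody><tr><td>alpha</td>
-    #     <td align="right">1</td></tr></tbody></table>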
-
-    def _wiki_table_sub(self, match):
-        ttext = match.group(0).strip()
-        # print 'wiki table: %r' % match.group(0)
-        rows = []
-        for line in ttext.splitlines(0):
-            line = line.strip()[2:-2].strip()
-            row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)]
-            rows.append(row)
-        # pprint(rows)
-        hlines = ['<table%s>' % self._html_class_str_from_tag('table'), '<tbody>']
-        for row in rows:
-            hrow = ['<tr>']
-            for cell in row:
-                hrow.append('<td>')
-                hrow.append(self._run_span_gamut(cell))
-                hrow.append('</td>')
-            hrow.append('</tr>')
-            hlines.append(''.join(hrow))
-        hlines += ['</tbody>', '</table>']
-        return '\n'.join(hlines) + '\n'
-
-    def _do_wiki_tables(self, text):
-        # Optimization.
-        if "||" not in text:
-            return text
-
-        less_than_tab = self.tab_width - 1
-        wiki_table_re = re.compile(r'''
-            (?:(?<=\n\n)|\A\n?)            # leading blank line
-            ^([ ]{0,%d})\|\|.+?\|\|[ ]*\n  # first line
-            (^\1\|\|.+?\|\|\n)*        # any number of subsequent lines
-            ''' % less_than_tab, re.M | re.X)
-        return wiki_table_re.sub(self._wiki_table_sub, text)
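-
-    # Illustrative example: with the 'wiki-tables' extra,
-    #     || cell 1 || cell 2 ||
-    # becomes roughly
-    #     <table><tbody><tr><td>cell 1</td><td>cell 2</td></tr>
-    #     </tbody></table>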
-
-    def _run_span_gamut(self, text):
-        # These are all the transformations that occur *within* block-level
-        # tags like paragraphs, headers, and list items.
-
-        text = self._do_code_spans(text)
-
-        text = self._escape_special_chars(text)
-
-        # Process anchor and image tags.
-        text = self._do_links(text)
-
-        # Make links out of things like `<http://example.com/>`
-        # Must come after _do_links(), because you can use < and >
-        # delimiters in inline links like [this](<url>).
-        text = self._do_auto_links(text)
-
-        if "link-patterns" in self.extras:
-            text = self._do_link_patterns(text)
-
-        text = self._encode_amps_and_angles(text)
-
-        if "strike" in self.extras:
-            text = self._do_strike(text)
-
-        text = self._do_italics_and_bold(text)
-
-        if "smarty-pants" in self.extras:
-            text = self._do_smart_punctuation(text)
-
-        # Do hard breaks:
-        if "break-on-newline" in self.extras:
-            text = re.sub(r" *\n", "<br%s\n" % self.empty_element_suffix, text)
-        else:
-            text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)
-
-        return text
-
-    # "Sorta" because auto-links are identified as "tag" tokens.
-    _sorta_html_tokenize_re = re.compile(r"""
-        (
-            # tag
-            </?
-            (?:\w+)                                     # tag name
-            (?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))*  # attributes
-            \s*/?>
-            |
-            # auto-link (e.g., <http://www.activestate.com/>)
-            <\w+[^>]*>
-            |
-            <!--.*?-->      # comment
-            |
-            <\?.*?\?>       # processing instruction
-        )
-        """, re.X)
-
-    def _escape_special_chars(self, text):
-        # Python markdown note: the HTML tokenization here differs from
-        # that in Markdown.pl, hence the behaviour for subtle cases can
-        # differ (I believe the tokenizer here does a better job because
-        # it isn't susceptible to unmatched '<' and '>' in HTML tags).
-        # Note, however, that '>' is not allowed in an auto-link URL
-        # here.
-        escaped = []
-        is_html_markup = False
-        for token in self._sorta_html_tokenize_re.split(text):
-            if is_html_markup:
-                # Within tags/HTML-comments/auto-links, encode * and _
-                # so they don't conflict with their use in Markdown for
-                # italics and strong.  We're replacing each such
-                # character with its corresponding MD5 checksum value;
-                # this is likely overkill, but it should prevent us from
-                # colliding with the escape values by accident.
-                escaped.append(token.replace('*', self._escape_table['*'])
-                                    .replace('_', self._escape_table['_']))
-            else:
-                escaped.append(self._encode_backslash_escapes(token))
-            is_html_markup = not is_html_markup
-        return ''.join(escaped)
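-
-    # Illustrative example: in '<a title="a*b">x</a> and *em*', the '*'
-    # inside the tag token is swapped for its MD5 hash from
-    # self._escape_table so _do_italics_and_bold() cannot see it, while
-    # the literal '*em*' outside the tag is left for normal emphasis
-    # processing.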
-
-    def _hash_html_spans(self, text):
-        # Used for safe_mode.
-
-        def _is_auto_link(s):
-            if ':' in s and self._auto_link_re.match(s):
-                return True
-            elif '@' in s and self._auto_email_link_re.match(s):
-                return True
-            return False
-
-        tokens = []
-        is_html_markup = False
-        for token in self._sorta_html_tokenize_re.split(text):
-            if is_html_markup and not _is_auto_link(token):
-                sanitized = self._sanitize_html(token)
-                key = _hash_text(sanitized)
-                self.html_spans[key] = sanitized
-                tokens.append(key)
-            else:
-                tokens.append(token)
-            is_html_markup = not is_html_markup
-        return ''.join(tokens)
-
-    def _unhash_html_spans(self, text):
-        for key, sanitized in list(self.html_spans.items()):
-            text = text.replace(key, sanitized)
-        return text
-
-    def _sanitize_html(self, s):
-        if self.safe_mode == "replace":
-            return self.html_removed_text
-        elif self.safe_mode == "escape":
-            replacements = [
-                ('&', '&amp;'),
-                ('<', '&lt;'),
-                ('>', '&gt;'),
-            ]
-            for before, after in replacements:
-                s = s.replace(before, after)
-            return s
-        else:
-            raise MarkdownError("invalid value for 'safe_mode': %r (must be "
-                                "'escape' or 'replace')" % self.safe_mode)
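-
-    # Illustrative example: with safe_mode="escape", a raw '<script>'
-    # span becomes '&lt;script&gt;'; with safe_mode="replace" it is
-    # substituted with self.html_removed_text instead.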
-
-    _inline_link_title = re.compile(r'''
-            (                   # \1
-              [ \t]+
-              (['"])            # quote char = \2
-              (?P<title>.*?)
-              \2
-            )?                  # title is optional
-          \)$
-        ''', re.X | re.S)
-    _tail_of_reference_link_re = re.compile(r'''
-          # Match tail of: [text][id]
-          [ ]?          # one optional space
-          (?:\n[ ]*)?   # one optional newline followed by spaces
-          \[
-            (?P<id>.*?)
-          \]
-        ''', re.X | re.S)
-
-    _whitespace = re.compile(r'\s*')
-
-    _strip_anglebrackets = re.compile(r'<(.*)>.*')
-
-    def _find_non_whitespace(self, text, start):
-        """Returns the index of the first non-whitespace character in text
-        after (and including) start
-        """
-        match = self._whitespace.match(text, start)
-        return match.end()
-
-    def _find_balanced(self, text, start, open_c, close_c):
-        """Returns the index where the open_c and close_c characters balance
-        out - the same number of open_c and close_c are encountered - or the
-        end of string if it's reached before the balance point is found.
-        """
-        i = start
-        l = len(text)
-        count = 1
-        while count > 0 and i < l:
-            if text[i] == open_c:
-                count += 1
-            elif text[i] == close_c:
-                count -= 1
-            i += 1
-        return i
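-
-    # Illustrative example (start is just past the opening paren):
-    #     self._find_balanced('(a(b)c) tail', 1, '(', ')')  # -> 7,
-    # the index just past the ')' that balances the '(' at index 0.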
-
-    def _extract_url_and_title(self, text, start):
-        """Extracts the url and (optional) title from the tail of a link"""
-        # text[start] equals the opening parenthesis
-        idx = self._find_non_whitespace(text, start+1)
-        if idx == len(text):
-            return None, None, None
-        end_idx = idx
-        has_anglebrackets = text[idx] == "<"
-        if has_anglebrackets:
-            end_idx = self._find_balanced(text, end_idx+1, "<", ">")
-        end_idx = self._find_balanced(text, end_idx, "(", ")")
-        match = self._inline_link_title.search(text, idx, end_idx)
-        if not match:
-            return None, None, None
-        url, title = text[idx:match.start()], match.group("title")
-        if has_anglebrackets:
-            url = self._strip_anglebrackets.sub(r'\1', url)
-        return url, title, end_idx
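-
-    # Illustrative example (start indexes the opening parenthesis):
-    #     self._extract_url_and_title('(http://example.com "Title")', 0)
-    #     # -> ('http://example.com', 'Title', 28)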
-
-    _safe_protocols = re.compile(r'(https?|ftp):', re.I)
-    def _do_links(self, text):
-        """Turn Markdown link shortcuts into XHTML <a> and <img> tags.
-
-        This is a combination of Markdown.pl's _DoAnchors() and
-        _DoImages(). They are done together because that simplifies the
-        approach. A different approach than Markdown.pl's was necessary
-        because Python's regex engine lacks the atomic matching support
-        that Markdown.pl relies on in $g_nested_brackets.
-        """
-        MAX_LINK_TEXT_SENTINEL = 3000  # markdown2 issue 24
-
-        # `anchor_allowed_pos` is used to support img links inside
-        # anchors, but not anchors inside anchors. An anchor's start
-        # pos must be `>= anchor_allowed_pos`.
-        anchor_allowed_pos = 0
-
-        curr_pos = 0
-        while True:  # Handle the next link.
-            # The next '[' is the start of:
-            # - an inline anchor:   [text](url "title")
-            # - a reference anchor: [text][id]
-            # - an inline img:      ![text](url "title")
-            # - a reference img:    ![text][id]
-            # - a footnote ref:     [^id]
-            #   (Only if 'footnotes' extra enabled)
-            # - a footnote defn:    [^id]: ...
-            #   (Only if 'footnotes' extra enabled) These have already
-            #   been stripped in _strip_footnote_definitions() so no
-            #   need to watch for them.
-            # - a link definition:  [id]: url "title"
-            #   These have already been stripped in
-            #   _strip_link_definitions() so no need to watch for them.
-            # - not markup:         [...anything else...
-            try:
-                start_idx = text.index('[', curr_pos)
-            except ValueError:
-                break
-            text_length = len(text)
-
-            # Find the matching closing ']'.
-            # Markdown.pl allows *matching* brackets in link text so we
-            # will here too. Markdown.pl *doesn't* currently allow
-            # matching brackets in img alt text -- we'll differ in that
-            # regard.
-            bracket_depth = 0
-            for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
-                                            text_length)):
-                ch = text[p]
-                if ch == ']':
-                    bracket_depth -= 1
-                    if bracket_depth < 0:
-                        break
-                elif ch == '[':
-                    bracket_depth += 1
-            else:
-                # Closing bracket not found within sentinel length.
-                # This isn't markup.
-                curr_pos = start_idx + 1
-                continue
-            link_text = text[start_idx+1:p]
-
-            # Possibly a footnote ref?
-            if "footnotes" in self.extras and link_text.startswith("^"):
-                normed_id = re.sub(r'\W', '-', link_text[1:])
-                if normed_id in self.footnotes:
-                    self.footnote_ids.append(normed_id)
-                    result = '<sup class="footnote-ref" id="fnref-%s">' \
-                             '<a href="#fn-%s">%s</a></sup>' \
-                             % (normed_id, normed_id, len(self.footnote_ids))
-                    text = text[:start_idx] + result + text[p+1:]
-                else:
-                    # This id isn't defined, leave the markup alone.
-                    curr_pos = p+1
-                continue
-
-            # Now determine what this is by the remainder.
-            p += 1
-            if p == text_length:
-                return text
-
-            # Inline anchor or img?
-            if text[p] == '(':  # attempt at perf improvement
-                url, title, url_end_idx = self._extract_url_and_title(text, p)
-                if url is not None:
-                    # Handle an inline anchor or img.
-                    is_img = start_idx > 0 and text[start_idx-1] == "!"
-                    if is_img:
-                        start_idx -= 1
-
-                    # We've got to encode these to avoid conflicting
-                    # with italics/bold.
-                    url = url.replace('*', self._escape_table['*']) \
-                             .replace('_', self._escape_table['_'])
-                    if title:
-                        title_str = ' title="%s"' % (
-                            _xml_escape_attr(title)
-                                .replace('*', self._escape_table['*'])
-                                .replace('_', self._escape_table['_']))
-                    else:
-                        title_str = ''
-                    if is_img:
-                        img_class_str = self._html_class_str_from_tag("img")
-                        result = '<img src="%s" alt="%s"%s%s%s' \
-                            % (_urlencode(url, safe_mode=self.safe_mode),
-                               _xml_escape_attr(link_text),
-                               title_str,
-                               img_class_str,
-                               self.empty_element_suffix)
-                        if "smarty-pants" in self.extras:
-                            result = result.replace('"', self._escape_table['"'])
-                        curr_pos = start_idx + len(result)
-                        text = text[:start_idx] + result + text[url_end_idx:]
-                    elif start_idx >= anchor_allowed_pos:
-                        if self.safe_mode and not self._safe_protocols.match(url):
-                            result_head = '<a href="#"%s>' % (title_str)
-                        else:
-                            result_head = '<a href="%s"%s>' % (_urlencode(url, safe_mode=self.safe_mode), title_str)
-                        result = '%s%s</a>' % (result_head, _xml_escape_attr(link_text))
-                        if "smarty-pants" in self.extras:
-                            result = result.replace('"', self._escape_table['"'])
-                        # <img> allowed from curr_pos on, <a> from
-                        # anchor_allowed_pos on.
-                        curr_pos = start_idx + len(result_head)
-                        anchor_allowed_pos = start_idx + len(result)
-                        text = text[:start_idx] + result + text[url_end_idx:]
-                    else:
-                        # Anchor not allowed here.
-                        curr_pos = start_idx + 1
-                    continue
-
-            # Reference anchor or img?
-            else:
-                match = self._tail_of_reference_link_re.match(text, p)
-                if match:
-                    # Handle a reference-style anchor or img.
-                    is_img = start_idx > 0 and text[start_idx-1] == "!"
-                    if is_img:
-                        start_idx -= 1
-                    link_id = match.group("id").lower()
-                    if not link_id:
-                        link_id = link_text.lower()  # for links like [this][]
-                    if link_id in self.urls:
-                        url = self.urls[link_id]
-                        # We've got to encode these to avoid conflicting
-                        # with italics/bold.
-                        url = url.replace('*', self._escape_table['*']) \
-                                 .replace('_', self._escape_table['_'])
-                        title = self.titles.get(link_id)
-                        if title:
-                            title = _xml_escape_attr(title) \
-                                .replace('*', self._escape_table['*']) \
-                                .replace('_', self._escape_table['_'])
-                            title_str = ' title="%s"' % title
-                        else:
-                            title_str = ''
-                        if is_img:
-                            img_class_str = self._html_class_str_from_tag("img")
-                            result = '<img src="%s" alt="%s"%s%s%s' \
-                                % (_urlencode(url, safe_mode=self.safe_mode),
-                                   _xml_escape_attr(link_text),
-                                   title_str,
-                                   img_class_str,
-                                   self.empty_element_suffix)
-                            if "smarty-pants" in self.extras:
-                                result = result.replace('"', self._escape_table['"'])
-                            curr_pos = start_idx + len(result)
-                            text = text[:start_idx] + result + text[match.end():]
-                        elif start_idx >= anchor_allowed_pos:
-                            if self.safe_mode and not self._safe_protocols.match(url):
-                                result_head = '<a href="#"%s>' % (title_str)
-                            else:
-                                result_head = '<a href="%s"%s>' % (_urlencode(url, safe_mode=self.safe_mode), title_str)
-                            result = '%s%s</a>' % (result_head, link_text)
-                            if "smarty-pants" in self.extras:
-                                result = result.replace('"', self._escape_table['"'])
-                            # <img> allowed from curr_pos on, <a> from
-                            # anchor_allowed_pos on.
-                            curr_pos = start_idx + len(result_head)
-                            anchor_allowed_pos = start_idx + len(result)
-                            text = text[:start_idx] + result + text[match.end():]
-                        else:
-                            # Anchor not allowed here.
-                            curr_pos = start_idx + 1
-                    else:
-                        # This id isn't defined, leave the markup alone.
-                        curr_pos = match.end()
-                    continue
-
-            # Otherwise, it isn't markup.
-            curr_pos = start_idx + 1
-
-        return text
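-
-    # Illustrative examples of the link forms handled above:
-    #     [text](http://example.com "Title")
-    #         -> <a href="http://example.com" title="Title">text</a>
-    #     ![alt](/img.png)
-    #         -> <img src="/img.png" alt="alt" />   (XHTML suffix)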
-
-    def header_id_from_text(self, text, prefix, n):
-        """Generate a header id attribute value from the given header
-        HTML content.
-
-        This is only called if the "header-ids" extra is enabled.
-        Subclasses may override this for different header ids.
-
-        @param text {str} The text of the header tag
-        @param prefix {str} The requested prefix for header ids. This is the
-            value of the "header-ids" extra key, if any. Otherwise, None.
-        @param n {int} The <hN> tag number, i.e. `1` for an <h1> tag.
-        @returns {str} The value for the header tag's "id" attribute. Return
-            None to not have an id attribute and to exclude this header from
-            the TOC (if the "toc" extra is specified).
-        """
-        header_id = _slugify(text)
-        if prefix and isinstance(prefix, base_string_type):
-            header_id = prefix + '-' + header_id
-        if header_id in self._count_from_header_id:
-            self._count_from_header_id[header_id] += 1
-            header_id += '-%s' % self._count_from_header_id[header_id]
-        else:
-            self._count_from_header_id[header_id] = 1
-        return header_id
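-
-    # Illustrative example: 'Hello, World!' slugifies to 'hello-world';
-    # a second header with the same text gets 'hello-world-2' via the
-    # self._count_from_header_id counter above.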
-
-    _toc = None
-    def _toc_add_entry(self, level, id, name):
-        if self._toc is None:
-            self._toc = []
-        self._toc.append((level, id, self._unescape_special_chars(name)))
-
-    _h_re_base = r'''
-        (^(.+)[ \t]*\n(=+|-+)[ \t]*\n+)
-        |
-        (^(\#{1,6})  # \5 = string of #'s
-        [ \t]%s
-        (.+?)       # \6 = Header text
-        [ \t]*
-        (?<!\\)     # ensure not an escaped trailing '#'
-        \#*         # optional closing #'s (not counted)
-        \n+
-        )
-        '''
-
-    _h_re = re.compile(_h_re_base % '*', re.X | re.M)
-    _h_re_tag_friendly = re.compile(_h_re_base % '+', re.X | re.M)
-
-    def _h_sub(self, match):
-        if match.group(1) is not None:
-            # Setext header
-            n = {"=": 1, "-": 2}[match.group(3)[0]]
-            header_group = match.group(2)
-        else:
-            # atx header
-            n = len(match.group(5))
-            header_group = match.group(6)
-
-        demote_headers = self.extras.get("demote-headers")
-        if demote_headers:
-            n = min(n + demote_headers, 6)
-        header_id_attr = ""
-        if "header-ids" in self.extras:
-            header_id = self.header_id_from_text(header_group,
-                self.extras["header-ids"], n)
-            if header_id:
-                header_id_attr = ' id="%s"' % header_id
-        html = self._run_span_gamut(header_group)
-        if "toc" in self.extras and header_id:
-            self._toc_add_entry(n, header_id, html)
-        return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
-
-    def _do_headers(self, text):
-        # Setext-style headers:
-        #     Header 1
-        #     ========
-        #
-        #     Header 2
-        #     --------
-
-        # atx-style headers:
-        #   # Header 1
-        #   ## Header 2
-        #   ## Header 2 with closing hashes ##
-        #   ...
-        #   ###### Header 6
-
-        if 'tag-friendly' in self.extras:
-            return self._h_re_tag_friendly.sub(self._h_sub, text)
-        return self._h_re.sub(self._h_sub, text)
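-
-    # Illustrative examples:
-    #     'Title\n=====\n'  ->  <h1>Title</h1>
-    #     '## Title ##\n'   ->  <h2>Title</h2>
-    # (plus an id="..." attribute when the 'header-ids' extra is on).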
-
-    _marker_ul_chars = '*+-'
-    _marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
-    _marker_ul = '(?:[%s])' % _marker_ul_chars
-    _marker_ol = r'(?:\d+\.)'
-
-    def _list_sub(self, match):
-        lst = match.group(1)
-        lst_type = "ul" if match.group(3) in self._marker_ul_chars else "ol"
-        result = self._process_list_items(lst)
-        if self.list_level:
-            return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
-        else:
-            return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
-
-    def _do_lists(self, text):
-        # Form HTML ordered (numbered) and unordered (bulleted) lists.
-
-        # Iterate over each *non-overlapping* list match.
-        pos = 0
-        while True:
-            # Find the *first* hit for either list style (ul or ol). We
-            # match ul and ol separately to avoid adjacent lists of different
-            # types running into each other (see issue #16).
-            hits = []
-            for marker_pat in (self._marker_ul, self._marker_ol):
-                less_than_tab = self.tab_width - 1
-                whole_list = r'''
-                    (                   # \1 = whole list
-                      (                 # \2
-                        [ ]{0,%d}
-                        (%s)            # \3 = first list item marker
-                        [ \t]+
-                        (?!\ *\3\ )     # '- - - ...' isn't a list. See 'not_quite_a_list' test case.
-                      )
-                      (?:.+?)
-                      (                 # \4
-                          \Z
-                        |
-                          \n{2,}
-                          (?=\S)
-                          (?!           # Negative lookahead for another list item marker
-                            [ \t]*
-                            %s[ \t]+
-                          )
-                      )
-                    )
-                ''' % (less_than_tab, marker_pat, marker_pat)
-                if self.list_level:  # sub-list
-                    list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
-                else:
-                    list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
-                                         re.X | re.M | re.S)
-                match = list_re.search(text, pos)
-                if match:
-                    hits.append((match.start(), match))
-            if not hits:
-                break
-            hits.sort()
-            match = hits[0][1]
-            start, end = match.span()
-            middle = self._list_sub(match)
-            text = text[:start] + middle + text[end:]
-            pos = start + len(middle)  # start pos for next attempted match
-
-        return text
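-
-    # Illustrative example:
-    #     '* one\n* two\n'
-    # becomes
-    #     <ul>
-    #     <li>one</li>
-    #     <li>two</li>
-    #     </ul>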
-
-    _list_item_re = re.compile(r'''
-        (\n)?                   # leading line = \1
-        (^[ \t]*)               # leading whitespace = \2
-        (?P<marker>%s) [ \t]+   # list marker = \3
-        ((?:.+?)                # list item text = \4
-        (\n{1,2}))              # eols = \5
-        (?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
-        ''' % (_marker_any, _marker_any),
-        re.M | re.X | re.S)
-
-    _task_list_item_re = re.compile(r'''
-        (\[[\ x]\])[ \t]+       # tasklist marker = \1
-        (.*)                   # list item text = \2
-    ''', re.M | re.X | re.S)
-
-    _task_list_wrapper_str = r'<p><input type="checkbox" class="task-list-item-checkbox" %sdisabled>%s</p>'
-
-    def _task_list_item_sub(self, match):
-        marker = match.group(1)
-        item_text = match.group(2)
-        if marker == '[x]':
-            return self._task_list_wrapper_str % ('checked ', item_text)
-        elif marker == '[ ]':
-            return self._task_list_wrapper_str % ('', item_text)
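-
-    # Illustrative example: with the 'task_list' extra, an item
-    #     - [x] done
-    # renders roughly as
-    #     <li><p><input type="checkbox"
-    #     class="task-list-item-checkbox" checked disabled>done</p></li>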
-
-    _last_li_endswith_two_eols = False
-    def _list_item_sub(self, match):
-        item = match.group(4)
-        leading_line = match.group(1)
-        if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
-            item = self._run_block_gamut(self._outdent(item))
-        else:
-            # Recursion for sub-lists:
-            item = self._do_lists(self._outdent(item))
-            if item.endswith('\n'):
-                item = item[:-1]
-            item = self._run_span_gamut(item)
-        self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
-
-        if "task_list" in self.extras:
-            item = self._task_list_item_re.sub(self._task_list_item_sub, item)
-
-        return "<li>%s</li>\n" % item
-
-    def _process_list_items(self, list_str):
-        # Process the contents of a single ordered or unordered list,
-        # splitting it into individual list items.
-
-        # The $g_list_level global keeps track of when we're inside a list.
-        # Each time we enter a list, we increment it; when we leave a list,
-        # we decrement. If it's zero, we're not in a list anymore.
-        #
-        # We do this because when we're not inside a list, we want to treat
-        # something like this:
-        #
-        #       I recommend upgrading to version
-        #       8. Oops, now this line is treated
-        #       as a sub-list.
-        #
-        # As a single paragraph, despite the fact that the second line starts
-        # with a digit-period-space sequence.
-        #
-        # Whereas when we're inside a list (or sub-list), that line will be
-        # treated as the start of a sub-list. What a kludge, huh? This is
-        # an aspect of Markdown's syntax that's hard to parse perfectly
-        # without resorting to mind-reading. Perhaps the solution is to
-        # change the syntax rules such that sub-lists must start with a
-        # starting cardinal number; e.g. "1." or "a.".
-        self.list_level += 1
-        self._last_li_endswith_two_eols = False
-        list_str = list_str.rstrip('\n') + '\n'
-        list_str = self._list_item_re.sub(self._list_item_sub, list_str)
-        self.list_level -= 1
-        return list_str
-
-    def _get_pygments_lexer(self, lexer_name):
-        try:
-            from pygments import lexers, util
-        except ImportError:
-            return None
-        try:
-            return lexers.get_lexer_by_name(lexer_name)
-        except util.ClassNotFound:
-            return None
-
-    def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
-        import pygments
-        import pygments.formatters
-
-        class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
-            def _wrap_code(self, inner):
-                """A function for use in a Pygments Formatter which
-                wraps in <code> tags.
-                """
-                yield 0, "<code>"
-                for tup in inner:
-                    yield tup
-                yield 0, "</code>"
-
-            def wrap(self, source, outfile):
-                """Return the source with a code, pre, and div."""
-                return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
-
-        formatter_opts.setdefault("cssclass", "codehilite")
-        formatter = HtmlCodeFormatter(**formatter_opts)
-        return pygments.highlight(codeblock, lexer, formatter)
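-
-    # Illustrative example: with the 'fenced-code-blocks' extra and
-    # Pygments installed, a block fenced as ```python ... ``` is
-    # highlighted here and emitted inside
-    #     <div class="codehilite"><pre><code>...</code></pre></div>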
-
-    def _code_block_sub(self, match, is_fenced_code_block=False):
-        lexer_name = None
-        if is_fenced_code_block:
-            lexer_name = match.group(1)
-            if lexer_name:
-                formatter_opts = self.extras['fenced-code-blocks'] or {}
-            codeblock = match.group(2)
-            codeblock = codeblock[:-1]  # drop one trailing newline
-        else:
-            codeblock = match.group(1)
-            codeblock = self._outdent(codeblock)
-            codeblock = self._detab(codeblock)
-            codeblock = codeblock.lstrip('\n')  # trim leading newlines
-            codeblock = codeblock.rstrip()      # trim trailing whitespace
-
-            # Note: "code-color" extra is DEPRECATED.
-            if "code-color" in self.extras and codeblock.startswith(":::"):
-                lexer_name, rest = codeblock.split('\n', 1)
-                lexer_name = lexer_name[3:].strip()
-                codeblock = rest.lstrip("\n")   # Remove lexer declaration line.
-                formatter_opts = self.extras['code-color'] or {}
-
-        if lexer_name:
-            def unhash_code(codeblock):
-                for key, sanitized in list(self.html_spans.items()):
-                    codeblock = codeblock.replace(key, sanitized)
-                replacements = [
-                    ("&amp;", "&"),
-                    ("&lt;", "<"),
-                    ("&gt;", ">")
-                ]
-                for old, new in replacements:
-                    codeblock = codeblock.replace(old, new)
-                return codeblock
-            lexer = self._get_pygments_lexer(lexer_name)
-            if lexer:
-                codeblock = unhash_code(codeblock)
-                colored = self._color_with_pygments(codeblock, lexer,
-                                                    **formatter_opts)
-                return "\n\n%s\n\n" % colored
-
-        codeblock = self._encode_code(codeblock)
-        pre_class_str = self._html_class_str_from_tag("pre")
-        code_class_str = self._html_class_str_from_tag("code")
-        return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % (
-            pre_class_str, code_class_str, codeblock)
-
-    def _html_class_str_from_tag(self, tag):
-        """Get the appropriate ' class="..."' string (note the leading
-        space), if any, for the given tag.
-        """
-        if "html-classes" not in self.extras:
-            return ""
-        try:
-            html_classes_from_tag = self.extras["html-classes"]
-        except TypeError:
-            return ""
-        else:
-            if tag in html_classes_from_tag:
-                return ' class="%s"' % html_classes_from_tag[tag]
-        return ""
-
-    def _do_code_blocks(self, text):
-        """Process Markdown `<pre><code>` blocks."""
-        code_block_re = re.compile(r'''
-            (?:\n\n|\A\n?)
-            (               # $1 = the code block -- one or more lines, starting with a space/tab
-              (?:
-                (?:[ ]{%d} | \t)  # Lines must start with a tab or a tab-width of spaces
-                .*\n+
-              )+
-            )
-            ((?=^[ ]{0,%d}\S)|\Z)   # Lookahead for non-space at line-start, or end of doc
-            # Lookahead to make sure this block isn't already in a code block.
-            # Needed when syntax highlighting is being used.
-            (?![^<]*\</code\>)
-            ''' % (self.tab_width, self.tab_width),
-            re.M | re.X)
-        return code_block_re.sub(self._code_block_sub, text)
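-
-    # Illustrative example: a line indented by one tab-width,
-    #     '    print("hi")'
-    # is outdented, escaped, and ultimately emitted as
-    #     <pre><code>print("hi")
-    #     </code></pre>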
-
-    _fenced_code_block_re = re.compile(r'''
-        (?:\n+|\A\n?)
-        ^```([\w+-]+)?[ \t]*\n      # opening fence, $1 = optional lang
-        (.*?)                       # $2 = code block content
-        ^```[ \t]*\n                # closing fence
-        ''', re.M | re.X | re.S)
-
-    def _fenced_code_block_sub(self, match):
-        return self._code_block_sub(match, is_fenced_code_block=True)
-
-    def _do_fenced_code_blocks(self, text):
-        """Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
-        return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
-
-    # Rules for a code span:
-    # - backslash escapes are not interpreted in a code span
-    # - to include a literal backtick or a run of backticks, the
-    #   delimiters must be a longer run of backticks
-    # - cannot start or end a code span with a backtick; pad with a
-    #   space and that space will be removed in the emitted HTML
-    # See `test/tm-cases/escapes.text` for a number of edge-case
-    # examples.
-    _code_span_re = re.compile(r'''
-            (?<!\\)
-            (`+)        # \1 = Opening run of `
-            (?!`)       # See Note A in test/tm-cases/escapes.text
-            (.+?)       # \2 = The code span content
-            (?<!`)
-            \1          # Matching closer
-            (?!`)
-        ''', re.X | re.S)
-
-    def _code_span_sub(self, match):
-        c = match.group(2).strip(" \t")
-        c = self._encode_code(c)
-        return "<code>%s</code>" % c
-
-    def _do_code_spans(self, text):
-        #   *   Backtick quotes are used for <code></code> spans.
-        #
-        #   *   You can use multiple backticks as the delimiters if you want to
-        #       include literal backticks in the code span. So, this input:
-        #
-        #         Just type ``foo `bar` baz`` at the prompt.
-        #
-        #       Will translate to:
-        #
-        #         <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
-        #
-        #       There's no arbitrary limit to the number of backticks you
-        #       can use as delimiters. If you need three consecutive backticks
-        #       in your code, use four for delimiters, etc.
-        #
-        #   *   You can use spaces to get literal backticks at the edges:
-        #
-        #         ... type `` `bar` `` ...
-        #
-        #       Turns to:
-        #
-        #         ... type <code>`bar`</code> ...
-        return self._code_span_re.sub(self._code_span_sub, text)
-
-    def _encode_code(self, text):
-        """Encode/escape certain characters inside Markdown code runs.
-        The point is that in code, these characters are literals,
-        and lose their special Markdown meanings.
-        """
-        replacements = [
-            # Encode all ampersands; HTML entities are not
-            # entities within a Markdown code span.
-            ('&', '&amp;'),
-            # Do the angle bracket song and dance:
-            ('<', '&lt;'),
-            ('>', '&gt;'),
-        ]
-        for before, after in replacements:
-            text = text.replace(before, after)
-        hashed = _hash_text(text)
-        self._escape_table[text] = hashed
-        return hashed
-
-    _strike_re = re.compile(r"~~(?=\S)(.+?)(?<=\S)~~", re.S)
-    def _do_strike(self, text):
-        text = self._strike_re.sub(r"<strike>\1</strike>", text)
-        return text
-
-    _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
-    _em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
-    _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
-    _code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
-    def _do_italics_and_bold(self, text):
-        # <strong> must go first:
-        if "code-friendly" in self.extras:
-            text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
-            text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
-        else:
-            text = self._strong_re.sub(r"<strong>\2</strong>", text)
-            text = self._em_re.sub(r"<em>\2</em>", text)
-        return text
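-
-    # Illustrative example:
-    #     '**bold** and *em*' -> '<strong>bold</strong> and <em>em</em>'
-    # With the 'code-friendly' extra, __underscores__ and _underscores_
-    # are left untouched.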
-
-    # "smarty-pants" extra: Very liberal in interpreting a single prime as an
-    # apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
-    # "twixt" can be written without an initial apostrophe. This is fine because
-    # using scare quotes (single quotation marks) is rare.
-    _apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
-    _contractions = ["tis", "twas", "twer", "neath", "o", "n",
-        "round", "bout", "twixt", "nuff", "fraid", "sup"]
-    def _do_smart_contractions(self, text):
-        text = self._apostrophe_year_re.sub(r"&#8217;\1", text)
-        for c in self._contractions:
-            text = text.replace("'%s" % c, "&#8217;%s" % c)
-            text = text.replace("'%s" % c.capitalize(),
-                "&#8217;%s" % c.capitalize())
-        return text
-
-    # Substitute double-quotes before single-quotes.
-    _opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)")
-    _opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)')
-    _closing_single_quote_re = re.compile(r"(?<=\S)'")
-    _closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))')
-    def _do_smart_punctuation(self, text):
-        """Fancifies 'single quotes', "double quotes", and apostrophes.
-        Converts --, ---, and ... into en dashes, em dashes, and ellipses.
-
-        Inspiration is: <http://daringfireball.net/projects/smartypants/>
-        See "test/tm-cases/smarty_pants.text" for a full discussion of the
-        support here and
-        <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
-        discussion of some diversion from the original SmartyPants.
-        """
-        if "'" in text:  # guard for perf
-            text = self._do_smart_contractions(text)
-            text = self._opening_single_quote_re.sub("&#8216;", text)
-            text = self._closing_single_quote_re.sub("&#8217;", text)
-
-        if '"' in text:  # guard for perf
-            text = self._opening_double_quote_re.sub("&#8220;", text)
-            text = self._closing_double_quote_re.sub("&#8221;", text)
-
-        text = text.replace("---", "&#8212;")
-        text = text.replace("--", "&#8211;")
-        text = text.replace("...", "&#8230;")
-        text = text.replace(" . . . ", "&#8230;")
-        text = text.replace(". . .", "&#8230;")
-        return text
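-
-    # Illustrative example:
-    #     It's "quoted"...  ->  It&#8217;s &#8220;quoted&#8221;&#8230;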
-
-    _block_quote_base = r'''
-        (                           # Wrap whole match in \1
-          (
-            ^[ \t]*>%s[ \t]?        # '>' at the start of a line
-              .+\n                  # rest of the first line
-            (.+\n)*                 # subsequent consecutive lines
-            \n*                     # blanks
-          )+
-        )
-    '''
-    _block_quote_re = re.compile(_block_quote_base % '', re.M | re.X)
-    _block_quote_re_spoiler = re.compile(_block_quote_base % '[ \t]*?!?', re.M | re.X)
-    _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M)
-    _bq_one_level_re_spoiler = re.compile('^[ \t]*>[ \t]*?![ \t]?', re.M)
-    _bq_all_lines_spoilers = re.compile(r'\A(?:^[ \t]*>[ \t]*?!.*[\n\r]*)+\Z', re.M)
-    _html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
-    def _dedent_two_spaces_sub(self, match):
-        return re.sub(r'(?m)^  ', '', match.group(1))
-
-    def _block_quote_sub(self, match):
-        bq = match.group(1)
-        is_spoiler = 'spoiler' in self.extras and self._bq_all_lines_spoilers.match(bq)
-        # trim one level of quoting
-        if is_spoiler:
-            bq = self._bq_one_level_re_spoiler.sub('', bq)
-        else:
-            bq = self._bq_one_level_re.sub('', bq)
-        # trim whitespace-only lines
-        bq = self._ws_only_line_re.sub('', bq)
-        bq = self._run_block_gamut(bq)          # recurse
-
-        bq = re.sub('(?m)^', '  ', bq)
-        # These leading spaces screw with <pre> content, so we need to fix that:
-        bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
-
-        if is_spoiler:
-            return '<blockquote class="spoiler">\n%s\n</blockquote>\n\n' % bq
-        else:
-            return '<blockquote>\n%s\n</blockquote>\n\n' % bq
-
-    def _do_block_quotes(self, text):
-        if '>' not in text:
-            return text
-        if 'spoiler' in self.extras:
-            return self._block_quote_re_spoiler.sub(self._block_quote_sub, text)
-        else:
-            return self._block_quote_re.sub(self._block_quote_sub, text)
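-
-    # Illustrative example:
-    #     > quoted text
-    # becomes
-    #     <blockquote>
-    #       <p>quoted text</p>
-    #     </blockquote>
-    # With the 'spoiler' extra, quotes written as '>!' get
-    # <blockquote class="spoiler"> instead.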
-
-    def _form_paragraphs(self, text):
-        # Strip leading and trailing lines:
-        text = text.strip('\n')
-
-        # Wrap <p> tags.
-        grafs = []
-        for i, graf in enumerate(re.split(r"\n{2,}", text)):
-            if graf in self.html_blocks:
-                # Unhashify HTML blocks
-                grafs.append(self.html_blocks[graf])
-            else:
-                cuddled_list = None
-                if "cuddled-lists" in self.extras:
-                    # Need to put back trailing '\n' for `_list_item_re`
-                    # match at the end of the paragraph.
-                    li = self._list_item_re.search(graf + '\n')
-                    # Two of the same list marker in this paragraph: a likely
-                    # candidate for a list cuddled to preceding paragraph
-                    # text (issue 33). Note the `[-1]` is a quick way to
-                    # consider numeric bullets (e.g. "1." and "2.") to be
-                    # equal.
-                    if (li and len(li.group(2)) <= 3 and li.group("next_marker")
-                        and li.group("marker")[-1] == li.group("next_marker")[-1]):
-                        start = li.start()
-                        cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
-                        assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
-                        graf = graf[:start]
-
-                # Wrap <p> tags.
-                graf = self._run_span_gamut(graf)
-                grafs.append("<p>" + graf.lstrip(" \t") + "</p>")
-
-                if cuddled_list:
-                    grafs.append(cuddled_list)
-
-        return "\n\n".join(grafs)
-
-    def _add_footnotes(self, text):
-        if self.footnotes:
-            footer = [
-                '<div class="footnotes">',
-                '<hr' + self.empty_element_suffix,
-                '<ol>',
-            ]
-            for i, id in enumerate(self.footnote_ids):
-                if i != 0:
-                    footer.append('')
-                footer.append('<li id="fn-%s">' % id)
-                footer.append(self._run_block_gamut(self.footnotes[id]))
-                backlink = ('<a href="#fnref-%s" '
-                    'class="footnoteBackLink" '
-                    'title="Jump back to footnote %d in the text.">'
-                    '&#8617;</a>' % (id, i+1))
-                if footer[-1].endswith("</p>"):
-                    footer[-1] = footer[-1][:-len("</p>")] \
-                        + '&#160;' + backlink + "</p>"
-                else:
-                    footer.append("\n<p>%s</p>" % backlink)
-                footer.append('</li>')
-            footer.append('</ol>')
-            footer.append('</div>')
-            return text + '\n\n' + '\n'.join(footer)
-        else:
-            return text
-
-    # Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
-    #   http://bumppo.net/projects/amputator/
-    _ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
-    _naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
-    _naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I)
-
-    def _encode_amps_and_angles(self, text):
-        # Smart processing for ampersands and angle brackets that need
-        # to be encoded.
-        text = self._ampersand_re.sub('&amp;', text)
-
-        # Encode naked <'s
-        text = self._naked_lt_re.sub('&lt;', text)
-
-        # Encode naked >'s
-        # Note: Other markdown implementations (e.g. Markdown.pl, PHP
-        # Markdown) don't do this.
-        text = self._naked_gt_re.sub('&gt;', text)
-        return text
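-
-    # Illustrative example:
-    #     'AT&T uses &copy; and 1<2'
-    #         -> 'AT&amp;T uses &copy; and 1&lt;2'
-    # (the existing entity is preserved; the naked '&' and '<' are
-    # encoded).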
-
-    def _encode_backslash_escapes(self, text):
-        for ch, escape in list(self._escape_table.items()):
-            text = text.replace("\\"+ch, escape)
-        return text
-
-    _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
-    def _auto_link_sub(self, match):
-        g1 = match.group(1)
-        return '<a href="%s">%s</a>' % (g1, g1)
-
-    _auto_email_link_re = re.compile(r"""
-          <
-           (?:mailto:)?
-          (
-              [-.\w]+
-              \@
-              [-\w]+(\.[-\w]+)*\.[a-z]+
-          )
-          >
-        """, re.I | re.X | re.U)
-    def _auto_email_link_sub(self, match):
-        return self._encode_email_address(
-            self._unescape_special_chars(match.group(1)))
-
-    def _do_auto_links(self, text):
-        text = self._auto_link_re.sub(self._auto_link_sub, text)
-        text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
-        return text
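-
-    # Illustrative example:
-    #     <http://example.com>
-    #         -> <a href="http://example.com">http://example.com</a>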
-
-    def _encode_email_address(self, addr):
-        #  Input: an email address, e.g. "foo at example.com"
-        #
-        #  Output: the email address as a mailto link, with each character
-        #      of the address encoded as either a decimal or hex entity, in
-        #      the hopes of foiling most address harvesting spam bots. E.g.:
-        #
-        #    <a href="&#x6D;&#97;&#105;&#108;&#x74;&#111;:&#102;&#111;&#111;&#64;&#101;
-        #       x&#x61;&#109;&#x70;&#108;&#x65;&#x2E;&#99;&#111;&#109;">&#102;&#111;&#111;
-        #       &#64;&#101;x&#x61;&#109;&#x70;&#108;&#x65;&#x2E;&#99;&#111;&#109;</a>
-        #
-        #  Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
-        #  mailing list: <http://tinyurl.com/yu7ue>
-        chars = [_xml_encode_email_char_at_random(ch)
-                 for ch in "mailto:" + addr]
-        # Strip the mailto: from the visible part.
-        addr = '<a href="%s">%s</a>' \
-               % (''.join(chars), ''.join(chars[7:]))
-        return addr
-
-    def _do_link_patterns(self, text):
-        """Caveat emptor: there isn't much guarding against link
-        patterns being formed inside other standard Markdown links, e.g.
-        inside a [link def][like this].
-
-        Dev Notes: *Could* consider prefixing regexes with a negative
-        lookbehind assertion to attempt to guard against this.
-        """
-        link_from_hash = {}
-        for regex, repl in self.link_patterns:
-            replacements = []
-            for match in regex.finditer(text):
-                if hasattr(repl, "__call__"):
-                    href = repl(match)
-                else:
-                    href = match.expand(repl)
-                replacements.append((match.span(), href))
-            for (start, end), href in reversed(replacements):
-                escaped_href = (
-                    href.replace('"', '&quot;')  # b/c of attr quote
-                        # To avoid markdown <em> and <strong>:
-                        .replace('*', self._escape_table['*'])
-                        .replace('_', self._escape_table['_']))
-                link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
-                hash = _hash_text(link)
-                link_from_hash[hash] = link
-                text = text[:start] + hash + text[end:]
-        for hash, link in list(link_from_hash.items()):
-            text = text.replace(hash, link)
-        return text
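-
-    # Illustrative sketch (the pattern and URL are hypothetical):
-    #     link_patterns = [
-    #         (re.compile(r'issue\s+#?(\d+)', re.I),
-    #          r'https://example.com/issues/\1'),
-    #     ]
-    #     html = markdown(text, extras=['link-patterns'],
-    #                     link_patterns=link_patterns)
-    # would turn 'issue 42' into a link to
-    # https://example.com/issues/42.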
-
-    def _unescape_special_chars(self, text):
-        # Swap back in all the special characters we've hidden.
-        for ch, hash in list(self._escape_table.items()):
-            text = text.replace(hash, ch)
-        return text
-
-    def _outdent(self, text):
-        # Remove one level of line-leading tabs or spaces
-        return self._outdent_re.sub('', text)
-
-
-class MarkdownWithExtras(Markdown):
-    """A markdowner class that enables most extras:
-
-    - footnotes
-    - code-color (only has effect if 'pygments' Python module on path)
-
-    These are not included:
-    - pyshell (specific to Python-related documenting)
-    - code-friendly (because it *disables* part of the syntax)
-    - link-patterns (because you need to specify some actual
-      link-patterns anyway)
-    """
-    extras = ["footnotes", "code-color"]
-
-
-# ---- internal support functions
-
-class UnicodeWithAttrs(unicode):
-    """A subclass of unicode used for the return value of conversion to
-    possibly attach some attributes. E.g. the "toc_html" attribute when
-    the "toc" extra is used.
-    """
-    metadata = None
-    _toc = None
-    def toc_html(self):
-        """Return the HTML for the current TOC.
-
-        This expects the `_toc` attribute to have been set on this instance.
-        """
-        if self._toc is None:
-            return None
-
-        def indent():
-            return '  ' * (len(h_stack) - 1)
-        lines = []
-        h_stack = [0]   # stack of header-level numbers
-        for level, id, name in self._toc:
-            if level > h_stack[-1]:
-                lines.append("%s<ul>" % indent())
-                h_stack.append(level)
-            elif level == h_stack[-1]:
-                lines[-1] += "</li>"
-            else:
-                while level < h_stack[-1]:
-                    h_stack.pop()
-                    if not lines[-1].endswith("</li>"):
-                        lines[-1] += "</li>"
-                    lines.append("%s</ul></li>" % indent())
-            lines.append('%s<li><a href="#%s">%s</a>' % (
-                indent(), id, name))
-        while len(h_stack) > 1:
-            h_stack.pop()
-            if not lines[-1].endswith("</li>"):
-                lines[-1] += "</li>"
-            lines.append("%s</ul>" % indent())
-        return '\n'.join(lines) + '\n'
-    toc_html = property(toc_html)
-
-## {{{ http://code.activestate.com/recipes/577257/ (r1)
-_slugify_strip_re = re.compile(r'[^\w\s-]')
-_slugify_hyphenate_re = re.compile(r'[-\s]+')
-def _slugify(value):
-    """
-    Normalizes string, converts to lowercase, removes non-word
-    characters, and converts spaces to hyphens.
-
-    From Django's "django/template/defaultfilters.py".
-    """
-    import unicodedata
-    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
-    value = _slugify_strip_re.sub('', value).strip().lower()
-    return _slugify_hyphenate_re.sub('-', value)
-## end of http://code.activestate.com/recipes/577257/ }}}
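-
-# Illustrative examples:
-#     _slugify("Hello, World!")       # -> 'hello-world'
-#     _slugify("Jack & Jill's page")  # -> 'jack-jills-page'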
-
-
-# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
-def _curry(*args, **kwargs):
-    function, args = args[0], args[1:]
-    def result(*rest, **kwrest):
-        combined = kwargs.copy()
-        combined.update(kwrest)
-        return function(*args + rest, **combined)
-    return result
-
-
-# Recipe: regex_from_encoded_pattern (1.0)
-def _regex_from_encoded_pattern(s):
-    """'foo'    -> re.compile(re.escape('foo'))
-       '/foo/'  -> re.compile('foo')
-       '/foo/i' -> re.compile('foo', re.I)
-    """
-    if s.startswith('/') and s.rfind('/') != 0:
-        # Parse it: /PATTERN/FLAGS
-        idx = s.rfind('/')
-        pattern, flags_str = s[1:idx], s[idx+1:]
-        flag_from_char = {
-            "i": re.IGNORECASE,
-            "l": re.LOCALE,
-            "s": re.DOTALL,
-            "m": re.MULTILINE,
-            "u": re.UNICODE,
-        }
-        flags = 0
-        for char in flags_str:
-            try:
-                flags |= flag_from_char[char]
-            except KeyError:
-                raise ValueError("unsupported regex flag: '%s' in '%s' "
-                                 "(must be one of '%s')"
-                                 % (char, s, ''.join(list(flag_from_char.keys()))))
-        return re.compile(s[1:idx], flags)
-    else:  # not an encoded regex
-        return re.compile(re.escape(s))
-
-
-# Recipe: dedent (0.1.2)
-def _dedentlines(lines, tabsize=8, skip_first_line=False):
-    """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
-
-        "lines" is a list of lines to dedent.
-        "tabsize" is the tab width to use for indent width calculations.
-        "skip_first_line" is a boolean indicating if the first line should
-            be skipped for calculating the indent width and for dedenting.
-            This is sometimes useful for docstrings and similar.
-
-    Same as dedent() except operates on a sequence of lines. Note: the
-    lines list is modified **in-place**.
-    """
-    DEBUG = False
-    if DEBUG:
-        print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
-              % (tabsize, skip_first_line))
-    margin = None
-    for i, line in enumerate(lines):
-        if i == 0 and skip_first_line: continue
-        indent = 0
-        for ch in line:
-            if ch == ' ':
-                indent += 1
-            elif ch == '\t':
-                indent += tabsize - (indent % tabsize)
-            elif ch in '\r\n':
-                continue  # skip all-whitespace lines
-            else:
-                break
-        else:
-            continue  # skip all-whitespace lines
-        if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
-        if margin is None:
-            margin = indent
-        else:
-            margin = min(margin, indent)
-    if DEBUG: print("dedent: margin=%r" % margin)
-
-    if margin is not None and margin > 0:
-        for i, line in enumerate(lines):
-            if i == 0 and skip_first_line: continue
-            removed = 0
-            for j, ch in enumerate(line):
-                if ch == ' ':
-                    removed += 1
-                elif ch == '\t':
-                    removed += tabsize - (removed % tabsize)
-                elif ch in '\r\n':
-                    if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
-                    lines[i] = lines[i][j:]
-                    break
-                else:
-                    raise ValueError("unexpected non-whitespace char %r in "
-                                     "line %r while removing %d-space margin"
-                                     % (ch, line, margin))
-                if DEBUG:
-                    print("dedent: %r: %r -> removed %d/%d"\
-                          % (line, ch, removed, margin))
-                if removed == margin:
-                    lines[i] = lines[i][j+1:]
-                    break
-                elif removed > margin:
-                    lines[i] = ' '*(removed-margin) + lines[i][j+1:]
-                    break
-            else:
-                if removed:
-                    lines[i] = lines[i][removed:]
-    return lines
-
-
-def _dedent(text, tabsize=8, skip_first_line=False):
-    """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
-
-        "text" is the text to dedent.
-        "tabsize" is the tab width to use for indent width calculations.
-        "skip_first_line" is a boolean indicating if the first line should
-            be skipped for calculating the indent width and for dedenting.
-            This is sometimes useful for docstrings and similar.
-
-    textwrap.dedent(s), but don't expand tabs to spaces
-    """
-    lines = text.splitlines(1)
-    _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
-    return ''.join(lines)
-
-
-class _memoized(object):
-    """Decorator that caches a function's return value each time it is called.
-    If called later with the same arguments, the cached value is returned, and
-    not re-evaluated.
-
-    http://wiki.python.org/moin/PythonDecoratorLibrary
-    """
-    def __init__(self, func):
-        self.func = func
-        self.cache = {}
-
-    def __call__(self, *args):
-        try:
-            return self.cache[args]
-        except KeyError:
-            self.cache[args] = value = self.func(*args)
-            return value
-        except TypeError:
-            # uncachable -- for instance, passing a list as an argument.
-            # Better to not cache than to blow up entirely.
-            return self.func(*args)
-
-    def __repr__(self):
-        """Return the function's docstring."""
-        return self.func.__doc__
-
-
-def _xml_oneliner_re_from_tab_width(tab_width):
-    """Standalone XML processing instruction regex."""
-    return re.compile(r"""
-        (?:
-            (?<=\n\n)       # Starting after a blank line
-            |               # or
-            \A\n?           # the beginning of the doc
-        )
-        (                           # save in $1
-            [ ]{0,%d}
-            (?:
-                <\?\w+\b\s+.*?\?>   # XML processing instruction
-                |
-                <\w+:\w+\b\s+.*?/>  # namespaced single tag
-            )
-            [ \t]*
-            (?=\n{2,}|\Z)       # followed by a blank line or end of document
-        )
-        """ % (tab_width - 1), re.X)
-_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
-
-
-def _hr_tag_re_from_tab_width(tab_width):
-    return re.compile(r"""
-        (?:
-            (?<=\n\n)       # Starting after a blank line
-            |               # or
-            \A\n?           # the beginning of the doc
-        )
-        (                       # save in \1
-            [ ]{0,%d}
-            <(hr)               # start tag = \2
-            \b                  # word break
-            ([^<>])*?           #
-            /?>                 # the matching end tag
-            [ \t]*
-            (?=\n{2,}|\Z)       # followed by a blank line or end of document
-        )
-        """ % (tab_width - 1), re.X)
-_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
-
-
-def _xml_escape_attr(attr, skip_single_quote=True):
-    """Escape the given string for use in an HTML/XML tag attribute.
-
-    By default this doesn't bother with escaping `'` to `&#39;`, presuming that
-    the tag attribute is surrounded by double quotes.
-    """
-    escaped = (attr
-        .replace('&', '&amp;')
-        .replace('"', '&quot;')
-        .replace('<', '&lt;')
-        .replace('>', '&gt;'))
-    if not skip_single_quote:
-        escaped = escaped.replace("'", "&#39;")
-    return escaped
-
-
-def _xml_encode_email_char_at_random(ch):
-    r = random()
-    # Roughly 10% raw, 45% hex, 45% dec.
-    # '@' *must* be encoded. I [John Gruber] insist.
-    # Issue 26: '_' must be encoded.
-    if r > 0.9 and ch not in "@_":
-        return ch
-    elif r < 0.45:
-        # The [1:] is to drop leading '0': 0x63 -> x63
-        return '&#%s;' % hex(ord(ch))[1:]
-    else:
-        return '&#%s;' % ord(ch)
-
-
-def _urlencode(attr, safe_mode=False):
-    """Replace special characters in string using the %xx escape."""
-    if safe_mode:
-        escaped = quote_plus(attr).replace('+', ' ')
-    else:
-        escaped = attr.replace('"', '%22')
-    return escaped
-
-
-# ---- mainline
-
-class _NoReflowFormatter(optparse.IndentedHelpFormatter):
-    """An optparse formatter that does NOT reflow the description."""
-    def format_description(self, description):
-        return description or ""
-
-
-def _test():
-    import doctest
-    doctest.testmod()
-
-
-def main(argv=None):
-    if argv is None:
-        argv = sys.argv
-    if not logging.root.handlers:
-        logging.basicConfig()
-
-    usage = "usage: %prog [PATHS...]"
-    version = "%prog "+__version__
-    parser = optparse.OptionParser(prog="markdown2", usage=usage,
-        version=version, description=cmdln_desc,
-        formatter=_NoReflowFormatter())
-    parser.add_option("-v", "--verbose", dest="log_level",
-                      action="store_const", const=logging.DEBUG,
-                      help="more verbose output")
-    parser.add_option("--encoding",
-                      help="specify encoding of text content")
-    parser.add_option("--html4tags", action="store_true", default=False,
-                      help="use HTML 4 style for empty element tags")
-    parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
-                      help="sanitize literal HTML: 'escape' escapes "
-                           "HTML meta chars, 'replace' replaces with an "
-                           "[HTML_REMOVED] note")
-    parser.add_option("-x", "--extras", action="append",
-                      help="Turn on specific extra features (not part of "
-                           "the core Markdown spec). See above.")
-    parser.add_option("--use-file-vars",
-                      help="Look for and use Emacs-style 'markdown-extras' "
-                           "file var to turn on extras. See "
-                           "<https://github.com/trentm/python-markdown2/wiki/Extras>")
-    parser.add_option("--link-patterns-file",
-                      help="path to a link pattern file")
-    parser.add_option("--self-test", action="store_true",
-                      help="run internal self-tests (some doctests)")
-    parser.add_option("--compare", action="store_true",
-                      help="run against Markdown.pl as well (for testing)")
-    parser.set_defaults(log_level=logging.INFO, compare=False,
-                        encoding="utf-8", safe_mode=None, use_file_vars=False)
-    opts, paths = parser.parse_args()
-    log.setLevel(opts.log_level)
-
-    if opts.self_test:
-        return _test()
-
-    if opts.extras:
-        extras = {}
-        for s in opts.extras:
-            splitter = re.compile("[,;: ]+")
-            for e in splitter.split(s):
-                if '=' in e:
-                    ename, earg = e.split('=', 1)
-                    try:
-                        earg = int(earg)
-                    except ValueError:
-                        pass
-                else:
-                    ename, earg = e, None
-                extras[ename] = earg
-    else:
-        extras = None
-
-    if opts.link_patterns_file:
-        link_patterns = []
-        f = open(opts.link_patterns_file)
-        try:
-            for i, line in enumerate(f.readlines()):
-                if not line.strip(): continue
-                if line.lstrip().startswith("#"): continue
-                try:
-                    pat, href = line.rstrip().rsplit(None, 1)
-                except ValueError:
-                    raise MarkdownError("%s:%d: invalid link pattern line: %r"
-                                        % (opts.link_patterns_file, i+1, line))
-                link_patterns.append(
-                    (_regex_from_encoded_pattern(pat), href))
-        finally:
-            f.close()
-    else:
-        link_patterns = None
-
-    from os.path import join, dirname, abspath, exists
-    markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
-                       "Markdown.pl")
-    if not paths:
-        paths = ['-']
-    for path in paths:
-        if path == '-':
-            text = sys.stdin.read()
-        else:
-            fp = codecs.open(path, 'r', opts.encoding)
-            text = fp.read()
-            fp.close()
-        if opts.compare:
-            from subprocess import Popen, PIPE
-            print("==== Markdown.pl ====")
-            p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
-            p.stdin.write(text.encode('utf-8'))
-            p.stdin.close()
-            perl_html = p.stdout.read().decode('utf-8')
-            if py3:
-                sys.stdout.write(perl_html)
-            else:
-                sys.stdout.write(perl_html.encode(
-                    sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
-            print("==== markdown2.py ====")
-        html = markdown(text,
-            html4tags=opts.html4tags,
-            safe_mode=opts.safe_mode,
-            extras=extras, link_patterns=link_patterns,
-            use_file_vars=opts.use_file_vars)
-        if py3:
-            sys.stdout.write(html)
-        else:
-            sys.stdout.write(html.encode(
-                sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
-        if extras and "toc" in extras:
-            log.debug("toc_html: " +
-                str(html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')))
-        if opts.compare:
-            test_dir = join(dirname(dirname(abspath(__file__))), "test")
-            if exists(join(test_dir, "test_markdown2.py")):
-                sys.path.insert(0, test_dir)
-                from test_markdown2 import norm_html_from_html
-                norm_html = norm_html_from_html(html)
-                norm_perl_html = norm_html_from_html(perl_html)
-            else:
-                norm_html = html
-                norm_perl_html = perl_html
-            print("==== match? %r ====" % (norm_perl_html == norm_html))
-
-
-if __name__ == "__main__":
-    sys.exit(main(sys.argv))
Index: checkout/doc/README.md
===================================================================
--- checkout/doc/README.md	(revision 93212)
+++ checkout/doc/README.md	(nonexistent)
@@ -1,17 +0,0 @@
-National Environmental Modeling System (NEMS) Version 4.0
-=========================================================
-
-This is the source directory for the NEMS component of NEMS-based
-modeling systems.  It:
-
-* Employs ESMF superstructure & utilities
-* Provides separate dynamics, physics, and coupler grid components
-  with import/export states
-* Adds digital filtering capabilities
-* Supports adiabatic (dynamics-only) simulation
-* Offers enhanced postprocessing capability
-
-Most documentation that was in this file is now in the doc/
-subdirectory, or in the app-level doc/ directory.  This documentation
-is automatically combined into a single HTML file when `make` is run
-in the doc/ subdirectory.
Index: checkout/doc/OLDTEST.md
===================================================================
--- checkout/doc/OLDTEST.md	(revision 93212)
+++ checkout/doc/OLDTEST.md	(nonexistent)
@@ -1,165 +0,0 @@
-Old Regression Test System
-==========================
-
-This section documents the old rt.sh system, which has been replaced.
-This system is no longer supported, but has been retained for backward
-compatibility.  Furthermore, the current NEMSCompsetRun requires this
-script.  The NEMSCompsetRun will be updated shortly, after some
-modulefile changes are made in NEMS.
-
-Running rt.sh
--------------
-
-The older regression test system is run as follows in bash, sh, or ksh:
-
-    cd NEMS/oldtests
-    ./rt.sh (command) (options) > rt.log 2>&1 &
-
-In csh or tcsh, do this:
-
-    cd NEMS/oldtests
-    ./rt.sh (command) (options) >& rt.log &
-
-This will launch a background process that runs the `rt.sh` and logs
-the status to `rt.log`.
-
-The `(command)` must include at least one of the following, which
-specify what is being run:
-
-* `-f` - run all tests that work on your platform
-* `-s` - run "standard" tests that work on your platform
-* `-c gfs` - create a new baseline for the gsm, wam and gocart tests
-* `-c nmm` - create a new baseline for nmm tests
-
-In addition, the following `(option)`s work:
-
-* `-m` - compare against user's baseline
-* `-l FILE` - use an alternate file instead of `rt.conf`
-
-The `> rt.log 2>&1` or `>& rt.log` are redirection operators in your
-shell.  They send additional information from `rt.sh` to `rt.log`.
-This is critical when debugging problems such as batch system failures
-or disk quota problems.
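-
-For example, to run all supported tests and compare against your own
-baseline (a hypothetical combination of the documented options):
-
-    cd NEMS/oldtests
-    ./rt.sh -f -m > rt.log 2>&1 &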
-
-Output of rt.sh
----------------
-
-The rt.sh produces several log files and a directory of results.
-
-* `RegressionTests_(platform).log` - regression test results
-* `Compile_(platform).log` - compilation logs
-* `rt.log` - debug output of rt.sh if you used the redirection operators
-* `/path/to/stmp/$USER/rt.$$/` - directory in which the tests were run
-
-In these paths,
-
-* `(platform)` - "theia" or "wcoss", the platform on which you ran
-* `/path/to/stmp` - is the scrub area chosen by the scripts
-* `$USER` - is your username
-* `$$` - is a unique id chosen by the `rt.sh` to avoid overwriting
-  an old run.  Generally it is the UNIX process id of `rt.sh`
-
-To find `/path/to/stmp` and `$$` you need to look in the log of `rt.sh`
-for a line like this, near the top of the `rt.log`:
-
-    mkdir -p /scratch4/NCEPDEV/stmp3/Samuel.Trahan/rt_104307
-
-Within that directory, you will find one directory for each test:
-
-    you at theia> ls -1 /scratch4/NCEPDEV/stmp3/Samuel.Trahan/rt_104307
-    gfs_eulerian
-    gfs_gocart_nemsio
-    gfs_slg
-    gfs_slg_48pe
-    gfs_slg_adiabatic
-    gfs_slg_land
-    gfs_slg_nsst
-    gfs_slg_rsthst
-    gfs_slg_stochy
-    gfs_slg_t574
-    nmm_2way_nests
-    nmm_2way_nests_debug
-    nmm_2way_nests_restart
-
-Each directory contains input and output files for each test.  Some
-files of interest are:
-
-* `err` - stderr stream from the batch job that ran this program
-* `out` - stdout stream from the batch job that ran this program
-* `PET*.ESMF_LogFile` - ESMF log files from each MPI rank
-* `nemsusage.xml` - resource usage information for all MPI ranks
-* `timing.summary` - resource usage information for rank 0
-
-Configuring rt.sh: The rt.conf
-------------------------------
-
-The behavior of `rt.sh` is controlled by the `rt.conf` file.  That
-file can be found in the NEMS/oldtests directory and has the following
-syntax:
-
-| COMMAND  |    METHOD    |  SUBSET  | PLATFORM |   VERSION  |
-| -------- | ------------ | -------- | -------- | ---------- |
-| APPBUILD | app=APP-NAME | standard |          |            |
-| RUN      | test_name    | standard |          | nmm        |
-| COMPILE  | nmm          | standard |          | nmmb_intel |
-
-The available commands are:
-
-* `APPBUILD` - run the [NEMSAppBuilder](#ni-appbuild) and load new modules
-* `RUN` - run a test
-* `COMPILE` - no longer supported; runs the [manual build system](#manual-method)
-
-The meaning of the other arguments depends on the command, and is
-described below.
-
-### rt.conf APPBUILD Command
-
-When the command is `APPBUILD` the other arguments have these meanings:
-
-* `METHOD` - arguments to send to the NEMSAppBuilder
-* `SUBSET` - `standard` or empty (all whitespace).  If `standard` is
-  here, then only the `rt.sh -s` mode will run this build.
-* `PLATFORM` - `wcoss` to run only on WCOSS, `theia` to run only on Theia,
-  or empty (all whitespace) to run on all platforms
-* `VERSION` - unused; leave this blank (all whitespace)
-
-### rt.conf RUN Command
-
-The RUN command runs a test.  The meanings of the columns are as follows:
-
-* `METHOD` - name of the test.  This must correspond to a file in the
-  NEMS/oldtests/tests directory.
-* `SUBSET` - `standard` or empty (all whitespace).  If `standard` is
-  here, then only the `rt.sh -s` mode will run this test.
-* `PLATFORM` - `wcoss` to run only on WCOSS, `theia` to run only on Theia,
-  or empty (all whitespace) to run on all platforms
-* `VERSION` - which model this pertains to: `nmm` or `gfs`
-
-### rt.conf COMPILE command
-
-This command runs the [manual build system](#manual-method).  This is
-unsupported and retained only for debugging the new build system.
-
-* `METHOD` - arguments to the `make` command
-* `SUBSET` - `standard` or empty (all whitespace).  If `standard` is
-  here, then only the `rt.sh -s` mode will run this build.
-* `PLATFORM` - Mandatory.  Must be `wcoss` to run only on WCOSS or
-  `theia` to run only on Theia.  This is used to construct the
-  `configure` command.
-* `VERSION` - Mandatory. The ESMF version, passed to the `configure` command.
-
-In the `COMPILE` mode, the following commands are run based on those
-arguments:
-
-    ./configure (VERSION)_(PLATFORM)
-    source conf/modules.nems
-    gmake clean
-    gmake (METHOD) J=-j2
-
-### Subsetting Tests in rt.conf
-
-Note that you can explicitly disable parts of the test suite by
-commenting out lines of rt.conf.  Be aware that disabling the build
-commands (APPBUILD or COMPILE) will skip the build process and cause
-tests to be run with whatever NEMS.x and modules.conf are presently in
-the NEMS external.
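-
-For example, a hypothetical `rt.conf` excerpt (assuming the
-pipe-separated column layout shown above, and that a leading `#`
-marks a commented-out line):
-
-    APPBUILD | app=APP-NAME   | standard |  |
-    RUN      | gfs_slg        | standard |  | gfs
-    #RUN     | nmm_2way_nests |          |  | nmm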
Index: checkout/doc/BUILD.md
===================================================================
--- checkout/doc/BUILD.md	(revision 93212)
+++ checkout/doc/BUILD.md	(nonexistent)
@@ -1,129 +0,0 @@
-<a name="building"></a>Building NEMS
-==========================================
-
-This chapter describes the options for building the NEMSLegacy, and
-the supported platforms.  There are three ways to build: the
-NEMSAppBuilder in interactive or non-interactive mode, or a manual
-process.  The recommended way to compile the NEMS is to use the
-NEMSAppBuilder in non-interactive mode.  However, all methods are
-described here.  We also provide troubleshooting information at the
-end of this chapter.
-
-Build Targets
--------------
-
-The list of build targets available for an app is found at the top
-level of the app in `*.appBuilder` files.  The app-level documentation
-should have information about the meanings and purpose of each build
-target.
-
-<a name="ni-appbuild"></a> Recommended Method: Non-Interactive NEMSAppBuilder
------------------------------------------------------------------------------
-
-From the top level (directory above NEMS), run the `NEMSAppBuilder`.
-This is a build script that knows how to build various apps, and works
-for more than just the NEMSLegacy.  The syntax is:
-
-    ./NEMS/NEMSAppBuilder (options) app=(app)
-
-Here, the `(app)` is the selected application as discussed in the
-[Supported Builds and Platforms section](#supbuild).  The `(options)`
-should be one of the following:
-
-* `rebuild` - clean the source directory before recompiling.
-* `norebuild` - do not clean; reuse existing libraries and object
-  files whenever possible.
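-
-For example, a clean rebuild of the NEMSLegacy app (the app name here
-is illustrative; use the one for your checkout) would be:
-
-    ./NEMS/NEMSAppBuilder rebuild app=NEMSLegacy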
-
-
-Interactive NEMSAppBuilder
---------------------------
-
-The NEMSAppBuilder can be run in interactive mode.  To do so, simply
-run the command without any arguments:
-
-    ./NEMS/NEMSAppBuilder
-
-The NEMSAppBuilder will instruct you further.  Note that this method
-will discard some of the log files, which makes build failures harder
-to track.  Also, it has some errors in its process tracking, and will
-kill the wrong processes when a build is canceled.  Such bugs are why
-the non-interactive mode is preferred.
-
-
-
-<a name="manual-method"></a>Manual Method: Configure, Clean, Make
------------------------------------------------------------------
-
-It is possible to build all apps via a manual method.  This method
-also makes other, undocumented features available.  Ultimately, the
-NEMSAppBuilder is simply a wrapper around these manual commands.
-Before using such manual commands, it is best to talk to code managers
-to make sure you are building correctly.
-
-The manual method works like this:
-
-    cd NEMS/src/
-    ./configure (method)
-    source conf/modules.nems
-    gmake clean
-    gmake (nems-ver) J=-j2
-
-The `(method)` is one of the available configurations.  Run
-`./configure help` to get a list, or read the `configure` script.
-The `(nems-ver)` is one of the following:
-
-* `gsm` - build the GSM without GOCART
-* `gsm GOCART_MODE=full` - build the GSM with GOCART
-* `nmm` - build the NMM without debug
-* `nmm DEBUG=on` - build NMM in debug mode
-* `nmm_post` - build NMM with inline post-processing
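-
-For example, a debug NMM build might look like the following; the
-`(method)` name here is hypothetical, so run `./configure help` to
-find the correct one for your platform:
-
-    cd NEMS/src/
-    ./configure nmmb_intel_theia
-    source conf/modules.nems
-    gmake clean
-    gmake nmm DEBUG=on J=-j2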
-
-
-
-Troubleshooting Failed Builds
------------------------------
-
-### Incomplete Checkout
-
-When there are network problems or high server load, your checkout
-from the Subversion and Git repositories may fail.  This will lead to
-any number of confusing errors while building.  You can continue the
-checkout process by going to the top level (above the NEMS directory) and running
-`svn update`.  Repeat that until no more files are updated, and no
-errors are reported.
-
-### Unclean Environment
-
-Setting up your environment incorrectly can lead to problems while
-building.  If you see build issues from a clean, new checkout, this
-may be the problem.  You should remove all `module` commands from your
-`~/.*rc` files and get a clean, new login shell.  Then retry the
-build.
-
-### Unclean Checkout
-
-Another common cause of failed builds is having unintended changes in
-your source code or build system.  To test for this, get a clean, new
-checkout from the repository and retry.
-
-### Unsupported Platform
-
-Some apps only support a few platforms.  For example, the NEMSLegacy
-app is only supported on WCOSS Phase 1 (Gyre/Tide) and NOAA Theia.
-Attempts to build on other platforms may or may not work.
-
-### Simultaneous Builds
-
-Attempting to build multiple times in the same NEMS checkout directory
-will cause unexpected failures.  For example, if you are running the
-regression test system twice at once, multiple builds will happen at
-the same time.  On Theia, this frequently shows up as a massive,
-many-terabyte file which cannot be created due to fileset quota limits.
-Other failure modes are possible.
-
-
-
-
-
-
-
Index: checkout/doc/md2html.py
===================================================================
--- checkout/doc/md2html.py	(revision 93212)
+++ checkout/doc/md2html.py	(nonexistent)
@@ -1,83 +0,0 @@
-#! /usr/bin/env python
-
-# Generates README.html from Markdown inputs using the markdown2
-# module in markdown2.py.  Run from the app-level doc directory,
-# passing the input *.md files followed by the output HTML file.
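-# Example (hypothetical file names):
-#   ./md2html.py README.md BUILD.md README.html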
-
-import markdown2
-import StringIO
-import logging
-import sys
-
-# Logging object for this module:
-logger=None
-
-TOP='''<html>
-<head>
-  <title>NEMSLegacy Build and Test Instructions</title>
-  <link rel="stylesheet" type="text/css" href="README.css">
-</head>
-<body>
-'''
-
-BOTTOM='''
-</body>
-</html>'''
-
-# List of extra options to turn on in markdown2:
-EXTRAS=['tables','code-friendly']
-
-def write(outfile,infiles):
-    # Open README.html as htmlf, in truncate mode:
-    logger.info('%s: output file'%(outfile,))
-    with open(outfile,'wt') as htmlf:
-        # Write the heading and open the <body> tag:
-        htmlf.write(TOP)
-
-        # Loop over all input files, writing each one:
-        for infile in infiles:
-            logger.info('%s: input file'%(infile,))
-            try:
-                htmlf.write(convert(infile))
-            except EnvironmentError as ee:
-                logger.warning('%s: skipping file: %s'%(infile,str(ee)))
-
-        # Close the body and html tags:
-        htmlf.write(BOTTOM)
-
-
-def convert(infile):
-    # Open the *.md file as mdf:
-    with open(infile,'rt') as mdf:
-        # Read all of the *.md file into md:
-        md=mdf.read()
-
-    # convert the contents of the *.md file to HTML and return it:
-    return markdown2.markdown(md,extras=EXTRAS)
-    
-def initlogging():
-    global logger
-    logger=logging.getLogger('md2html')
-    oformat=logging.Formatter(
-        "%(asctime)s.%(msecs)03d %(name)s (%(filename)s:%(lineno)d) "
-        "%(levelname)s: %(message)s",
-        "%m/%d %H:%M:%S")
-    root=logging.getLogger()
-    root.setLevel(logging.INFO)
-    logstream=logging.StreamHandler(sys.stderr)
-    logstream.setFormatter(oformat)
-    logstream.setLevel(logging.INFO)
-    root.addHandler(logstream)
-
-def main(args):
-    initlogging()
-    if len(args)<2:
-        logger.error('usage: md2html.py input.md [more.md ...] output.html')
-        sys.exit(2)
-    outfile=args[-1]   # last argument is the output file
-    infiles=args[0:-1] # remaining arguments are the input files
-    logger.info('Out %s in %s'%(outfile,
-                                ':'.join(infiles)))
-    write(outfile,infiles)
-
-if __name__=='__main__':
-    main(sys.argv[1:])

Property changes on: checkout/doc/md2html.py
___________________________________________________________________
Deleted: svn:executable
## -1 +0,0 ##
-*
\ No newline at end of property
Index: checkout/doc/cddm-report.md
===================================================================
--- checkout/doc/cddm-report.md	(nonexistent)
+++ checkout/doc/cddm-report.md	(revision 94669)
@@ -0,0 +1,2434 @@
+CDDM Workshop Report {#cddm-report}
+====================
+
+\warning <b>This document is retained for the historical record; much
+information here is out-of-date.</b> This is the report from a
+workshop, as described below.  The contents are copied verbatim.  More
+recent information can be found in the \ref documentation
+"Documentation" page.
+
+\date CDDM workshop was held on September 1-2, 2016
+
+&nbsp;
+
+------------------------------------------------------------------------
+
+Purpose
+=======
+
+\note This document requires a management decision about whether it
+will be maintained or its contents organized elsewhere. This is one of
+the actions identified in the CDDM workshop held on Sept. 1-2, 2016
+("Evaluation and dissemination plan for standards and policies"). It
+is understood that its content may require changes.
+
+This document is intended to describe the goals, nature, and strategy
+for code, data, and documentation management of NEMS modeling
+applications and suites.
+
+Overall Goals
+-------------
+
+* Ensure that NCEP operational requirements are met.
+
+* Maintain all components used in NEMS-based suites as a set of
+  community modeling applications that are part of a coordinated,
+  unified modeling system.
+
+* Foster community involvement, experimentation, and model improvement
+  to accelerate R2X.
+
+* Promote accessible, documented, robust, and portable code. 
+
+Stakeholders and Contributors
+-----------------------------
+
+* EMC modeling branch (Vijay Tallapragada)
+* NCO
+* ESMF and CoG team (Cecelia DeLuca, Gerhard Theurich)
+* Developmental Testbed Center (Ligia Bernardet, Laurie Carson)
+* HIWPP (Bonny Strong)
+* Developers from NOAA Laboratories (GSD, PSD, HRD)
+* CESM (Mariana Vertenstein)
+* COLA (Jim Kinter, Larry Marx)
+* VLab team (Ken Sperow)
+
+Others involved in NOAA model development are welcome to comment and contribute.
+
+Terminology and Background
+--------------------------
+
+\note This section was reviewed and approved by Hendrik Tolman. It
+contains terminology that will need to be updated for conformance with
+the system architecture document glossary.
+
+The NOAA Environmental Modeling System (NEMS) is infrastructure that
+underlies models used for predictions of the behavior of Earth's
+environment at a range of time scales. NEMS development is centered at
+the NOAA Environmental Modeling Center (EMC). Examples of other
+coupled modeling systems are the
+[Community Earth System Model (CESM)] (http://www2.cesm.ucar.edu/)
+and the 
+[Met Office Unified Model] (http://www.metoffice.gov.uk/research/modelling-systems/unified-model).
+
+NEMS includes model coupling and utility infrastructure and is
+associated with a collection of model components representing major
+Earth system domains and processes.  External model components have a
+primary development repository that is not at EMC. In general, model
+components are coupled through the NEMS mediator (in other modeling
+systems this is often called the "coupler").
+
+NEMS can be assembled into a number of different modeling
+applications. Modeling applications are associated with a purpose,
+like medium-range forecasting; a set of model components; and a set of
+parameters that represent a range of supported options, including
+grids and resolutions. Different NEMS modeling applications can have
+different types and numbers of model components. Also, the same
+physical domain may be represented by different model components in
+different modeling applications. For example, in some NEMS modeling
+applications the ocean component may be HYCOM and in others it may be
+MOM5.
+
+The Unified Global Coupled System (UGCS) is a set of NEMS applications
+that share model components and can operate across weather,
+sub-seasonal, and seasonal scales. UGCS-Seasonal is the first fully
+coupled application to be developed with NEMS.
+
+NEMS is built using the 
+[Earth System Modeling Framework (ESMF)] (http://www.earthsystemmodeling.org/)
+infrastructure software. ESMF provides utilities like generation of
+interpolation weights and time-related classes, and also wrappers that
+create a standard component calling interface. This enables model
+components developed at different sites to be coupled more easily.
+
+The 
+[National Unified Operational Prediction Capability (NUOPC)] (https://earthsystemcog.org/projects/nuopc/)
+Layer
+adds additional rules about how ESMF models interact in order to
+increase their technical interoperability. The NUOPC Layer addresses
+aspects such as the level of build dependencies, standardization of
+initialization phases, and standard names for exchange fields. NEMS is
+an example of a modeling system built using the NUOPC Layer
+architecture. In order to interface with the NUOPC Layer, model
+component developers write wrappers or "caps" for the component. The
+caps contain translations of native data structures (e.g. grids, field
+data, time quantities), by reference or copy, into ESMF data
+structures.
+
+The NEMS-based atmospheric models have two main elements: dynamic core
+and physical parameterizations. The Physics Interoperability Layer is
+a wrapper used to call the physical parameterizations. The physical
+parameterizations for the 
+[Next-Generation Global Prediction System (NGGPS)] (http://www.nws.noaa.gov/ost/nggps/)
+will be contained in the Common Community Physics Package, a
+set of parameterizations modularized by physics type (radiation,
+microphysics, etc.).
+
+A list of NEMS modeling applications is here:
+
+* [VLAB NEMS Modeling Applications Page] (https://vlab.ncep.noaa.gov/group/guest/welcome?p_p_id=101&p_p_lifecycle=0&p_p_state=maximized&p_p_mode=view&_101_struts_action=%2Fasset_publisher%2Fview_content&_101_returnToFullPageURL=%2Fgroup%2Fguest%2Fwelcome&_101_assetEntryId=2316208&_101_type=content&_101_groupId=95585&_101_urlTitle=nems-applications&_101_redirect=https%3A%2F%2Fvlab.ncep.noaa.gov%2Fgroup%2Fguest%2Fwelcome%3Fp_p_id%3D3%26p_p_lifecycle%3D0%26p_p_state%3Dmaximized%26p_p_mode%3Dview%26_3_groupId%3D0%26_3_keywords%3Dnems%2Bapplication%26_3_struts_action%3D%252Fsearch%252Fsearch%26_3_redirect%3D%252Fgroup%252Fguest%252Fwelcome&inheritRedirect=true)
+
+Workflow components refer to all packages that are used to produce
+an end-to-end run, encompassing not only the modeling application
+itself, but also the broader category of software that implements
+functions such as pre-processing, data ingest, and post-processing.
+Each workflow component will include source code, configuration
+settings, and workflow controls for the component.  For example, in
+the Hurricane Weather Research and Forecast (HWRF) suite, a workflow
+component is the Gridpoint Statistical Interpolation (GSI) data
+assimilation, which is executed in different configurations depending
+on storm characteristics, available datasets, etc.
+
+A modeling suite is a collection of workflow components assembled for
+a particular purpose.  Each operational modeling suite is such a
+collection. For example, the North American Mesoscale (NAM) suite
+includes a data assimilation workflow component, a NEMS modeling
+application, etc.  The Short-Range Ensemble Forecast (SREF) suite
+includes a similar NEMS modeling application (the Non-Hydrostatic
+Multiscale Model on the B-grid, no ocean) but has additional workflow
+components for ensemble processing.
+
+For illustration purposes, Table 1 shows a simplified list of workflow
+components for two existing suites, the NAM and the HWRF, along with a
+future suite (NGGPS Numerical Weather Prediction - NWP). The table
+shows that NEMS-based suites, such as NAM and NGGPS NWP, use several
+workflow components which are common to NEMS and/or non-NEMS
+suites. The following workflow components are highlighted in the
+table: GSI, WRF Preprocessing System (WPS), NEMS Preprocessing System
+(NPS), and Unified Post Processor (UPP).
+
+#### Table 1: Workflow components used in a suite (simplified)
+
+| Suite     | GSI    | WPS   | NPS   | NEMS   | UPP    |
+| :-------: | :----: | :---: | :---: | :----: | :----: |
+| NAM       | YES    |       | YES   | YES    | YES    |
+| NGGPS NWP | YES    |       |       | YES    | YES    |
+| HWRF      | YES    | YES   |       |        | YES    |
+
+Modeling Applications and Suites
+================================
+
+Parts of a NEMS-based Modeling Application
+------------------------------------------
+
+* NEMS coupling software
+* NEMS model components for atmosphere, land, ocean, ice, ionosphere etc.
+* NEMS build system and configuration scripts
+* NUOPC caps for each component
+* Physics Interoperability Layer 
+* Common Community Physics Package
+
+Parts of a Modeling Suite
+-------------------------
+
+* Modeling application(s)
+* Build system and makefiles for all workflow components
+* Pre-processors and data assimilation codes (NPS, GSI etc.)
+* Post-processors, product generators, verification, etc.
+* Running scripts, configuration files, suite automation files
+
+Recent Changes at NCEP
+======================
+
+\note This section was reviewed and approved by Hendrik Tolman.
+
+There are three ways in which new code development at NCEP is
+significantly different than what has come before. These changes have
+ramifications for repository and code management.
+
+First, there is increasing emphasis on community modeling. Numerous
+model components developed external to NCEP through community-based
+approaches are being incorporated into NCEP operations. Further, the
+model components (such as WAVEWATCH III) and modeling applications
+that are developed at NCEP centers increasingly engage the broader
+community throughout the development process.
+
+
+Second, there are many more model components in NCEP modeling
+applications than before, with more than eight types of
+separable model components (e.g. ocean) anticipated in NEMS, and
+multiple options for each type (e.g. MOM5, HYCOM, data and stub
+versions of the ocean component). This is a fundamental change that
+affects code management in a number of ways. Repository plans, testing
+and experimentation, and other aspects of code handling require
+special consideration. We include in the contributors to this document
+experts in the management, community support, and evaluation of
+many-component modeling applications.
+
+Third, there is a desire to move to a unified modeling system that can
+operate more seamlessly across temporal and spatial scales. The common
+NEMS framework will be used to compose multiple modeling applications
+that can be used for different purposes. This means that where model
+components are common to modeling applications, and where workflow
+components are common to suites, changes need to be synchronized
+across the modeling applications and suites. For this reason, the
+repository and code management strategies for NEMS-based modeling
+applications and suites need to be considered holistically.
+
+Modes of Use and Implications
+=============================
+
+The expectations about the modes of use for NEMS modeling applications
+and suites, including both operations and research, will inform
+choices about the repository and code management strategy.
+
+Operations
+----------
+
+EMC builds modeling applications to run within production suites.  The
+goal is to create end-to-end systems that includes observational
+processing, data assimilation, model forecasts, post processing,
+verification, and assessment.
+
+Operational Workflows for Community Testing
+-------------------------------------------
+
+In order to support community engagement in the testing of modeling
+applications in operational suites, it is critical for the broader
+community to be able to run the production suites and evaluate changes
+according to skill and other prescribed metrics. This mode of
+community evaluation offers the most direct path from research to
+operations.
+
+For the UGCS-Seasonal application, the expectation is that EMC will
+collaborate with the community on the design of a test harness, where
+EMC will make the control runs. Various partners and collaborators
+that are working on the upgrades to the physics, etc. will experiment
+with this basic version. All results will be posted to the EMC
+server. EMC will also work with the community on developing
+verification methods, such as a climate scorecard. As new
+components/upgrades take place, EMC will upgrade the repository trunk
+of the application accordingly.
+
+Model Development, Research, and Testing Workflows
+--------------------------------------------------
+
+It will be useful from both a scientific and practical perspective for
+model developers and researchers to be able to run NEMS modeling
+applications and suites in ways that provide more flexibility than the
+operational workflows. For example, substantial research is needed to
+improve understanding of the interactions and feedbacks between system
+components. In addition, the option to configure and run a modeling
+suite in a research mode, which may differ from the operational
+modeling suites, is important for the transition of research
+capabilities to operational systems.  Providing easy access to the
+NEMS mediator in an environment conducive to experimentation, such as
+NCAR's CIME, will promote use of the ESMF/NUOPC interfaces and support
+research to operations across a broader community.
+
+Intervening at the interface between component models makes it
+possible to control feedbacks between component models, a powerful
+paradigm for evaluating the role of those feedbacks, and it makes it
+possible to artificially alter the interactions among components to
+determine sensitivity and predictability. Similarly, alternative
+configurations such as the interactive ensemble (multiple simultaneous
+instantiations of one or more component models),
+super-parameterization (sub-cycled, sub-grid-scale representation of
+processes), and others, should be supported with the operational
+component models and the NEMS mediator.
+
+To support such controlled experimentation, it is helpful if each
+model component can be run in a number of modes: fully prognostic
+mode, as a data model component that reads in observational or model
+generated data, or as a stub to satisfy interface requirements. This
+flexible design permits feedbacks between components to be selectively
+turned off, thereby enabling researchers to address a broad range of
+scientific questions and also to determine what aspects and components
+of the Earth system must be dynamic and fully coupled in order to
+satisfy operational forecasting requirements.
+
+In order to be used in a community context, it is important to enable
+users to easily create custom configurations for their experiments,
+including running numerous out of the box experiments with various
+levels of model feedbacks enabled at a number of model resolutions and
+configurations.
+
+Non-scientific (e.g. data, stub) versions of components can also be
+useful for introducing new components. The technical integration of
+existing prognostic components into the coupled system often happens
+in three stages. In the first stage, feedbacks are turned off and
+validation is carried out in a "stand-alone" configuration. In the
+second stage, a few selected feedbacks are turned on. In the final
+stage, full coupling with all prognostic components is implemented and
+evaluated. This sort of independent development of components followed
+by an integration stage may not always be ideal for scientific
+development of coupled systems, but it is an approach used frequently
+nonetheless.
+
+For any complex software system, and especially for a coupled modeling
+system, automated system and unit tests to verify infrastructure
+functionality on a variety of machines and compilers are
+essential. These tests satisfy a number of high-level software
+requirements, such as the ability to give bit-for-bit identical
+answers after restarting from a checkpoint, providing answers
+independent of processor and/or OpenMP thread count, and ensuring that
+code refactorings do not change answers unless they are expected to do
+so.  It is useful to be able to run the entire system test suite with
+a single command.
+
+Operating Principles
+====================
+
+\note Management review of this section is one of the items identified as an action in the CDDM workshop on Sept 1-2, 2016 ("Operating Principles").
+
+The following are a set of operating principles (OP later in the
+document) that apply across multiple categories of requirements.
+
+* Make decisions with the end goal of producing the best possible forecasts, based on evidence rather than assertion.
+
+* Operate efficiently. Avoid redundancy, and favor automation over manual processes.
+
+* Promote transparency. To the extent possible, make code, data, documentation, requirements, policies, and plans openly available and clear.
+
+* Encourage community ownership and participation. Establish processes that enable partners to engage in decision-making.
+
+* Ensure that organizational structures support clear responsibilities and accountability.
+
+Roles needed
+============
+
+\note Completion and review of this section is one of the items
+identified as an action following the CDDM Sept. 1-2, 2016 workshop
+("Organizational Description").
+
+* **Component code managers** --- These are people internal to EMC
+  who are responsible for the technical aspects of the component code
+  and its integration into the unified system.
+
+* **External component points of contact** --- These are people
+  associated with external component development who can serve as a
+  point of contact for coordination and synchronization with EMC.
+
+* **Application leads** --- People who are responsible for the
+  scientific and technical development and delivery of a particular
+  application.
+
+The current status at EMC is that there is a technical team with the following members:
+
+* Mark Iredell: manager
+* Sam Trahan: NEMS code manager
+* Valbona Kunkel: Documentation
+* Hang Lei: Physics interface
+* Terry McGuinness: workflow
+* Kate Howard: scripts
+
+\note It would be useful to describe the responsibilities of this team.
+
+Decision Making about Code, Data, and Documentation Processes and Policies
+--------------------------------------------------------------------------
+
+\note Following the Sept 1-2, 2016 CDDM workshop, NCEP/EMC agreed to
+propose a process for reviewing software policies and processes. This
+has not been delivered.
+
+What this might look like:
+
+1. NCEP/EMC software lead decides whether or not the proposal merits review.
+
+2. NCEP/EMC software lead and the proposer(s) decide on a team of reviewers.
+
+3. One or more calls or meetings are scheduled to discuss and refine the proposal.
+
+4. A decision is made following a clear protocol.
+
+5. The proposal is included in a logical place in a developer guidance
+document, announced if appropriate, and posted to a development
+website.
+
+6. Actions in the proposal are implemented.
+
+Requirements Format and Collection
+==================================
+
+\note This section requires management review and
+concurrence. Currently, although EMC participated in formulating these
+guidelines, they are not following them. The convention should be
+modified or replaced with the format and procedures NCEP/EMC intends
+to follow for collecting and disseminating requirements.
+
+Documented requirements and expectations, collected from appropriate
+stakeholders, serve as a foundation for formulating strategies and
+making decisions.
+
+This document introduces a convention for requirements collection and
+presentation. Each entry includes:
+
+* **Id** --- Requirement short identifier and number, e.g. SM1 (Software Management 1)
+
+* **Type** --- Current classifications include goal (general guidance
+  or direction), expectation (exp; an assumption), requirement (req; a
+  necessity), and recommendation (rec; a desire or suggestion).
+
+* **Item** --- Description of the entry.
+
+* **Reason** --- Rationale or motivation for the entry.
+
+* **Source** --- Person or group that originated the entry.
+
+* **Status** --- Implementation status or timeline associated with the entry.
+
+A simple requirements collection process typically involves
+identifying a scope, coordinator or moderator, a set of stakeholders,
+and assembling the requirements through a joint discussion. A new set
+of requirements can be expected to evolve and become a basis for
+action through ongoing vetting, periodic updates, and being referenced
+by those developing plans and products.
+
+Software Management
+-------------------
+
+### Goals, Requirements and Expectations
+
+\note This requirements section requires management review and concurrence. 
+
+In the chart below, OP stands for Operating Principles.
+
+
+### SM1: Minimize Software Repositories Per Component
+
+Minimize the number of software repositories required per
+component. Best practices in software configuration management
+recommend using a shared common repository for development where
+feasible.  New development can be managed using branches (or forks or
+equivalent) with the understanding of a common authoritative source
+(master/trunk) and a procedure for integrating new development into
+the source repository.  This approach utilizes the strengths of
+configuration management tools, while minimizing the work and risk
+involved in maintaining duplicate repositories.  
+
+**Type**: goal
+
+**Source**: GMTB
+
+**Status**: No policy in place
+
+**Reason**: OP, avoid duplication
+
+
+### SM2: Source Code Available
+
+All source code for operational modeling applications and suites must
+be available on EMC computers.
+
+**Type**: Requirement
+
+**Source**: NCO
+
+**Status**: Implemented
+
+**Reason**: Availability in case of problems
+
+
+### SM3: Accessible Repositories
+
+NEMS modeling applications and suites are expected to utilize multiple
+development repositories, including repositories managed externally
+and by EMC. It must be possible for developers (NOAA and non-NOAA) to
+access codes in the development repositories, workspaces, and
+trackers, with specific permission levels (read, read/write).
+
+**Type**: EXP
+
+**Source**: OAS
+
+**Status**: Not fully implemented; some key developers do not have access to workspaces and trackers.
+
+**Reason**: Access is needed for development.
+
+
+### SM3a: Current Operational Code is Available
+
+It is essential that the currently operational code can be checked
+out. An exact mirror repository should be maintained that always has
+the latest operational code.  
+
+**Type**: Requirement
+
+**Source**: COLA/Kinter
+
+**Status**: No policy in place
+
+**Reason**: This is needed to streamline the transition from research
+to operations
+
+
+### SM4: EMC Repository Developer Responsibilities
+
+The following apply to developers working in an EMC repository:
+* Developer maintains own branch of trunk.
+* Commit work back to branch from working copy frequently.
+* Keep up to date with trunk.
+* Use test harness for regression and suite testing prior to commits.
+* Use ticket system as required.
+
+**Type**: EXP
+
+**Source**: EMC/Tolman
+
+**Status**: Unable to implement because of lack of access to computers and trackers.
+
+**Reason**: Follow good software practices for development.
+
+
+### SM5: External Component Development Responsibilities
+
+The following apply to developers working with external components:
+
+* If procedures used in the external, authoritative repository are
+  compatible with NEMS development, the procedures specific to that
+  component will be followed.
+
+* If the external, authoritative repository cannot support component
+  development for NEMS, a development repository for EMC use will be
+  established and the procedures established for EMC repositories
+  followed.
+
+**Status**: Implemented.
+
+**Reason**: Balance between low process overhead and control over processes.
+
+**Type**: Requirement
+
+**Source**: OAS
+
+
+### SM6: Components have Identified Leaders
+
+There is an identified EMC component lead for all model and suite
+components. There is an external component lead identified for
+external components.
+
+**Type**: Requirement
+
+**Source**: NCO
+
+**Status**: Implemented
+
+**Reason**: OP, accountability.
+
+
+### SM7: Identified Leaders for NEMS Documentation
+
+There are identified leads for the overall NEMS system development at EMC.
+
+**Type**: Requirement
+
+**Source**: OAS
+
+**Status**: Not implemented.
+
+**Reason**: OP, accountability.
+
+
+### SM8: Synchronization Path to External Component Repositories
+
+Code changes to external components taking place in their native
+repositories must have a path for synchronization with changes made to
+these components at EMC, and vice versa. It is up to the EMC component
+lead to synchronize changes between development and operational
+repositories, in coordination with the external component lead. If
+necessary, users can download a tar file of a code release and return
+changes via tar file.
+
+**Type**: Requirement
+
+**Source**: EMC/Tolman
+
+**Status**: No controls in place.
+
+**Reason**: Need for synchronization of development to maintain
+  coherence in community-based unified system.
+
+
+### SM9: Synchronization Path of Component Code Between Applications
+
+Changes in components and infrastructure made for any given NEMS
+modeling application must have a process for synchronization with
+versions of these codes used by other NEMS applications.
+
+**Type**: Requirement
+
+**Source**: OAS
+
+**Status**: No policy in place.
+
+**Reason**: Need for synchronization of development to maintain
+  coherence in community-based unified system.
+
+
+### SM10: Standardized Testing and Implementation System
+
+There is standard regression, suite, operations testing for
+respectively software, science, and implementation.
+
+**Type**: Requirement
+
+**Source**: OAS
+
+**Status**: Key processes not implemented.
+
+**Reason**: Critical part of software process.
+
+
+### SM11: Repository Strategy Supporting Many Components and Applications
+
+The repository strategy must support testing and experimentation with many-component modeling applications and suites.
+
+**Type**: Requirement
+
+**Source**: OAS
+
+**Status**: Repository strategy is not fully defined.
+
+**Reason**: Needed to manage development in multi-component, multi-application NEMS system.
+
+
+### SM12: Component Versions Associated with Application Versions
+
+It must be possible to easily assemble a version of a particular
+modeling application, with versioned constituent components.
+
+**Type**: Requirement
+
+**Source**: OAS
+
+**Status**: Implemented
+
+**Reason**: Needed to manage development in multi-component,
+   multi-application NEMS system.
+
+
+### SM13: Availability of Stub, Data, and Active Model Components
+
+Model components must offer active, stub, and data versions for
+testing and experimentation.
+
+**Type**: Requirement
+
+**Source**: OAS, EMC/Grumbine
+
+**Status**: Data versions are not available.
+
+**Reason**: Needed for testing and development.
+
+
+Tools and Options: Collaboration Environments and Workflow Software
+===================================================================
+
+NWP Information Technology Environment (NITE)
+---------------------------------------------
+
+* [NWP Information Technology Environment (NITE)] (http://www.dtcenter.org/eval/NITE/)
+
+NITE is the collective name of various elements needed to support a
+computational infrastructure for simplified configuration and
+execution of experiments with NCEP modeling suites. These elements
+are: data management, code management, suite configuration, scripts,
+workflow management system, documentation/training, and database of
+experiment metadata. The Developmental Testbed Center (DTC) has
+created a 
+[preliminary NITE design] (http://www.dtcenter.org/eval/NITE/NITE-report-AOP2014.pdf),
+with a focus of making code, datasets, and running infrastructure
+easily available to NCEP and its external collaborators. EMC
+leadership has indicated that this design will be considered for
+building the NGGPS infrastructure. Since code management is one of the
+NITE elements, the recommendations contained in this document can be
+considered as an integral part of NITE.
+
+Virtual Lab (VLab)
+------------------
+
+The VLab is a service and IT framework, enabling NOAA employees and
+their partners to share ideas, collaborate, engage in software
+development, and conduct applied research.  The VLab is comprised of
+two main components:
+
+* Virtual Lab Collaboration Services (VLCS)
+* Virtual Lab Development Services (VLDS)
+
+The VLCS is built upon a feature rich open source Java portal
+framework called Liferay. The VLCS enables users to share and
+contribute science and participate in dynamic virtual communities. The
+VLCS provides powerful tools for collaborating such as document
+libraries, wikis, message boards, blogs, announcements, dynamic forms
+with workflow, and a content management system (CMS). All tools within
+the VLCS are searchable and centralized.  The VLCS provides a full
+featured administrative console and robust roles and permissions
+framework.
+
+The VLDS provides web-based services to help manage projects via issue
+tracking, source control sharing, code review, and continuous
+integration. Redmine is used for issue tracking for all projects
+and for access control of simple projects' source code repositories
+within the VLab.  Subversion and Git, a distributed configuration
+management (CM) system, are used for source code control within
+the VLab and integrate with Redmine.  Gerrit provides web-based code
+review and project management tools to Git-based projects in the VLab.
+Jenkins provides a web-based continuous integration tool within the
+VLab.  Projects can use Jenkins to automate the building of their
+code, execute unit tests, and perform custom checks anytime code is
+modified.
+
+Earth System CoG
+----------------
+
+The Earth System CoG is an open, multi-agency collaboration
+environment that supports project hosting and linked project networks
+for software development and data dissemination. Project web pages are
+wikis that are easy to create and navigate. The environment is set up
+as a federation, so that federal centers and universities can install
+their own local CoG, but also access and link to projects across the
+full set of CoGs. CoG provides views of information across entire
+networks (for example, all people or all repository links). CoG also
+offers extensive data archival and search services through an
+integrated interface to the 
+[Earth System Grid Federation (ESGF)](http://esgf.llnl.gov/),
+an international data distribution network. A large network of CoG
+site installations around the world is being used to support data
+dissemination of model output in support of the Intergovernmental
+Panel on Climate Change (IPCC) model intercomparison projects.
+
+A CoG is installed at NOAA ESRL and is being used to host the
+[workspace for NEMS model coupling](http://cog-esgf.esrl.noaa.gov/projects/couplednems/),
+along with the
+[ESMF site](https://www.earthsystemcog.org/projects/esmf/), the
+[NUOPC Layer site](http://www.earthsystemcog.org/projects/nuopc/),
+and workspaces for individual NEMS-based applications such
+as the
+[WAM-IPE space weather coupling](http://cog-esgf.esrl.noaa.gov/projects/wam_ipe/).
+There is also a CoG at GFDL, and a CoG installation is underway at NCEI.
+
+\note Update the link to the WAM-IPE webpage if it is moved to the repository
+
+CIME
+----
+
+The Common Infrastructure for Modeling the Earth (CIME) is a public,
+GitHub-based infrastructure repository initiated by the Community
+Earth System Model (CESM) software engineering group in April
+2015. CIME was developed as a response to the February 2
+
+
+Tools and Options: Repository Software and Services
+===================================================
+
+In this section we describe options related to code access and
+developer access (which assumes write access) for infrastructure and
+scientific software and documentation.
+
+Repository Software: Git and Subversion
+---------------------------------------
+
+[Git] (https://git-scm.com/)
+and
+[Subversion (SVN)] (https://subversion.apache.org/)
+are both widely used version control software packages. There are
+several advantages to using Git relative to Subversion and other
+version control software. Git is faster, was designed to be fully
+distributed from the start, and provides a forking service which
+simplifies the process for developers to submit development work back
+to a master repository. In addition, the following advantages of Git
+are listed at:
+
+* https://git.wiki.kernel.org/index.php/GitSvnComparison
+
+Several teams that support multi-component systems, including CESM,
+mix Git and Subversion in order to get desirable properties from
+each. Using both can also be a viable implementation strategy for
+non-disruptive transitions from use of Subversion to full use of
+Git. Such an approach is described in the Current Practice and
+Recommended Evolution section.
+
+Repository Software Service: Github, VLab, or elsewhere (such as NCAR or EMC servers)
+-------------------------------------------------------------------------------------
+
+Having the source code accessible is very important for EMC to meet
+its mission of fostering community involvement for NWP
+development. Therefore it needs to be straightforward for community
+members to obtain access to the code repositories that house the
+components of NEMS-based modeling applications and suites.
+
+
+Most EMC NWP codes currently reside in SVN repositories accessible
+through EMC servers. Access to those servers is very restricted to
+non-NOAA developers. One exception is HWRF, whose code resides at NCAR
+and GSD and is managed by DTC. To alleviate access restrictions to the
+general community, DTC has also mirrored some parts of the EMC SVN
+repositories at NCAR (for example, NEMS and GSI). However, the mirrors
+are labor intensive, prone to error, and many times insufficient (when
+not all branches are mirrored).
+
+In contrast, access to code repositories served through GitHub is
+straightforward. Access to VLab is likely to be easy as
+well. Therefore, those alternatives are expected to foster a more
+robust collaborative environment at lower cost.
+
+In addition to repository access, both GitHub and VLab have several
+tools that greatly facilitate collaborative software
+development. GitHub provides both wikis and issue tracking, which can be
+used to track repository-specific issues or general issues
+for the whole project. GitHub provides tools for inspecting the
+history and structure of the repository. It has excellent
+documentation that permits both novice users and experts to make
+optimal use of Git, and provides an organizational structure that
+allows for both public and private repositories on a very
+cost-effective basis. Finally, GitHub has both excellent uptime and
+customer support.
+
+Repository Access and Use Procedures
+------------------------------------
+
+<table>
+<tr><th>&nbsp;</th><th>EMC</th><th>CIME</th><th>VLab</th></tr>
+<tr>
+  <th>Contents</th><td>
+        Science components, coupling infrastructure, workflow infrastructure
+  </td><td>
+        Infrastructure only; currently CESM and ACME coupling and workflow infrastructure and non-scientific versions of model components
+  </td><td>
+        Any NOAA R&D project repositories
+  </td>
+</tr><tr>
+  <th>
+        Version control software protocol
+  </th><td>
+        Subversion
+  </td><td>
+        Git 
+  </td><td>
+        Git or Subversion
+  </td>
+</tr><tr>
+  <th>
+        Version control software service
+  </th><td>
+        EMC server
+  </td><td>
+        Github
+  </td><td>
+        Subversion and Git over https or Git through Gerrit
+  </td>
+</tr><tr>
+  <th>
+        Public download?
+  </th><td>
+        Not for development code.
+  </td><td>
+        Development and release versions can be downloaded by anyone from https://github.com/CESM-Development/cime. 
+  </td><td>
+        Not currently.  Must have a NOAA LDAP login or VLab external partner login.
+  </td>
+</tr><tr>
+  <th>
+        Information collected from users?
+  </th><td>
+        NOAA computer access requires submission of fingerprints and detailed personal history for a Federal background investigation. 
+  </td><td>
+        No information is collected other than that required for a Github account.
+  </td><td>
+        None required for NOAA accounts.  NOAA sponsors of external partners must supply basic contact information for the external partner (e.g., phone, email, name, and justification). 
+  </td>
+</tr><tr>
+  <th>
+        Download metrics available?
+  </th><td>
+        Not easily
+  </td><td>
+        Github tracks downloads/clones, and this is available to project administrators
+  </td><td>
+        Some usage statistics are available through Google Analytics
+  </td>
+</tr><tr>
+  <th>
+        Typical length of time required for read access?
+  </th><td>
+        2-6 months
+  </td><td>
+        Immediate
+  </td><td>
+        Same or next day
+  </td>
+</tr><tr>
+  <th>
+        Typical length of time required for write access?
+  </th><td>
+        At least 6 months
+  </td><td>
+        Less than a day to a few days
+  </td><td>
+        Same or next day
+  </td>
+</tr>
+</table>
+
+
+
+
+Procedure for Developer Access
+------------------------------
+
+### CIME
+
+To write to CIME, a user issues a pull request on GitHub. This is documented at
+
+* https://github.com/CESM-Development/cime/wiki/CIME-Development-Guide
+
+Anyone can issue a pull request, regardless of whether they are inside or
+outside of NOAA. Currently, the gatekeepers for determining whether or
+not to accept a pull request are members of the CESM Software
+Engineering Group (CSEG). However, this governance structure can be
+extended depending on the collaboration. As an example, CESM and the
+DOE/ACME project have created a new GitHub repository,
+https://github.com/ESMCI/cime, that currently mirrors CIME, but where
+the determination of which pull requests to accept is made jointly by
+both CSEG and ACME software engineers. A similar CIME repository
+structure can also be established with NOAA, or the CIME administration
+structure can be easily enhanced using GitHub's administration and
+organizational tools.
+
+CSEG has established a workflow for using and developing CIME. This
+can be found at
+
+* https://github.com/CESM-Development/cime/wiki/CIME-Git-Workflow
+
+Further documentation for dealing with the externals in CIME can also
+be found at
+
+* https://github.com/CESM-Development/cime/wiki/Managing-externals-included-in-CIME.
+
+CIME is still in its infancy; as the number of collaborators
+interested in using and contributing to it continues to grow, a more
+formal governance structure will need to be established.
+
+### VLab
+
+Any user with a NOAA LDAP ID has access to the VLab.  If a user does
+not have a NOAA LDAP ID, a NOAA user can request a VLab account for
+external partners (e.g., other government agencies, university
+partners) through VLab forms.  Project owners add users as members of
+their project within VLab's Redmine web interface to give them access
+to the underlying repository.  See the
+[VLab Wiki guidance](https://vlab.ncep.noaa.gov/redmine/projects/vlab/wiki/Help_for_Project_OwnersManagers#Adding-Members-to-a-Project)
+for more information.
+
+### EMC Repository
+
+Access to NOAA computers requires the following steps:
+     
+* Obtain and utilize a NOAA email address to communicate regarding
+  NOAA issues. This requires several steps (allow 2-4 weeks of
+  processing time).
+
+* Complete NOAA's annual IT Security Awareness Course (ITSAC) (1 hour
+  of security training).  Repeated annually.
+
+* Complete a National Agency Check and Inquiries (NACI) background
+  check. This includes fingerprinting and providing a detailed
+  personal history (allow minimum of 6 months).
+
+* Computer account application (allow a minimum of 2-4 weeks of
+  processing time).
+
+* Request access to the NEMS repository and group permissions (allow
+  1-2 weeks of processing time).
+
+* Users must login every 30 days to keep their proxy certificate up to
+  date or their account will be disabled.
+
+* Verify account information on an annual basis.
+
+There is a slightly faster path: acquiring checkout approval for the
+EMC NEMS SVN repository takes on the order of a month or two, and the
+code can be checked out onto a widely accessible computer at NCAR
+(Yellowstone). In practice, this solution often requires people to
+move files across machines in order to collaborate, and is
+error-prone.
+
+
+
+
+
+
+
+
+
+
+
+
+
+Software Management Current Practice and Recommended Evolution
+==============================================================
+
+\note This section requires management review and concurrence. The
+assembly of a plan for repository management is one of the items
+identified as an action in the CDDM workshop on Sept. 1-2, 2016
+("Repository reference document"). There is also a technical action
+associated with this section ("Web-based interface for account
+requests").
+
+NEMS Repository
+---------------
+
+NEMS modeling applications are becoming more complex due to the fact
+that they are now multi-component applications. The matrix view, with
+components along the horizontal, and applications along the vertical,
+is a useful visualization of this. It can be seen in the 
+[NEMS application list](https://vlab.ncep.noaa.gov/group/guest/welcome?p_p_id=101&p_p_lifecycle=0&p_p_state=maximized&p_p_mode=view&_101_struts_action=%2Fasset_publisher%2Fview_content&_101_returnToFullPageURL=%2Fgroup%2Fguest%2Fwelcome&_101_assetEntryId=2316208&_101_type=content&_101_groupId=95585&_101_urlTitle=nems-applications&_101_redirect=https%3A%2F%2Fvlab.ncep.noaa.gov%2Fgroup%2Fguest%2Fwelcome%3Fp_p_id%3D3%26p_p_lifecycle%3D0%26p_p_state%3Dmaximized%26p_p_mode%3Dview%26_3_groupId%3D0%26_3_keywords%3Dnems%2Bapplication%26_3_struts_action%3D%252Fsearch%252Fsearch%26_3_redirect%3D%252Fgroup%252Fguest%252Fwelcome&inheritRedirect=true)
+and also in the matrix of workflow components shown in the
+Terminology and Background section of this document. These matrices
+show that the same components are going to be used in several modeling
+applications. In other words, there are complex software interactions
+going many different ways within the matrices. The desire for a
+unified system recognizes the advantage of not treating each
+application line in the matrices as separate from the others. This is
+also reflected in the software management requirements and
+expectations. The following approach carries this awareness through to
+the construction of the revision control architecture.
+
+In the proposed revision control architecture, the applications, the
+components, and the NEMS infrastructure itself are top level
+directories under the EMC /projects repository. EMC refers to each of
+these directories as a project. The connection to the matrix
+representation is made by introducing SVN externals (here just called
+links). The proposal is to recognize the components as the primary
+sorting axis, and to introduce an SVN external link to a specific
+revision of each component from each application. The application
+group still has full control over the component sources, but there is
+this recognition that the components are the building blocks. Figure 1
+shows a UGCS-Seasonal revision defined as a set of links to specific
+revisions of NEMS (which currently includes GSM), MOM5, and CICE.
+
+Building a complex system that supports many components and many
+applications is not something that happens without a lot of
+effort. It's like a family: each member needs to grow individually,
+and they need the freedom to do so, but if you want to still be
+recognizable as a family, there are times for each member to put
+effort into something that maybe is not directly their interest. The
+same applies here: if the vision of a unified modeling system is going to be
+realized, there must be times when changes to common pieces are
+reconciled again. The more convenient and obvious we can make this
+within the revision control architecture, the better. Requirement SM6,
+which requires a liaison internal to EMC for each component, and a
+liaison external to EMC for each component, begins to create a
+structure for coordination.
+
+It should also be realized that we are trying to set up an
+evolutionary rapid development process. This is in many ways a new
+approach for EMC, but really the only way that applications can be
+delivered in time, while moving toward a unified modeling system. It's
+probably fair to say that the era in which you could wait for a
+"working system" for a specific application, and then go and modify it
+for the operational need, is over. Instead the system as a whole must
+be evolvable, ready to react, and to do this systematically and
+responsively. On the source level, SVN with the proposed repo
+architecture can serve as the necessary backbone for this development
+style.
+
+\image html repo-structure.png "Figure 1. Project Directory Structure for NEMS Components and Applications"
+
+Proposed directory structure for the CFS version 3 project.
+
+Component Sets (Compsets)
+-------------------------
+
+The concept of "component sets", or "compsets" for short, has been
+used successfully by the
+[CESM](http://www2.cesm.ucar.edu/)
+project for some time to 
+[systematically label different run configurations](http://www.cesm.ucar.edu/models/cesm1.2/cesm/doc/modelnl/compsets.html).
+The labels are associated with scripts that pull
+together all the files and inputs needed to run the specified
+configurations. This approach offers a number of benefits:
+
+* standard runs can be set up easily and consistently
+
+* it is a very effective way to implement regression testing across a
+  system with many possible combinations of components
+
+* it is a systematic way of managing the complexity associated with
+  not just many components, but multiple (data, stub) non-prognostic
+  versions of components, and facilitates controlled experimentation
+
+Compsets are currently being used only for technical testing in NEMS,
+but could also be applied to scientific model configuration and
+validation. More information about compsets is in the 
+\ref documentation "NEMS Users Guide".
+
+Guidelines for Software Checkins and Updates
+--------------------------------------------
+
+The instructions in this section use the following terms to refer to the parts of a URL pointing to a software package "ABC" that is under SVN revision control:
+
+**Complete URL:**
+
+ * https://server.noaa.gov/path/ABC/subdir
+
+This is the complete URL that can be used in a SVN checkout command to obtain an instance of the full source code structure of the ABC software.
+
+**Base URL:**
+
+ * https://server.noaa.gov/path/ABC
+
+This is the base URL of the ABC software. Following SVN conventions,
+there should be exactly three canonical subdirs under the base URL:
+trunk, branches, and tags. However, this is only a convention and
+there is no enforcement mechanism.
+
+**Trunk URL:**
+
+ * https://server.noaa.gov/path/ABC/trunk
+
+This is the complete URL that points to the trunk of the ABC software
+(following SVN convention).
+
+**Branch URL:**
+
+ * https://server.noaa.gov/path/ABC/branches/name
+
+This is the complete URL that points to the `name` branch of the ABC
+software (following SVN convention).
+
+In order to build, run, and modify a NEMS application the first step
+is to check out from the appropriate "complete URL". Depending on the
+circumstances, this may be a "trunk URL" or a "branch URL". Also, you
+may currently run into a variety of "base URL" options. Assuming that
+NEMS application ABC was set up according to the conventions outlined
+in the previous section, the "trunk URL" will look like this:
+
+* https://svnemc.ncep.noaa.gov/projects/ABC/trunk
+
+However, currently no applications exist directly under the
+/projects level; instead, most applications are located under the NEMS
+project at /projects/nems/apps. This directory was introduced as a
+temporary staging area for NEMS applications. For example, the "trunk
+URL" of the current UGCS-Seasonal application is
+
+* https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Seasonal/trunk
+
+It has been the plan for a while to move the staging area away from
+the NEMS project in the short term. Instead, a more generic location,
+e.g.  /projects/nems-X/apps, should be created.  However, this
+transition has not yet happened.  In the long run, sanctioned NEMS
+applications should end up with a "base URL" directly under /projects.
+
+A few remarks:
+
+* The following instructions apply no matter what the current "base URL" of the NEMS application looks like. 
+
+* It is assumed that the application liaison has provided you with the
+  appropriate "complete URL" for the work you are planning to
+  perform. This may be a "trunk URL", or a "branch URL". Either way,
+  the following instructions apply.
+
+* In order to keep the instructions general the string
+  `(complete-URL)` is used to stand for the complete URL you were told
+  to use.
+
+* The string ABC is used to stand for the name of the application.
+
+* The string REV is used to stand for a specific SVN revision number.
+
+### Checking out and building a specific version of the application code
+
+    svn co -r REV (complete-URL) ABC
+    cd ABC
+    ./NEMS/NEMSAppBuilder
+
+This will check out the source code into directory ABC, and build the
+application. It is up to the application liaison to ensure that the
+complete URL provided is appropriate. In most cases this means that
+it is strictly versioned, i.e. each of the constituent components
+pulled in via SVN externals are referenced with specific revision
+numbers. This typically also means that you received a revision number
+REV together with the complete URL.
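+
+For illustration, a strictly versioned svn:externals property on the
+application root directory might look like the following sketch (the
+revision numbers, and the GSM path, are hypothetical):
+
+    -r 81234 https://svnemc.ncep.noaa.gov/projects/nems/trunk NEMS
+    -r 81199 https://svnemc.ncep.noaa.gov/projects/gsm/trunk  GSM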
+
+In the case of the UGCS-Seasonal application, the checkout step above
+will currently check out a directory structure similar to this:
+
+* UGCS-Seasonal
+  + NEMS     --(svn:externals)--> rev on EMC Subversion trunk
+  + GSM      --(svn:externals)--> rev on EMC Subversion trunk
+  + NMM      --(svn:externals)--> rev on EMC Subversion trunk
+  + CICE     --(svn:externals)--> rev on GitHub
+  + CICE_CAP --(svn:externals)--> rev on GitHub
+  + MOM5     --(svn:externals)--> rev on GitHub
+  + MOM5_CAP --(svn:externals)--> rev on GitHub
+
+
+Note that if you checked out with a specific revision REV, it is
+generally unsafe to do an `svn update` from within the UGCS-Seasonal
+directory. The update will move the code to the current head of the
+checked out complete URL. This also means it will update the SVN
+external links to whatever revision the head version is pointing to.
+
+### Checking out, modifying, committing application code
+
+While it is important to stay with the specific revision of the
+application code (including the constituent components) when
+validating a specific version of the software, a head version is
+required when doing development work. The reason is that code
+modifications can only be committed back to the head, not to a
+specific revision.
+
+There are two scenarios to cover: 1) the current application head is
+not strictly versioned, or 2) the current application head is strictly
+versioned.
+
+1. The convention for the UGCS-Seasonal application is to not strictly
+   version the head of the application trunk during development
+   phases. This means that under most circumstances if you update to
+   the head of the trunk URL of UGCS-Seasonal, all of the SVN
+   externals will be pointing to the constituents' heads, without
+   specific revision numbers. This allows development on the head for
+   each component. When ready, and tested, code changes can easily be
+   committed by `svn commit` from within each affected constituent
+   component subdir. It is also safe to execute `svn update` from
+   within constituent subdirectories.
+
+   Only snapshot revisions of the UGCS-Seasonal trunk are strictly
+   versioned; however, they are followed immediately by a commit that
+   removes strict versioning again from the head. This allows us to
+   provide strictly versioned revisions, while keeping the development
+   process simple.
+
+2. Applications where the head revision is strictly versioned require
+   more attention when modifying and committing code
+   changes. Executing `svn update` directly under a strictly versioned
+   application head will always bring the constituent components back
+   to the revision specified by the SVN externals!
+
+   The constituent components can still be moved (individually) to
+   their head revision by changing into the respective subdirectory and
+   executing `svn update` from within it. This then allows local code
+   modifications to be committed. However, an update on the
+   application level will revert the subdirectory back to the fixed
+   revision according to the SVN external definition. This can be very
+   confusing.
+
+### Proposed Processes for Coordinating Software Across NEMS
+
+\note Link to proposal for code, documentation, and script changes from Sam Trahan.
+
+It is assumed that external components that do not support EMC or
+community access have a local, mirrored repository at EMC.
+
+A proposed central rule is that before (or after) a major release or
+delivery, an application should merge its changes back to all
+constituent component project repositories at EMC, including NEMS
+itself. This is understood to be a minimum; it may be more
+frequent. Additionally, after a release or delivery, a strongly
+versioned tag should be created that includes specific revisions for
+all constituent components.
+
+All components have EMC points of contact and native points of
+contact. The points of contact should define rules for creating
+branches for their component. They should also work out a plan and
+process for migrating changes back to the native component repository,
+if it is outside of EMC.
+
+Questions about process for each component should be directed to both the EMC point of contact and the native point of contact.
+
+Coding standards
+================
+
+\note The proposed standards here need to be reconciled with
+Environmental Equivalence 2. The section should be updated to reflect
+the standards that NCEP/EMC intends to follow.
+
+The following table specifies coding requirements and recommendations
+for a parameterization to be included in CCPP. The intent is to
+promote readability, robustness, and portability without being too
+onerous. The Kalnay rules and work by the NUOPC Physics
+Interoperability Team and EMC personnel had a major impact in creating
+this list. The GSM coding standards described at
+
+* https://svnemc.ncep.noaa.gov/trac/gsm/wiki/GSM%20code%20standards
+
+were taken into account and incorporated as applicable. Unless
+specified otherwise, the Fortran programming language is assumed.
+
+\note Move the GSM coding standards to a public area
+
+
+### CS1: Fortran Implicit None
+
+All Fortran modules and subroutines will contain `implicit none`
+
+**Type**: Requirement
+
+**Source**: GMTB, GSM
+
+**Reason**:
+
+* Assists in writing bug-free code.
+
+* Understanding implicit type rules is difficult and arcane.
+
+* Understanding where a variable comes from (local, input argument
+  list, module) is more difficult with implicit typing
+
+
+### CS2: Fortran \c Intent Attribute
+
+All arguments to subprograms will contain the `intent` attribute
+
+**Type**: Requirement
+
+**Source**: GMTB, NUOPC PI Team, GSM
+
+**Reason**:
+
+* Assists readers in understanding whether a variable is:
+  * read-only: intent(in)
+  * read/write: intent(inout)
+  * effectively uninitialized: intent(out)
+
+* A compiler error will result if code attempts to use a variable
+  differently than specified in its \c intent.
+
+* Declared variables without the \c intent attribute can be understood
+  to be local.
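+
+A short hypothetical subroutine showing all three intents:
+
+    subroutine update_state(t_in, t_out, t_acc)
+      implicit none
+      real, intent(in)    :: t_in   ! read-only input
+      real, intent(out)   :: t_out  ! set here; undefined on entry
+      real, intent(inout) :: t_acc  ! read and updated
+      t_out = t_in + t_acc
+      t_acc = t_acc + t_in
+    end subroutine update_state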
+
+
+### CS3: Fortran 2008 Standard Compliance
+
+No modules or subroutines will violate the Fortran 2008 standard
+
+**Type**: Requirement
+
+**Source**: GMTB
+
+**Status**: Undetermined
+
+**Reason**:
+
+* Makes porting to a new compiler easier to near trivial.
+
+* Example: gfortran by default enforces the standard that free-form
+  source lines will not exceed 132 characters. Some compilers by
+  default allow line lengths to exceed this value. Attempts to port
+  codes with line lengths greater than 132 may encounter difficulty.
+
+
+### CS4: Inline Documentation of Variables
+
+All local and argument list variables will have a comment explaining
+the meaning of the variable. An in-line comment on the declaration
+line is sufficient
+
+**Type**: Requirement
+
+**Source**: GMTB, NUOPC PI Team, GSM
+
+**Status**: Undetermined
+
+**Reason**: Allows readers unfamiliar with the code to more quickly
+  understand how the code works.
+
+
+### CS5: Documenting Fortran Subprograms and Modules
+
+All modules and subprograms will have a documentation block describing functionality
+
+**Type**: Requirement
+
+**Source**: GMTB, NUOPC PI Team, GSM
+
+**Reason**: Promotes understanding of algorithms and code structure by new users
+
+
+### CS6: No Fortran Common Blocks
+
+Common blocks are disallowed
+
+**Type**: Requirement
+
+**Source**: GMTB, NUOPC PI Team, GSM
+
+**Reason**: Deprecated Fortran feature. Modules provide all the
+  functionality of common blocks plus much more.
+
+
+### CS7: Compatible with GNU gfortran
+
+A package must be compilable with the gfortran compiler (or gcc for
+packages coded in C). Runnability and validation can be provided using
+whatever compiler(s) the developer prefers.
+
+**Type**: Requirement
+
+**Source**: GMTB
+
+**Reason**: gfortran (and gcc) is free and ubiquitous, and therefore
+  is an ideal choice for a canonical compiler.
+
+
+### CS8: Free-Form Fortran
+
+All Fortran source will be free-form
+
+**Type**: Requirement
+
+**Source**: GMTB
+
+**Reason**:
+
+* Fixed-form source is hard to read and archaic.
+
+* A 72-column requirement only makes sense for punch cards.
+
+
+### CS9: Fortran-Callable
+
+All public subprograms will be Fortran-callable
+
+**Type**: Requirement
+
+**Source**: GMTB, NUOPC PI Team, GSM
+
+**Reason**:  Fortran is the most commonly used language for geophysical models.
+
+
+### CS10: Thread-Safe Parameterizations
+
+All parameterizations must be thread-safe (except for initialization
+and finalization methods)
+
+**Type**: Requirement
+
+**Source**: GMTB, NUOPC PI Team, GSM
+
+**Reason**:
+
+Many geophysical numerical models are threaded these days, and need to
+be able to invoke physical parameterizations simultaneously from
+multiple threads.
+
+Example code which is NOT thread-safe: declare a variable `first` and
+initialize it to `.true.`, then test its value and set some static
+variables if it is `.true.`. This will likely result in wrong answers
+when run in threaded mode.
+
+Solution: Provide an initialization routine which sets the static
+variables outside of threaded regions.
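+
+A minimal sketch of both patterns (names illustrative):
+
+    ! NOT thread-safe: two threads may both find first == .true.
+    ! and race to set the saved coefficient.
+    subroutine run_bad(x)
+      implicit none
+      real, intent(inout) :: x
+      logical, save :: first = .true.
+      real,    save :: coeff
+      if (first) then
+        coeff = 0.5
+        first = .false.
+      end if
+      x = coeff*x
+    end subroutine run_bad
+
+    ! Thread-safe: the static variable is set once by an init
+    ! routine called outside of threaded regions.
+    module phys_mod
+      implicit none
+      real, save :: coeff
+    contains
+      subroutine phys_init()
+        coeff = 0.5          ! called once, before any threading
+      end subroutine phys_init
+      subroutine phys_run(x) ! safe to call from multiple threads
+        real, intent(inout) :: x
+        x = coeff*x
+      end subroutine phys_run
+    end module phys_mod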
+
+Wikipedia provides a brief overview of thread-safety: 
+
+ * https://en.wikipedia.org/wiki/Thread_safety
+
+
+### CS11: No parameterization will contain a `stop` or `abort` clause
+
+**Type**: Requirement
+
+**Source**: GMTB, NUOPC PI Team, GSM
+
+**Reason**: If an error condition arises, it is better to set a flag
+and let the caller decide how to handle the condition.
+
+**Status**: Not yet implemented.
+
+
+### CS12: Use of uninitialized variables is disallowed
+
+**Type**: Requirement
+
+**Source**: GMTB, GSM
+
+**Reason**: Readability.
+
+Not all compilers can be made to initialize static or stack variables
+to a known value (e.g. zero).
+
+
+### CS13: All array indices must fall within their declared bounds.
+
+**Type**: Requirement
+
+**Source**: GMTB, NUOPC PI Team, GSM
+
+**Reason**: Debuggers will fail when "tricks" are employed which
+  reference arrays outside of their declared bounds.
+
+
+### CS14: Self-Reproducible Parameterizations
+
+Multiple runs of the same compiled parameterization given identical
+input must produce identical output.  In the case where randomness is
+part of the parameterization, a method must be provided to invoke the
+same random sequence for test reproducibility.
+
+**Type**: Requirement
+
+**Source**: GMTB, NUOPC PI Team, GSM
+
+**Reason**: Prevents inadvertent errors.
+
+
+### CS15: Do Not Set Fortran Default Precisions
+
+The use of compiler flags specifying default precision is
+disallowed. For example, if 64-bit precision is required, use the
+`kind=` attribute to specify the precision rather than a compiler flag
+such as \c -r8
+
+**Type**: Requirement
+
+**Source**: GMTB
+
+**Reason**: The behavior of such flags is compiler-specific; e.g., if
+  the user explicitly declares a variable `real*4`, whether the `-r8`
+  compiler flag promotes it varies from compiler to compiler.
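+
+As a sketch, with an illustrative module and kind name, precision can
+be requested in the source itself:
+
+    module kinds_mod
+      implicit none
+      ! 64-bit real: at least 15 significant digits, decimal exponent range 307
+      integer, parameter :: r8 = selected_real_kind(15, 307)
+    end module kinds_mod
+
+    subroutine demo()
+      use kinds_mod, only: r8
+      implicit none
+      real(kind=r8) :: t
+      t = 273.15_r8   ! precision is explicit in the source, not in build flags
+    end subroutine demo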
+
+
+### CS16: List Public Entries in Fortran `module use` Statements
+
+With the exception of common libraries which use a well-defined naming
+standard for variables and subroutines, all `module use` statements
+must explicitly state which public entities will be referenced. The
+MPI library is an example of an acceptable exception: All MPI routines
+start with `MPI`, so a bare `use mpi` statement is acceptable.
+
+**Type**: Recommended
+
+**Source**: GMTB
+
+**Reason**: Assists in understanding where various variables and/or
+functions or subroutines are defined.
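+
+For illustration, assuming the `kinds_mod` module sketched under CS15
+and an MPI library that provides the `mpi` module:
+
+    subroutine demo()
+      use kinds_mod, only: r8   ! every referenced entity listed explicitly
+      use mpi                   ! acceptable exception: all entities start with MPI
+      implicit none
+      real(kind=r8) :: dt
+      dt = 300.0_r8
+    end subroutine demo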
+
+
+### CS17: No Debugging Code
+
+All code intended for debugging purposes only should be removed prior
+to submission for inclusion
+
+**Type**: Recommended
+
+**Source**: GMTB, GSM
+
+**Reason**: Readability
+
+
+### CS18: Deallocate Allocated Arrays
+
+All explicitly allocated arrays must be deallocated when no longer
+needed
+
+**Type**: Requirement
+
+**Source**: GMTB, GSM
+
+**Reason**: Readability. Minimize memory usage.
+
+
+### CS19: Default Visibility is `private` 
+
+The default visibility rule for module variables and procedures should
+be `private` (specified by a single `private` statement near the
+beginning of the module). The `public` attribute is applied to only
+those entities which are needed by other subprograms or modules.
+
+**Type**: Recommended
+
+**Source**: GMTB
+
+**Reason**: Limiting variable and subprogram scope is good programming practice
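+
+A minimal sketch (names illustrative):
+
+    module solver_mod
+      implicit none
+      private                          ! default: nothing is visible outside
+      public :: solve                  ! expose only what other code needs
+      real, parameter :: tol = 1.0e-6  ! internal detail, stays private
+    contains
+      subroutine solve(x)
+        real, intent(inout) :: x
+        x = max(x, tol)
+      end subroutine solve
+    end module solver_mod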
+
+
+### CS20: Consistent Case in Fortran Code
+
+Consistent use of case is preferred for Fortran code (text strings excepted).
+
+**Type**: Recommended
+
+**Source**: GMTB
+
+**Reason**: While Fortran is a case-insensitive language, a variable
+declared as `aBc` should be expressed that way everywhere, not `abc`
+in one place and `ABC` in another.
+
+
+### CS21: Parameterization Steps
+
+A parameterization should contain `init`, `run`, and `finalize`
+methods. The `run` method must be thread-safe.
+
+**Type**: Recommended
+
+**Source**: GMTB
+
+**Reason**: Promotes separation of activities which must be done only once at startup or shutdown, from those which are done on multiple time steps.
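+
+A skeleton of the three-method layout (names illustrative); note that
+it also supports CS10 and CS18:
+
+    module demo_phys_mod
+      implicit none
+      private
+      public :: demo_init, demo_run, demo_finalize
+      real, allocatable, save :: table(:)
+    contains
+      subroutine demo_init(n)          ! one-time setup; not thread-safe
+        integer, intent(in) :: n
+        allocate(table(n))
+        table = 0.0
+      end subroutine demo_init
+      subroutine demo_run(i, x)        ! per-timestep work; thread-safe
+        integer, intent(in)    :: i
+        real,    intent(inout) :: x
+        x = x + table(i)
+      end subroutine demo_run
+      subroutine demo_finalize()       ! one-time cleanup
+        deallocate(table)
+      end subroutine demo_finalize
+    end module demo_phys_mod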
+
+
+### CS22: Parameterizations Invoked in Chunks
+
+Parameterizations should be able to be invoked in "chunks", where the
+calculations are independent of the fastest-varying subscript.
+
+**Type**: Recommended
+
+**Source**: GMTB
+
+**Reason**:
+
+Computational performance is the main reason for this preference. Many
+physical parameterizations in geophysical models contain a dependence
+in the vertical, which means this dimension is unavailable for
+vectorization. Vectorization can provide up to a 16X speedup on modern
+processors.
+
+Example: Outer loop over vertical index `k` can contain vertical
+dependence, but if there is also an inner loop over horizontal index
+`i` that can be vectorized, the code is likely to run much more
+efficiently.
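+
+A sketch of this loop structure (array and routine names illustrative):
+
+    subroutine column_update(ni, nk, t)
+      implicit none
+      integer, intent(in)    :: ni, nk
+      real,    intent(inout) :: t(ni, nk)
+      integer :: i, k
+      do k = 2, nk      ! vertical dependence stays in the outer loop
+        do i = 1, ni    ! inner loop over the fastest-varying index can vectorize
+          t(i,k) = t(i,k) + 0.5*t(i,k-1)
+        end do
+      end do
+    end subroutine column_update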
+
+
+### CS23: Don't `GOTO`
+
+The use of `GOTO` is strongly discouraged, except where no better
+option is available.
+
+**Type**: Recommended
+
+**Source**: GMTB
+
+**Reason**: Modern languages provide better mechanisms to accomplish
+  the same goal in most cases.  `GOTO` promotes "spaghetti" code,
+  which can be unreadable.
+
+
+### CS24: Nested Scope Indentation
+
+Code and declarations within subprograms, loops, and conditional tests
+should be indented. Indenting by 2 or 3 or 4 columns is reasonable
+
+**Type**: Recommended
+
+**Source**: GMTB
+
+**Reason**: Readability. Particularly important for multiply nested
+  loops and/or `if` tests.
+
+
+### CS25: Use Symbolic Fortran Comparison Operators
+
+Test operators `<`, `<=`, `>`, `>=`, `==`, `/=` are preferred
+over their deprecated counterparts `.lt.`, `.le.`, `.gt.`, `.ge.`,
+`.eq.`, `.ne.`
+
+**Type**: Recommended
+
+**Source**: GMTB, GSM
+
+**Reason**: The modern constructs are easier to read, and more
+  understandable for those unfamiliar with legacy code.
+
+
+### CS26: No Bare Constants
+
+The use of bare constants (e.g. `2.7`) inside of computational regions
+is strongly discouraged. Instead, a named constant (e.g. `some_variable
+= 2.7`) should be declared at the top of the routine or module, along
+with an in-line comment stating its purpose
+
+
+**Type**: Recommended
+
+**Source**: GMTB
+
+**Reason**:
+
+Bare constants buried in code are one of the biggest contributors to
+lack of readability and understanding of how code works. "What the
+heck does `2.7` mean???" In addition, using a named constant makes it
+easier to specify precision, e.g. `real*8 :: some_var = 35.`
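+
+A small hypothetical illustration:
+
+    subroutine scale_wind(u, ustar)
+      implicit none
+      real, intent(in)  :: u
+      real, intent(out) :: ustar
+      real, parameter   :: von_karman = 0.4  ! von Karman constant (dimensionless)
+      ustar = von_karman*u                   ! no bare 0.4 buried in the formula
+    end subroutine scale_wind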
+
+
+Documentation Management
+========================
+
+Goals, Requirements and Expectations
+------------------------------------
+
+In the chart below, OP stands for Operating Principles.
+
+
+### D1: Well-Maintained Documentation
+
+Maintain comprehensive, accessible, and up-to-date documentation for
+users, developers, managers, and other stakeholders.
+
+**Type**: Goal
+
+**Source**: Proposed by OAS
+
+**Reason**:  Documentation is necessary to operate NEMS.
+
+**Status**: Completed initial survey.
+
+
+### D2: Avoid Duplication of Documentation
+
+**Type**: Goal
+
+**Source**: Proposed by OAS
+
+**Reason**:  OP, avoid duplication.
+
+**Status**: No checks in place.
+
+
+### D3: Documentation Delivered with Software
+
+Documentation should be generated at the time of development and is
+considered part of any software delivery.
+
+**Type**: Goal
+
+**Source**: Proposed by OAS
+
+**Status**: No checks in place.
+
+**Reason**:  Encourages timely and accurate documentation.
+
+
+### D4: Example Code should be in Regression Tests
+
+Any code snippets or code examples in the documentation should be
+linked via documentation generation tools to actual code that is
+included in regular regression testing.
+
+**Type**: Requirement
+
+**Source**: Proposed by OAS
+
+**Reason**:  Minimize the maintenance burden.
+
+**Status**: Some implementation, no checks in place.
+
+
+### D5: Public Visibility of Documentation
+
+Documentation that does not present a security concern should be publicly visible. 
+
+**Type**: Requirement
+
+**Source**: Proposed by OAS
+
+**Reason**:  OP, open for viewing.
+
+**Status**: No general agreement.
+
+
+### D6: Documentation is Accessible
+
+Documentation should be accessible to collaborators for contributions.
+
+**Type**: Requirement
+
+**Source**: Proposed by OAS
+
+**Reason**:  OP, open for participation.
+
+**Status**: No general agreement.
+
+
+### D7: Components are Documented
+
+ All NEMS system components (e.g. model components, physics library
+ components, workflow components) should have general documentation,
+ user documentation, technical documentation, and, as applicable,
+ scientific references. We define general documentation to include at
+ least a high-level description of the software and its purpose. User
+ documentation includes how to check out, build, and run, but not
+ necessarily modify, the code. Technical documentation is intended for
+ people who need to understand, develop, and change technical code.
+
+**Type**: Requirement
+
+**Source**: Proposed by OAS-COLA
+
+**Reason**:
+
+User documentation should include how to check out, compile, install,
+configure, run, and analyze outputs of the system. This includes
+information on how to perform common tasks, such as changing model
+grids and resolutions. Installation documentation should include
+detailed itemization of supported compiler(s) (version(s)), MPI
+implementation(s) (version(s)), batch system(s) and operating
+system(s).  It should also include a step-by-step description for
+installation of dependent libraries (e.g. ESMF) and data sets.
+
+**Status**:
+
+Initial survey completed.
+
+
+### D8: Documentation Versioned with Code
+
+Scientific, user, and technical documentation should be clearly
+associated with a version of code. This can be done in a maintainable
+manner via documentation generation tools.
+
+**Type**: Requirement
+
+**Source**: Proposed by OAS
+
+**Reason**: Versioning is critical for understanding and accuracy.
+
+**Status**: Implementation started.
+
+Tools and Options: Preparation of Documentation
+-----------------------------------------------
+
+### Doxygen
+
+This is a popular package used by HWRF. Here is one Doxygen example:
+
+ * http://www.dtcenter.org/HurrWRF/users/support/hwrf_docx/html/index.html
+
+The HWRF developers website is at
+
+ * http://www.dtcenter.org/HurrWRF/developers
+
+By clicking on the various tabs on the left, you can get to the code
+management document (which basically is a top-level SVN repo with all 8
+components pulled in as SVN externals, similar to the proposed NEMS
+structure). Links to various documentation, including some created
+with Doxygen, LaTeX, and Word, are at
+http://www.dtcenter.org/HurrWRF/users/docs.
+
+### Protex
+
+This is a Perl script developed at NASA, used by ESMF, NASA, CESM and
+other modeling and infrastructure groups. There is a copy included in
+the ESMF source:
+
+ * http://sourceforge.net/p/esmf/esmf/ci/master/tree/scripts/doc_templates/templates/protex
+
+Protex commands are placed within comments in source code files. These
+commands extract and format sections of the source and embed them in
+latex documents. This is used to generate documentation of APIs, code
+examples, etc. This documentation is updated automatically as the
+source code evolves, which satisfies requirements such as D4. An
+example is the ESMF Reference Manual - all of the Class API sections
+are pulled from the source code using Protex:
+
+ * http://www.earthsystemmodeling.org/esmf_releases/public/last/ESMF_refdoc/node1.html
+
+Its main advantages are that it is very simple, easy to understand
+and customize, and that it creates both text and web
+documents. Disadvantages are that the output is plain-looking,
+especially on the web, and creating web pages requires latex2html or
+similar.
+
+### Sphinx
+
+Sphinx is a Python-based package that creates documentation that is
+more polished-looking than Protex. The ESMF team is using it to
+document its ESMPy and OpenClimateGIS packages.
+
+Documentation Current Practice and Recommended Evolution
+--------------------------------------------------------
+
+\note Management review is required for the approach to developing and
+maintaining technical documentation. An approach is also needed for
+organizing documentation overall.
+
+A procedure has been proposed by the NEMS documentation lead, Valbona Kunkel.
+
+At the current time, an outline of the documentation that is available
+and desired for NEMS and its applications is being collected on 
+[this spreadsheet](https://docs.google.com/spreadsheets/d/1CLT66uzJrjrsY-um0jB5hU-Gfeh3_VCIJDA4-Ibmu5s/edit#gid=0).
+The spreadsheet serves as a survey and gap analysis, and
+is intended as a first step in the assembly of comprehensive,
+accessible NEMS documentation. Next steps may include:
+
+* disseminate the spreadsheet NGGPS-wide so that others can identify
+  additional documentation needed or available
+
+* develop strategies for linking code with documentation to satisfy D4
+
+* restructure and organize documentation into documents and on
+  websites so that it is increasingly standardized and easier to find
+
+\note Move spreadsheet to public area if needed.
+
+Input Data Management
+=====================
+
+Goals, Requirements and Expectations
+------------------------------------
+
+\note In the chart below, OP stands for Operating Principles.
+
+
+### ID1: Maintain a single source of input data
+
+**Type**: Goal
+
+**Source**: Proposed by OAS
+
+**Reason**:  OP, avoid duplication.
+
+**Status**: No policy in place.
+
+
+### ID2: Minimize the chances of inadvertent modification of input data.
+
+**Type**: Goal
+
+**Source**: Proposed by OAS
+
+**Reason**: OP, formalize sharing.
+
+**Status**: Not implemented.
+
+
+### ID3: Easy to Identify and Obtain Input Data for a Configuration
+
+Make it as easy as possible to identify what input data is required
+for a configuration and to obtain that data and copy it to a new
+location.
+
+**Type**: Goal
+
+**Source**: Proposed by OAS
+
+**Reason**:  OP, engage through clarity.
+
+**Status**: Not implemented.
+
+
+### ID4: Input Data Not in Personal Directories
+
+Input data should be held in project and shared directories or
+repositories and not in personal directories.
+
+**Type**: Requirement
+
+**Source**: Proposed by OAS
+
+**Reason**:  OP, formalize sharing.
+
+**Status**: Not implemented.
+
+
+### ID5: Verifiable Input Data 
+
+A timestamp or checksum approach should be adopted for input data
+files to help avoid confusion when files are updated.
+
+**Type**: Requirement
+
+**Source**: Proposed by OAS
+
+**Reason**: None
+
+**Status**: Not implemented
+
+
+### ID6: Metadata provenance should be standardized for input data
+
+**Type**: Requirement
+
+**Source**: Proposed by OAS
+
+**Reason**: None
+
+**Status**: Not implemented.
+
+
+Input Data Management Current Practice and Recommended Evolution
+----------------------------------------------------------------
+
+\note  This section requires management review and concurrence.
+
+\warning This section contains incorrect or incomplete information,
+and should be ignored until it is updated.
+
+A proposal for interim NEMS data management is included below. The
+motivation for implementing such an interim strategy is to satisfy the
+input data design goals above during development of NEMS
+applications. These requirements are not satisfied by the current ad
+hoc approach, which slows down and can disrupt development.
+
+### Standard NEMS Data Dir
+
+On each system that we support, we determine a location for a "public"
+NEMS data directory. This data directory is shared by all different
+compsets, and revisions of them. In other words, if you are on Theia,
+you know where this data directory lives, and the same on Yellowstone.
+Any input files used by NEMS are located within this data dir. Symbolic
+links are allowed within the data dir.
+
+The location of the platform-specific NEMS data dir is hard-coded into
+the NEMSCompsetRun and documented. In general it is not to be changed.
+
+In subsequent sections the NEMS data dir will be referred to as NEMSDATADIR.
+
+### Compset subdirs
+
+The first subdirectory level under NEMSDATADIR will reflect the
+compsets that we support. So, for example, there will be a
+20150401_nems_gsm_cice_mom5 subdir.
+
+### Version subdirs
+
+Under each compset subdir, there will be subdirs that indicate
+versioning: v001, v002, and so on. This level is used by each
+compset independently of other compsets to handle the fact that input
+data (and regression output) will change during development.
+
+### Component subdirs
+
+The final subdir structure contains a subdir for each component that
+is part of the compset. E.g., for 20150401_nems_gsm_cice_mom5 it looks
+like this:
+
+  * NEMS   (mediator files)
+  
+  * GSM
+
+  * CICE
+
+  * MOM5
+
+  * REGRESSION_BASELINE
+
+Finally, the NEMSCompsetRun will copy files in the above subdirs to
+the RUNDIR when setting up the run, except for GSM, where we still
+depend on the native scripts but point them to files in the GSM
+subdir.
+
+Once the above is implemented and documented, it should be more
+straightforward to set up the input files for a specific compset, or
+version thereof. The directory structure depends on the compset name,
+and the version number. Once those two pieces are known, everything
+else is fixed, which means that the software knows what to do, and a
+user can easily figure out where to look to find the files.
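+
+For example, under this scheme the CICE input files for version v002
+of the 20150401_nems_gsm_cice_mom5 compset would be found at (the
+path is illustrative):
+
+    NEMSDATADIR/20150401_nems_gsm_cice_mom5/v002/CICE/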
+
+Output Data Management
+======================
+
+\note This section requires management review and
+concurrence. Preparation of a plan for the estimation, storage and
+sharing of outputs by the community is one of the actions identified
+in the CDDM workshop on Sept 1-2, 2016 ("Community Data Access Plan").
+
+\note Link to the Community Data Access Plan document
+
+Goals, Requirements and Expectations
+------------------------------------
+
+In the chart below, OP stands for Operating Principles.
+
+
+### OD1: Model Outputs Available to Non-EMC Developers
+
+Model outputs must be available on a platform that is open to
+developers and analysts collaborating with EMC.
+
+**Type**: Requirement
+
+**Source**: COLA
+
+**Reason**: OP, avoid duplication.
+
+**Status**: No policy in place.
+
+
+### OD2: Use Standard Data Formats
+
+Model outputs should be suitable for analysis by the usual tools
+(e.g. NCL, GrADS, Python-based tools). That implies that they must use
+standard data formats (netCDF, GRIB) and possibly satisfy
+CF compliance or even ESGF compliance.
+
+**Type**: Requirement
+
+**Source**: OAS/COLA
+
+**Reason**: OP, formalize sharing.
+
+**Status**: Not implemented.
+
+
+### OD3: Model Outputs in Single Space or Transparently Connected Spaces
+
+Model outputs should be stored in a single space, or
+easily/transparently connected multiple spaces.
+
+**Type**: Requirement
+
+**Source**: Proposed by OAS
+
+**Reason**:  Ease of analysis.
+
+**Status**: Not implemented.
+
+
+### OD4: Metadata with Model Outputs
+
+Model outputs must be stored with metadata sufficient to reproduce the
+runs that generated them.
+
+**Type**: Requirement
+
+**Source**: Proposed by OAS
+
+**Reason**: Ease of analysis.
+
+**Status**: Not implemented.
+
+
+
+Output Data Management Current Practice and Recommended Evolution
+-----------------------------------------------------------------
+
+This is a proposal for the organization of a common UGCS-Seasonal
+results repository. The intent is to facilitate the exchange of
+information and ideas and to evaluate the evolution of the coupled
+system in its many configurations.  As with other large-scale coupled
+system projects, such as CMIP5, a certain minimum amount of
+organization is needed, though it can quickly become overwhelming in
+its structure.  In CMIP5, mostly unprocessed model output is provided.
+Here, even if adequate space can be found, some further processing
+will also be needed to reduce repeated processing by individuals and
+to speed getting out key points and ideas. Thus, while this will start
+with a structure similar to CMIP5, some key distinctions will arise:
+
+* Any model or processed data need not comply with the strict CMIP5
+  requirements, but must still have enough metadata so that
+  non-proprietary software can examine it.  Since models being used
+  here produce some form of GRIB or NetCDF, this should be enough
+  provided the metadata are accurate.
+
+* Some reduction in the 4-dimensional domain (space + time) will
+  usually be needed on a per variable basis to aid in examination.
+  Vector and tensor components could be stored jointly for easier
+  display and calculation.
+
+* In addition to data, graphs, charts, maps, pictures, and animations
+  can also be separately included.  All should be viewable with
+  standard browser software.
+
+Results organization will follow CMIP5 notation at the top level,
+with individual realms each getting a separate directory.  Here a realm
+refers to an entire portion of the climate system and is usually
+associated with at least one modeling component.  At present the land
+realm is found within the atmosphere model component, but may
+eventually be separated.  Other applications, such as regional
+forecasts, could have a similar division.  The same realm may have
+several separate model components, such as MOM and HYCOM for the
+ocean.  Additional adjunct components, such as atmospheric chemistry or
+space weather, should have their results placed in the appropriate
+realm for now.  These may be separated later if needed.  In addition
+to the four standard CMIP5 directories of atmosphere, ocean, land, and
+sea ice, a fifth multipurpose directory, labeled X, has also been
+added.  This will be detailed later.  Figure 2 shows the basic
+top-level arrangement.
+
+The arrangement of items within each of the first four directories
+will be similar.  The atmosphere directory will be detailed as an
+example.
+
+\image html cmip5-arrangement.png "Figure 2"
+
+As with CMIP5, the next level will be variable.  This will permit
+easier comparison of the same variable from different experiments.
+Since each variable can be examined in many ways, and since some
+reduction of the model data will be done to further assist in the
+examination, some added directory levels are needed.  Typically, as
+mentioned, some domain reduction is done.  This will often be some
+sample or simple average of the 4-dimensional data to 2 dimensions.
+This would consist of individual levels (atmospheric pressure, height,
+model level, potential temperature, etc.) or surfaces (ground, top of
+atmosphere, etc.), some cross section, usually in latitude or
+longitude, or some time evolution with a single spatial coordinate.
+Within each of these basic 2-d slices, further reduction may be done.
+For horizontal slices, global or different regional reductions can be
+done.  Further down, instantaneous samples or some time-averaging
+combinations can be done.  Within each of these, results from different
+revisions would be placed.  These revisions, representing highly
+specific model configurations and full input and output datasets and
+settings, can consist of any combination of data, graphs, charts, maps,
+or animations.  Similar reductions would be possible with the other
+subdomains.
+
+Ideally, results from the same variable and revision placed in
+different directories would be uniquely named, so that if copied from
+the repository they can still be identified as to what they are.
+
+When analysis requires statistics of one variable beyond the
+simple slicing and averaging, a directory parallel to the subdomain
+directory will contain these results.  These would include statistics
+between revisions.  In addition to the graphs, charts, maps, and
+animations, a separate directory for tabular data is present.  This
+could include Excel files or comma-separated-value (.csv) files to
+allow spreadsheet examination.  If needed, a subdomain directory could
+also be included here to assist in arranging results.  Similarly,
+parallel to all the variable directories is a multivariate statistics
+directory to examine covariances and similar multivariate
+relationships.  It would be arranged similarly to the univariate
+directories.  Figure 3 shows an idea of how these directories are
+related.
+
+\image html directory-idea.png "Figure 3"
+
+
+
+
+
+The multipurpose (X) directory has been added to deal with some
+special needs:
+
+* E(X)change of data between realms (components).  This would include
+  input and output fields from the mediator, plus the same fields as
+  seen by the components if needed.  These would at least start with
+  the unprocessed fields for human review.  Problem fields and
+  locations could be highlighted with suitable graphics.  As with the
+  other realms, individual variables would be held plus any
+  statistics.
+
+* Results from cross(X)-realm evaluations would also be placed here.
+  This would include such plots as atmosphere height correlated with
+  SST anomaly.
+
+* To expedite getting information out when a suitable pre-specified
+  directory cannot be determined, a temporary directory (or perhaps
+  permanent if truly unclassifiable) can be set up here to place
+  results of unknown (X) assignment.
+
+Release Management for NEMS Applications
+========================================
+
+This is a checklist of tasks for preparing a new release of a NEMS
+Application.  A release includes making available a revision (and tag)
+of a NEMS Application as well as accompanying documentation.
+
+### Preparing the Snapshot Revision
+
+1. Ensure regression tests are passing in all affected applications.
+   Changes to common infrastructure such as the Mediator,
+   NEMSAppBuilder and NEMSCompsetRun scripts affect multiple
+   applications.
+
+2. Prepare the snapshot revision by strictly versioning all
+   svn:externals:
+
+   a. In your application root directory, edit the local svn:externals
+      property, strictly versioning all constituent components
+      (including NEMS itself).  To determine the strict revision
+      numbers, go into the directory of each component, execute `svn
+      info`, and find the last-changed revision number.
+
+   b. `svn propedit svn:externals .`
+
+   c. Commit these application svn:externals changes (and potentially
+      other changes in the *.appBuilder or *.compsetRun files) back to
+      the application repo. Take note of the revision number of this
+      commit. This is the revision number of the snapshot revision of
+      the application.
+
+   d. Optionally "svn copy" the application trunk (to which you just
+      committed) to the application tags directory with a descriptive
+      tag name.
+
+   e. Starting with this step, the rest of the steps ensure that your
+      development sandbox (and those of others, when they execute svn
+      update) is reset to no longer use strict versioning.  Edit the
+      local svn:externals property, setting all constituent components
+      to float back to the head of their appropriate branch.
+
+   f. Commit the new svn:externals and update.  Those checking out the
+      trunk of the NEMS application will receive the head of the
+      development branches of all constituent components.  Development
+      continues in this mode until it's time to make another snapshot.
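+
+An illustrative shell sketch of steps a-f above (the revision number,
+URL line, and tag name are placeholders, not prescribed values):
+
+    # In each component directory, find its last-changed revision:
+    svn info | grep "Last Changed Rev"
+
+    # In the application root, pin every external to that revision
+    # (opens your editor on the svn:externals property):
+    svn propedit svn:externals .
+    #   e.g.:  -r 94669 https://svnemc.ncep.noaa.gov/projects/nems/trunk NEMS
+
+    # Commit the pinned externals; note this revision number, which
+    # identifies the snapshot:
+    svn commit -m "Snapshot: strictly versioned externals"
+
+    # Optionally tag the snapshot:
+    svn copy ^/trunk ^/tags/<descriptive-tag-name> -m "Snapshot tag"
+
+    # Re-edit svn:externals to float on the branch heads again,
+    # then commit and update:
+    svn propedit svn:externals .
+    svn commit -m "Float externals back to development heads"
+    svn update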
+
+Steps for Component NUOPC Caps
+------------------------------
+
+1. If the development involved any
+   [Earth System Prediction Suite](https://www.earthsystemcog.org/projects/esps/)
+   components, and changes were made to the NUOPC cap during
+   development for the release, or a new version of the model was
+   used, then this should be documented in the ESPS tables.  Ideally,
+   this would include any new test reports, compliance output, and an
+   updated description of the cap.  See HYCOM as an example in the
+   [ocean components table](https://www.earthsystemcog.org/projects/esps/ocean_models).
+
+2. Updated components and their NUOPC caps should be pushed back to
+   their home repository, and those revision numbers documented.
+   Since revision numbers are repository-specific, a component and its
+   cap will have multiple revision numbers, one associated with each
+   repository.  In addition, components with a NUOPC cap included in
+   the source distribution that have been formally released will have
+   a version number, e.g., 2.1.  In general, precisely documenting
+   revision numbers and versions ensures that someone else can get the
+   exact same code and reproduce previous results.
+
+Preparing the Release Documentation
+-----------------------------------
+
+\warning This section is incorrect.  It does not reflect the fact that
+documentation has been moved to the repository.
+
+Note: CoG pages are public documents.  They can be locked so that only
+project members can view a page.  A policy should be set as to when
+the pages below are made public.
+
+1. Create a new milestone release page, currently on the CoupledNEMS
+   CoG site.  Examples of these include
+   [UGCS-Seasonal 0.1](http://cog-esgf.esrl.noaa.gov/projects/couplednems/drev58214) and
+   [Regional 0.2](http://cog-esgf.esrl.noaa.gov/projects/couplednems/regional02).
+   Here are the key content pieces that should be documented:
+
+   a. A description of the milestone
+   b. The version of each component used.  ESPS components should be linked to the component in the [ESPS tables](https://www.earthsystemcog.org/projects/esps/).
+   c. The model grid of each component
+   d. Initial conditions used
+   e. All coupling field exchanges - this should be done by updating the [master field spreadsheet](https://docs.google.com/spreadsheets/d/1X8ByKHfuHZ5x8Ta3Tqv7B0gQBRDXYMK6_yr9tmFu0QE/edit#gid=0) and making a copy of it at a new URL to freeze it.  Link the copied spreadsheet to the milestone page.
+   f. The run sequence
+   g. How the system was validated, including any plots
+   h. Limitations of the system
+
+2. Update (or create) the Build & Run page for the application.  Examples of these include [UGCS-Seasonal](http://cog-esgf.esrl.noaa.gov/projects/couplednems/ugcs-seasonal) and [Regional](http://cog-esgf.esrl.noaa.gov/projects/couplednems/regional).
+
+   a. Add an entry to the revisions table.  Since this revision is an actual release, link the revision number and/or tag to the milestone release page (above).
+   b. The details column should list, in bullet form, changes since the last release (if there was a previous release) or a description of the new application's features if there was no previous release.
+   c. List all compsets that can be run in the released version.
+
+3. Prepare a release announcement email including a brief description of the milestone release and links to the milestone release page and the Build & Run page for the application.
+
+4. All documentation should be reviewed and approved before sending out the release notification.  The review should start a minimum of two weeks before the intended release date.
+
+5. Create a CoG news item on the CoupledNEMS site.
+
+\note Move master field spreadsheet to repo
+
+Pilot Projects
+==============
+
+NEMS Mediator in CIME
+---------------------
+
+One of the features of the NUOPC software architecture is that the
+mediator becomes an exchangeable component like an atmosphere or an
+ocean model. Like those components, it is possible for the mediator to
+be used in different coupled modeling applications.
+
+As described in the earlier Modes of Use section, it is advantageous
+to enable coupled NEMS-based modeling applications to be run by the
+community both within and outside of operational workflows. Including
+the NEMS mediator in CIME will enable a broad community to construct
+modeling applications using the NEMS mediator within the mature,
+accessible, widely used research environment developed by CESM for
+many-component coupled modeling. Components may include community
+models with NUOPC interfaces, such as HYCOM, MOM5 and CICE. They may
+also include model components from CESM, which shares many of the same
+types of components anticipated in NEMS (atmosphere, ocean, sea ice,
+land surface, wave, etc.), and NUOPC-compliant components from other
+centers. The NEMS mediator could be exercised in a wide variety of
+configurations and predictive time scales, and developed as a
+community component, with the scrutiny, increased robustness, and
+access to innovation that implies. A benefit of the collaboration for
+EMC is CIME's relationship to a community with established research
+activities, expertise and outreach in many-component coupled modeling.
+
+Importantly, the standard NUOPC component interfaces on the mediator
+and model components maintain a link back to the operational system at
+EMC, ensuring that as advances are made they are accessible to
+operations.
+
+Figure 4 shows the kinds of infrastructure that would accompany the
+NEMS mediator in the CIME repository. CIME enables the user to
+download and begin running a complete standalone coupled test system,
+where the test components can be replaced by active components with
+compliant interfaces.
+
+Specific tasks (FY16) include:
+
+1. With direction from NOAA leadership, implement NOAA Environmental
+Modeling Center (EMC) requirements for code distribution of the NEMS
+mediator via CIME. This is anticipated to include a download
+registration page and a plan for monitoring requests for code access
+originating from ITAR-proscribed countries.
+
+2. In collaboration with EMC and teams involved with NEMS development,
+establish a working, documented test system using the NEMS mediator
+with CIME data models and testing infrastructure. Demonstrate its use
+with a limited set of active components.
+
+
+\image html cime-nems-mediator.png "Figure 4"
+
+VLab Pilot Project
+------------------
+
+EMC is beginning to install EMC software in the NCEP/EMC subversion
+repository under VLab to enable community collaboration. Mark Potts is
+leading this effort.
Index: checkout/doc/DREV79954.md
===================================================================
--- checkout/doc/DREV79954.md	(nonexistent)
+++ checkout/doc/DREV79954.md	(revision 94669)
@@ -0,0 +1,116 @@
+DREV79954: Regional-Nest 0.1 Single Domain Coupling {#milestone_DREV79954}
+===================================================
+
+\date 10/12/2016
+
+Description
+-----------
+
+Regional-Nest 0.1 (DREV79954) is a one-way coupled configuration of
+the Nonhydrostatic Multiscale Model on the B-grid (NMMB) and a
+regional configuration of the HYbrid Coordinate Ocean Model (HYCOM).
+
+This revision has been run for 2 days using initial and boundary
+conditions based on Hurricane Patricia, and it exhibits Earth-like
+behavior. The initial condition starts at 2015-10-20 12:00, which is
+the starting time for HYCOM initialization and integration. The
+regional HYCOM has 1/12th degree resolution, with 1284x516 data
+points spanning the geographic region (-179.76, 2.48) to (-77.12,
+39.98); HYCOM works on a regular lat-lon grid over this region. The
+regional NMMB grid has a single parent domain at roughly 1/4 degree
+resolution, with 451x451 data points spanning the geographic region
+(-149.47, -25.71) to (-39.13, 58.9). The NMMB grid is a Lambertian
+curvilinear grid; it can also be thought of as a rotated lat-lon
+grid.
+
+Field Exchange
+--------------
+
+Currently all fields are transferred using bilinear interpolation. The
+following flux fields are transferred between NMMB and HYCOM:
+
+| NMMB->HYCOM              | HYCOM->NMMB             |
+| ------------------------ | ----------------------- |
+| latent heat flux         | sea surface temperature |
+| sensible heat flux       | &nbsp;                  |
+| net longwave             | &nbsp;                  |
+| net shortwave            | &nbsp;                  |
+| zonal momentum flux      | &nbsp;                  |
+| meridional momentum flux | &nbsp;                  |
+| precipitation rate       | &nbsp;                  |
+ 
+Run Sequences
+-------------
+
+The coupled system starts with HYCOM integrating first using the
+Patricia initial condition. HYCOM ignores mediator input on its first
+time step. Mediator input is used in subsequent time steps after NMMB
+has been initialized from HYCOM SST and provides valid input in
+precipitation, radiative fluxes, heat fluxes, and momentum fluxes.
+
+    runSeq::
+      @1800.0
+        MED MedPhase_slow
+        MED -> OCN :remapMethod=redist
+        OCN
+        OCN -> MED :remapMethod=redist
+        @600.0
+          MED MedPhase_fast_before
+          MED -> ATM :remapMethod=redist
+          MED -> ICE :remapMethod=redist
+          ATM
+          ICE
+          ATM -> MED :remapMethod=redist
+          ICE -> MED :remapMethod=redist
+          MED MedPhase_fast_after
+        @
+      @
+    ::
+ 
+Validation
+----------
+
+Plots of SST ingested into NMMB from HYCOM are shown here at 1 hr, at
+48 hr, as the difference between those two times, and as a one-way
+versus two-way coupling difference.
+
+
+\image html DREV77954-sst_domain01_nonests_1hr.png "SST received by NMMB after one hour of model simulation time. By now HYCOM has run 2 time steps and sends updated SST to NMMB."
+
+\image html DREV77954-sst_domain01_nonests_48hrs.png "SST received by NMMB at 48 hr of model simulation."
+
+\image html DREV77954-diff_sst_end_01.png "Difference in SST received by NMMB between 1 hr and 48 hr of model simulation time. The outline of the regional HYCOM domain overlapping the regional NMMB domain is also visible in this figure."
+
+\image html DREV77954-2way_minus_1way_diff_sst.png "Difference in SST received by NMMB at 48 hr of model simulation time between one-way coupling (HYCOM->NMMB) and two-way coupling (HYCOM\<->NMMB)."
+
+
+Download and Build
+------------------
+
+This revision can be downloaded with the following command:
+
+    svn co -r 79954 https://svnemc.ncep.noaa.gov/projects/nems/apps/Regional-Nest
+
+Instructions on how to download and build a NEMS application are
+discussed in the \ref documentation "NEMS User's Guide and Reference".
+
+The coupled system can be built with the following command after
+download is complete:
+
+    ./NEMS/NEMSAppBuilder
+
+Running the Patricia Single Parent Domain Compset
+-------------------------------------------------
+
+Compsets that can be run with this revision are:
+
+* `patricia_nems_nmm_cice_hycom`
+
+To run compsets, start within the Regional-Nest directory and execute
+the NEMS CompsetRun tool by typing:
+
+    ./NEMS/NEMSCompsetRun -compset NEMS/compsets/patricia_nems_nmm_cice_hycom
+
+Currently, the data files are only set up on Theia. The data files for HYCOM can be found at:
+
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/HYCOM/REGIONAL_HEP20/
+
+Data files for NMMB can be found at:
+
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/RT-Baselines/NMMB_patricia_nests
Index: checkout/doc/architecture.md
===================================================================
--- checkout/doc/architecture.md	(nonexistent)
+++ checkout/doc/architecture.md	(revision 94669)
@@ -0,0 +1,69 @@
+Architecture {#architecture}
+============
+
+The NEMS architecture is based on an ESMF component hierarchy with the
+application driver `MAIN_NEMS` at the top, calling into the
+`NEMS_COMP` component which in turn drives the `EARTH_COMP`
+component. The `EARTH_COMP` drives the `ATM` component (which calls
+into one of the options `GSM`, `NMMB`, or `FIM`). The architecture
+allows for
+multiple `EARTH_COMP` instances, supporting ensemble applications such
+as the Global Ensemble Forecast System (GEFS).
+
+Coupled NEMS includes atmosphere, ocean, ice, wave, land,
+aerosol/chemistry, and hydrologic models, with coupling interface and
+utilities based on the 
+[Earth System Modeling Framework (ESMF)](https://www.earthsystemcog.org/projects/esmf/).
+The NEMS applications also utilize interoperability conventions
+introduced by the 
+[National Unified Operational Prediction Capability (NUOPC)](https://www.earthsystemcog.org/projects/nuopc/).
+
+Key architecture features of the coupled NEMS:
+
+* Data exchanges between major model components go through a central
+  (NEMS) mediator component. There may be multiple versions of this
+  central mediator (e.g. for global and regional systems), and there
+  may be additional specialized mediators (e.g. for the 3D
+  interactions associated with space weather coupling).
+
+* The NEMS mediator component is an integral part of the NEMS
+  software. The mediator source code is managed alongside the NEMS
+  source code, is integrated into the NEMS make system and is built
+  when the NEMS executable is built.
+
+* The atmosphere models (currently GSM, FIM, NMMB) currently remain as
+  parts of the NEMS software. Their source code is managed alongside
+  the NEMS source code, they are integrated into the NEMS make system
+  and are built when the NEMS executable is built. This is likely to
+  change in the future.
+
+* Ocean, ice, wave and other models are treated by NEMS as external
+  dependencies. Their source code is managed outside of NEMS,
+  typically close to the organization that maintains the official
+  version of the model. Each model maintains its own separate
+  make system with a NUOPC compliant build option. NEMS requires that
+  the pre-built models are available when the NEMS executable is being
+  built.
+
+* All of the components driven by the EARTH_COMP are NUOPC-compliant
+  components.
+
+* All of the components driven by the EARTH_COMP can be configured to
+  run on a specific set of PETs (MPI processes), supporting concurrent
+  or sequential component execution.
+
+The coupled NEMS version extends the original NEMS version by
+implementing the EARTH_COMP level as a NUOPC-compliant driver that
+accepts a NUOPC-compliant mediator component and NUOPC-compliant model
+components (ATMs, OCNs, ICEs, WAVs). The diagram below shows the
+architecture of the target coupled NEMS system.  Although not shown,
+recent developments in the mediator are adding separate land and
+hydrology components as siblings to ATM, OCN, ICE, and WAV.
+
+The specific OCN, ICE, WAV models are shown within clouds outside of
+the NEMS box to indicate that NEMS handles them as external
+dependencies. Data exchanges between components are handled by generic
+NUOPC_Connector components indicated by green arrows. The generic
+connectors perform basic regrid and redist operations as needed to
+take field data from one side to the other.
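+
+For illustration, a minimal run sequence of the kind NEMS uses (the
+time step and mediator phase name here are placeholders, patterned on
+other examples in this documentation) expresses each generic
+connector as a `->` line between two components:
+
+    runSeq::
+      @3600.0
+        MED MedPhase_slow
+        MED -> OCN :remapMethod=redist
+        OCN
+        OCN -> MED :remapMethod=redist
+      @
+    ::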
+
+\image html nems-calls.png
Index: checkout/doc/image/DREV73436-WAM_Field.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV73436-WAM_Field.png
===================================================================
--- checkout/doc/image/DREV73436-WAM_Field.png	(nonexistent)
+++ checkout/doc/image/DREV73436-WAM_Field.png	(revision 94669)

Property changes on: checkout/doc/image/DREV73436-WAM_Field.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV70089-hyd2lnd_regrid_liquid_water_content_of_soil_layer_1.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV70089-hyd2lnd_regrid_liquid_water_content_of_soil_layer_1.png
===================================================================
--- checkout/doc/image/DREV70089-hyd2lnd_regrid_liquid_water_content_of_soil_layer_1.png	(nonexistent)
+++ checkout/doc/image/DREV70089-hyd2lnd_regrid_liquid_water_content_of_soil_layer_1.png	(revision 94669)

Property changes on: checkout/doc/image/DREV70089-hyd2lnd_regrid_liquid_water_content_of_soil_layer_1.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-n2_wam.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-n2_wam.png
===================================================================
--- checkout/doc/image/DREV76675-n2_wam.png	(nonexistent)
+++ checkout/doc/image/DREV76675-n2_wam.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-n2_wam.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV84552-sst_d01_01hrs_b.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV84552-sst_d01_01hrs_b.png
===================================================================
--- checkout/doc/image/DREV84552-sst_d01_01hrs_b.png	(nonexistent)
+++ checkout/doc/image/DREV84552-sst_d01_01hrs_b.png	(revision 94669)

Property changes on: checkout/doc/image/DREV84552-sst_d01_01hrs_b.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/repo-structure.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/repo-structure.png
===================================================================
--- checkout/doc/image/repo-structure.png	(nonexistent)
+++ checkout/doc/image/repo-structure.png	(revision 94669)

Property changes on: checkout/doc/image/repo-structure.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV70089-hyd2atm_regrid_liquid_water_content_of_soil_layer_1.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV70089-hyd2atm_regrid_liquid_water_content_of_soil_layer_1.png
===================================================================
--- checkout/doc/image/DREV70089-hyd2atm_regrid_liquid_water_content_of_soil_layer_1.png	(nonexistent)
+++ checkout/doc/image/DREV70089-hyd2atm_regrid_liquid_water_content_of_soil_layer_1.png	(revision 94669)

Property changes on: checkout/doc/image/DREV70089-hyd2atm_regrid_liquid_water_content_of_soil_layer_1.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-ewn_ipe.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-ewn_ipe.png
===================================================================
--- checkout/doc/image/DREV76675-ewn_ipe.png	(nonexistent)
+++ checkout/doc/image/DREV76675-ewn_ipe.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-ewn_ipe.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_Reduced_T574-lis_t126.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_Reduced_T574-lis_t126.png
===================================================================
--- checkout/doc/image/GRID_Reduced_T574-lis_t126.png	(nonexistent)
+++ checkout/doc/image/GRID_Reduced_T574-lis_t126.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_Reduced_T574-lis_t126.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV90957_sst_d02_48hrs.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV90957_sst_d02_48hrs.png
===================================================================
--- checkout/doc/image/DREV90957_sst_d02_48hrs.png	(nonexistent)
+++ checkout/doc/image/DREV90957_sst_d02_48hrs.png	(revision 94669)

Property changes on: checkout/doc/image/DREV90957_sst_d02_48hrs.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/lnd2hyd_temperature_of_soil_layer_1.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/lnd2hyd_temperature_of_soil_layer_1.png
===================================================================
--- checkout/doc/image/lnd2hyd_temperature_of_soil_layer_1.png	(nonexistent)
+++ checkout/doc/image/lnd2hyd_temperature_of_soil_layer_1.png	(revision 94669)

Property changes on: checkout/doc/image/lnd2hyd_temperature_of_soil_layer_1.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV90957_d02_sst_001hrs.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV90957_d02_sst_001hrs.png
===================================================================
--- checkout/doc/image/DREV90957_d02_sst_001hrs.png	(nonexistent)
+++ checkout/doc/image/DREV90957_d02_sst_001hrs.png	(revision 94669)

Property changes on: checkout/doc/image/DREV90957_d02_sst_001hrs.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV70089-atm_mean_prec_rate.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV70089-atm_mean_prec_rate.png
===================================================================
--- checkout/doc/image/DREV70089-atm_mean_prec_rate.png	(nonexistent)
+++ checkout/doc/image/DREV70089-atm_mean_prec_rate.png	(revision 94669)

Property changes on: checkout/doc/image/DREV70089-atm_mean_prec_rate.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV70089-lnd_liquid_water_content_of_soil_layer_1.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV70089-lnd_liquid_water_content_of_soil_layer_1.png
===================================================================
--- checkout/doc/image/DREV70089-lnd_liquid_water_content_of_soil_layer_1.png	(nonexistent)
+++ checkout/doc/image/DREV70089-lnd_liquid_water_content_of_soil_layer_1.png	(revision 94669)

Property changes on: checkout/doc/image/DREV70089-lnd_liquid_water_content_of_soil_layer_1.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV84552-sst_d02_48hrs.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV84552-sst_d02_48hrs.png
===================================================================
--- checkout/doc/image/DREV84552-sst_d02_48hrs.png	(nonexistent)
+++ checkout/doc/image/DREV84552-sst_d02_48hrs.png	(revision 94669)

Property changes on: checkout/doc/image/DREV84552-sst_d02_48hrs.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV53978-wamTemp104.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV53978-wamTemp104.png
===================================================================
--- checkout/doc/image/DREV53978-wamTemp104.png	(nonexistent)
+++ checkout/doc/image/DREV53978-wamTemp104.png	(revision 94669)

Property changes on: checkout/doc/image/DREV53978-wamTemp104.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV70089-atm_liquid_water_content_of_soil_layer_1.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV70089-atm_liquid_water_content_of_soil_layer_1.png
===================================================================
--- checkout/doc/image/DREV70089-atm_liquid_water_content_of_soil_layer_1.png	(nonexistent)
+++ checkout/doc/image/DREV70089-atm_liquid_water_content_of_soil_layer_1.png	(revision 94669)

Property changes on: checkout/doc/image/DREV70089-atm_liquid_water_content_of_soil_layer_1.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_LIS_T126-lis_t126_conus.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_LIS_T126-lis_t126_conus.png
===================================================================
--- checkout/doc/image/GRID_LIS_T126-lis_t126_conus.png	(nonexistent)
+++ checkout/doc/image/GRID_LIS_T126-lis_t126_conus.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_LIS_T126-lis_t126_conus.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-uwn_ipe.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-uwn_ipe.png
===================================================================
--- checkout/doc/image/DREV76675-uwn_ipe.png	(nonexistent)
+++ checkout/doc/image/DREV76675-uwn_ipe.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-uwn_ipe.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV90957_d03_sst_048hrs.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV90957_d03_sst_048hrs.png
===================================================================
--- checkout/doc/image/DREV90957_d03_sst_048hrs.png	(nonexistent)
+++ checkout/doc/image/DREV90957_d03_sst_048hrs.png	(revision 94669)

Property changes on: checkout/doc/image/DREV90957_d03_sst_048hrs.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV73436-IPE_Field.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV73436-IPE_Field.png
===================================================================
--- checkout/doc/image/DREV73436-IPE_Field.png	(nonexistent)
+++ checkout/doc/image/DREV73436-IPE_Field.png	(revision 94669)

Property changes on: checkout/doc/image/DREV73436-IPE_Field.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_lon.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_lon.gif
===================================================================
--- checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_lon.gif	(nonexistent)
+++ checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_lon.gif	(revision 94669)

Property changes on: checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_lon.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV84205-plot-page-1.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV84205-plot-page-1.png
===================================================================
--- checkout/doc/image/DREV84205-plot-page-1.png	(nonexistent)
+++ checkout/doc/image/DREV84205-plot-page-1.png	(revision 94669)

Property changes on: checkout/doc/image/DREV84205-plot-page-1.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV70089-atm2lnd_regrid_mean_prec_rate.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV70089-atm2lnd_regrid_mean_prec_rate.png
===================================================================
--- checkout/doc/image/DREV70089-atm2lnd_regrid_mean_prec_rate.png	(nonexistent)
+++ checkout/doc/image/DREV70089-atm2lnd_regrid_mean_prec_rate.png	(revision 94669)

Property changes on: checkout/doc/image/DREV70089-atm2lnd_regrid_mean_prec_rate.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV84205-plot-page-2.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV84205-plot-page-2.png
===================================================================
--- checkout/doc/image/DREV84205-plot-page-2.png	(nonexistent)
+++ checkout/doc/image/DREV84205-plot-page-2.png	(revision 94669)

Property changes on: checkout/doc/image/DREV84205-plot-page-2.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_CICE_gx3-lat.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_CICE_gx3-lat.gif
===================================================================
--- checkout/doc/image/GRID_CICE_gx3-lat.gif	(nonexistent)
+++ checkout/doc/image/GRID_CICE_gx3-lat.gif	(revision 94669)

Property changes on: checkout/doc/image/GRID_CICE_gx3-lat.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_lat_mask.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_lat_mask.gif
===================================================================
--- checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_lat_mask.gif	(nonexistent)
+++ checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_lat_mask.gif	(revision 94669)

Property changes on: checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_lat_mask.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-nt_ipe.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-nt_ipe.png
===================================================================
--- checkout/doc/image/DREV76675-nt_ipe.png	(nonexistent)
+++ checkout/doc/image/DREV76675-nt_ipe.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-nt_ipe.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_lon.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_lon.png
===================================================================
--- checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_lon.png	(nonexistent)
+++ checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_lon.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_lon.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_LIS_T126-lis_t126_frontrange.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_LIS_T126-lis_t126_frontrange.png
===================================================================
--- checkout/doc/image/GRID_LIS_T126-lis_t126_frontrange.png	(nonexistent)
+++ checkout/doc/image/GRID_LIS_T126-lis_t126_frontrange.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_LIS_T126-lis_t126_frontrange.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV70089-lnd2atm_regrid_mean_laten_heat_flx.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV70089-lnd2atm_regrid_mean_laten_heat_flx.png
===================================================================
--- checkout/doc/image/DREV70089-lnd2atm_regrid_mean_laten_heat_flx.png	(nonexistent)
+++ checkout/doc/image/DREV70089-lnd2atm_regrid_mean_laten_heat_flx.png	(revision 94669)

Property changes on: checkout/doc/image/DREV70089-lnd2atm_regrid_mean_laten_heat_flx.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-o2_wam.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-o2_wam.png
===================================================================
--- checkout/doc/image/DREV76675-o2_wam.png	(nonexistent)
+++ checkout/doc/image/DREV76675-o2_wam.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-o2_wam.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV84552-sst_d03_01hrs.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV84552-sst_d03_01hrs.png
===================================================================
--- checkout/doc/image/DREV84552-sst_d03_01hrs.png	(nonexistent)
+++ checkout/doc/image/DREV84552-sst_d03_01hrs.png	(revision 94669)

Property changes on: checkout/doc/image/DREV84552-sst_d03_01hrs.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_Front_Range_Regional-frontrange_conus.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_Front_Range_Regional-frontrange_conus.png
===================================================================
--- checkout/doc/image/GRID_Front_Range_Regional-frontrange_conus.png	(nonexistent)
+++ checkout/doc/image/GRID_Front_Range_Regional-frontrange_conus.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_Front_Range_Regional-frontrange_conus.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV87779-reanalysis_tec_0317.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV87779-reanalysis_tec_0317.png
===================================================================
--- checkout/doc/image/DREV87779-reanalysis_tec_0317.png	(nonexistent)
+++ checkout/doc/image/DREV87779-reanalysis_tec_0317.png	(revision 94669)

Property changes on: checkout/doc/image/DREV87779-reanalysis_tec_0317.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/directory-idea.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/directory-idea.png
===================================================================
--- checkout/doc/image/directory-idea.png	(nonexistent)
+++ checkout/doc/image/directory-idea.png	(revision 94669)

Property changes on: checkout/doc/image/directory-idea.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_lat.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_lat.png
===================================================================
--- checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_lat.png	(nonexistent)
+++ checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_lat.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_lat.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_Reduced_T574-T574r_global.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_Reduced_T574-T574r_global.png
===================================================================
--- checkout/doc/image/GRID_Reduced_T574-T574r_global.png	(nonexistent)
+++ checkout/doc/image/GRID_Reduced_T574-T574r_global.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_Reduced_T574-T574r_global.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_Front_Range_Regional-frontrange_google_earth.jpg
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_Front_Range_Regional-frontrange_google_earth.jpg
===================================================================
--- checkout/doc/image/GRID_Front_Range_Regional-frontrange_google_earth.jpg	(nonexistent)
+++ checkout/doc/image/GRID_Front_Range_Regional-frontrange_google_earth.jpg	(revision 94669)

Property changes on: checkout/doc/image/GRID_Front_Range_Regional-frontrange_google_earth.jpg
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV90957_d01_sst_001hrs.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV90957_d01_sst_001hrs.png
===================================================================
--- checkout/doc/image/DREV90957_d01_sst_001hrs.png	(nonexistent)
+++ checkout/doc/image/DREV90957_d01_sst_001hrs.png	(revision 94669)

Property changes on: checkout/doc/image/DREV90957_d01_sst_001hrs.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV87779-tec_0317.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV87779-tec_0317.png
===================================================================
--- checkout/doc/image/DREV87779-tec_0317.png	(nonexistent)
+++ checkout/doc/image/DREV87779-tec_0317.png	(revision 94669)

Property changes on: checkout/doc/image/DREV87779-tec_0317.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_Front_Range_Regional-frontrange_regional.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_Front_Range_Regional-frontrange_regional.png
===================================================================
--- checkout/doc/image/GRID_Front_Range_Regional-frontrange_regional.png	(nonexistent)
+++ checkout/doc/image/GRID_Front_Range_Regional-frontrange_regional.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_Front_Range_Regional-frontrange_regional.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV93202-shade_LIS_SoilMoist_tavg_1.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV93202-shade_LIS_SoilMoist_tavg_1.gif
===================================================================
--- checkout/doc/image/DREV93202-shade_LIS_SoilMoist_tavg_1.gif	(nonexistent)
+++ checkout/doc/image/DREV93202-shade_LIS_SoilMoist_tavg_1.gif	(revision 94669)

Property changes on: checkout/doc/image/DREV93202-shade_LIS_SoilMoist_tavg_1.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/cmip5-arrangement.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/cmip5-arrangement.png
===================================================================
--- checkout/doc/image/cmip5-arrangement.png	(nonexistent)
+++ checkout/doc/image/cmip5-arrangement.png	(revision 94669)

Property changes on: checkout/doc/image/cmip5-arrangement.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_msk.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_msk.png
===================================================================
--- checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_msk.png	(nonexistent)
+++ checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_msk.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_msk.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV80567-ocnspeed_120h.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV80567-ocnspeed_120h.gif
===================================================================
--- checkout/doc/image/DREV80567-ocnspeed_120h.gif	(nonexistent)
+++ checkout/doc/image/DREV80567-ocnspeed_120h.gif	(revision 94669)

Property changes on: checkout/doc/image/DREV80567-ocnspeed_120h.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV70089-lnd_temperature_of_soil_layer_1.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV70089-lnd_temperature_of_soil_layer_1.png
===================================================================
--- checkout/doc/image/DREV70089-lnd_temperature_of_soil_layer_1.png	(nonexistent)
+++ checkout/doc/image/DREV70089-lnd_temperature_of_soil_layer_1.png	(revision 94669)

Property changes on: checkout/doc/image/DREV70089-lnd_temperature_of_soil_layer_1.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_mom5_1deg_tripole-ocnlon.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_mom5_1deg_tripole-ocnlon.gif
===================================================================
--- checkout/doc/image/GRID_mom5_1deg_tripole-ocnlon.gif	(nonexistent)
+++ checkout/doc/image/GRID_mom5_1deg_tripole-ocnlon.gif	(revision 94669)

Property changes on: checkout/doc/image/GRID_mom5_1deg_tripole-ocnlon.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-uwn_wam.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-uwn_wam.png
===================================================================
--- checkout/doc/image/DREV76675-uwn_wam.png	(nonexistent)
+++ checkout/doc/image/DREV76675-uwn_wam.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-uwn_wam.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV80567-ocnspeed_new_120h.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV80567-ocnspeed_new_120h.gif
===================================================================
--- checkout/doc/image/DREV80567-ocnspeed_new_120h.gif	(nonexistent)
+++ checkout/doc/image/DREV80567-ocnspeed_new_120h.gif	(revision 94669)

Property changes on: checkout/doc/image/DREV80567-ocnspeed_new_120h.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_Reduced_T574-T574r_conus.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_Reduced_T574-T574r_conus.png
===================================================================
--- checkout/doc/image/GRID_Reduced_T574-T574r_conus.png	(nonexistent)
+++ checkout/doc/image/GRID_Reduced_T574-T574r_conus.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_Reduced_T574-T574r_conus.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV77954-2way_minus_1way_diff_sst.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV77954-2way_minus_1way_diff_sst.png
===================================================================
--- checkout/doc/image/DREV77954-2way_minus_1way_diff_sst.png	(nonexistent)
+++ checkout/doc/image/DREV77954-2way_minus_1way_diff_sst.png	(revision 94669)

Property changes on: checkout/doc/image/DREV77954-2way_minus_1way_diff_sst.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/nems-calls.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/nems-calls.png
===================================================================
--- checkout/doc/image/nems-calls.png	(nonexistent)
+++ checkout/doc/image/nems-calls.png	(revision 94669)

Property changes on: checkout/doc/image/nems-calls.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV93202-streamflow.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV93202-streamflow.gif
===================================================================
--- checkout/doc/image/DREV93202-streamflow.gif	(nonexistent)
+++ checkout/doc/image/DREV93202-streamflow.gif	(revision 94669)

Property changes on: checkout/doc/image/DREV93202-streamflow.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV77954-sst_domain01_nonests_1hr.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV77954-sst_domain01_nonests_1hr.png
===================================================================
--- checkout/doc/image/DREV77954-sst_domain01_nonests_1hr.png	(nonexistent)
+++ checkout/doc/image/DREV77954-sst_domain01_nonests_1hr.png	(revision 94669)

Property changes on: checkout/doc/image/DREV77954-sst_domain01_nonests_1hr.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-nt_wam.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-nt_wam.png
===================================================================
--- checkout/doc/image/DREV76675-nt_wam.png	(nonexistent)
+++ checkout/doc/image/DREV76675-nt_wam.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-nt_wam.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV77954-diff_sst_end_01.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV77954-diff_sst_end_01.png
===================================================================
--- checkout/doc/image/DREV77954-diff_sst_end_01.png	(nonexistent)
+++ checkout/doc/image/DREV77954-diff_sst_end_01.png	(revision 94669)

Property changes on: checkout/doc/image/DREV77954-diff_sst_end_01.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV87779-ipe_tec_0316.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV87779-ipe_tec_0316.png
===================================================================
--- checkout/doc/image/DREV87779-ipe_tec_0316.png	(nonexistent)
+++ checkout/doc/image/DREV87779-ipe_tec_0316.png	(revision 94669)

Property changes on: checkout/doc/image/DREV87779-ipe_tec_0316.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV87779-ipe_tec_0317.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV87779-ipe_tec_0317.png
===================================================================
--- checkout/doc/image/DREV87779-ipe_tec_0317.png	(nonexistent)
+++ checkout/doc/image/DREV87779-ipe_tec_0317.png	(revision 94669)

Property changes on: checkout/doc/image/DREV87779-ipe_tec_0317.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV93202-shade_MED_FROM_ATM_inst_temp_height_lowest_4.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV93202-shade_MED_FROM_ATM_inst_temp_height_lowest_4.gif
===================================================================
--- checkout/doc/image/DREV93202-shade_MED_FROM_ATM_inst_temp_height_lowest_4.gif	(nonexistent)
+++ checkout/doc/image/DREV93202-shade_MED_FROM_ATM_inst_temp_height_lowest_4.gif	(revision 94669)

Property changes on: checkout/doc/image/DREV93202-shade_MED_FROM_ATM_inst_temp_height_lowest_4.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV87779-ipe_tec_0318.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV87779-ipe_tec_0318.png
===================================================================
--- checkout/doc/image/DREV87779-ipe_tec_0318.png	(nonexistent)
+++ checkout/doc/image/DREV87779-ipe_tec_0318.png	(revision 94669)

Property changes on: checkout/doc/image/DREV87779-ipe_tec_0318.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-o_ipe.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-o_ipe.png
===================================================================
--- checkout/doc/image/DREV76675-o_ipe.png	(nonexistent)
+++ checkout/doc/image/DREV76675-o_ipe.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-o_ipe.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV87779-ipe_tec_0319.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV87779-ipe_tec_0319.png
===================================================================
--- checkout/doc/image/DREV87779-ipe_tec_0319.png	(nonexistent)
+++ checkout/doc/image/DREV87779-ipe_tec_0319.png	(revision 94669)

Property changes on: checkout/doc/image/DREV87779-ipe_tec_0319.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_Front_Range_Regional-frontrange_global.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_Front_Range_Regional-frontrange_global.png
===================================================================
--- checkout/doc/image/GRID_Front_Range_Regional-frontrange_global.png	(nonexistent)
+++ checkout/doc/image/GRID_Front_Range_Regional-frontrange_global.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_Front_Range_Regional-frontrange_global.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-nwn_ipe.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-nwn_ipe.png
===================================================================
--- checkout/doc/image/DREV76675-nwn_ipe.png	(nonexistent)
+++ checkout/doc/image/DREV76675-nwn_ipe.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-nwn_ipe.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV84552-sst_d03_48hrs.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV84552-sst_d03_48hrs.png
===================================================================
--- checkout/doc/image/DREV84552-sst_d03_48hrs.png	(nonexistent)
+++ checkout/doc/image/DREV84552-sst_d03_48hrs.png	(revision 94669)

Property changes on: checkout/doc/image/DREV84552-sst_d03_48hrs.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV93202-shade_MEDIATOR_moisture_content_of_soil_layer_1_1.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV93202-shade_MEDIATOR_moisture_content_of_soil_layer_1_1.gif
===================================================================
--- checkout/doc/image/DREV93202-shade_MEDIATOR_moisture_content_of_soil_layer_1_1.gif	(nonexistent)
+++ checkout/doc/image/DREV93202-shade_MEDIATOR_moisture_content_of_soil_layer_1_1.gif	(revision 94669)

Property changes on: checkout/doc/image/DREV93202-shade_MEDIATOR_moisture_content_of_soil_layer_1_1.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_dlat.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_dlat.gif
===================================================================
--- checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_dlat.gif	(nonexistent)
+++ checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_dlat.gif	(revision 94669)

Property changes on: checkout/doc/image/GRID_mom5_0p5deg_tripole-mom0.5_dlat.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV90957_d01_sst_048hrs.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV90957_d01_sst_048hrs.png
===================================================================
--- checkout/doc/image/DREV90957_d01_sst_048hrs.png	(nonexistent)
+++ checkout/doc/image/DREV90957_d01_sst_048hrs.png	(revision 94669)

Property changes on: checkout/doc/image/DREV90957_d01_sst_048hrs.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV93202-shade_MED_TO_HYD_temperature_of_soil_layer_1_4.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV93202-shade_MED_TO_HYD_temperature_of_soil_layer_1_4.gif
===================================================================
--- checkout/doc/image/DREV93202-shade_MED_TO_HYD_temperature_of_soil_layer_1_4.gif	(nonexistent)
+++ checkout/doc/image/DREV93202-shade_MED_TO_HYD_temperature_of_soil_layer_1_4.gif	(revision 94669)

Property changes on: checkout/doc/image/DREV93202-shade_MED_TO_HYD_temperature_of_soil_layer_1_4.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_lat.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_lat.png
===================================================================
--- checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_lat.png	(nonexistent)
+++ checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_lat.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_lat.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV93202-shade_MEDIATOR_inst_spec_humid_height_lowest_1.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV93202-shade_MEDIATOR_inst_spec_humid_height_lowest_1.gif
===================================================================
--- checkout/doc/image/DREV93202-shade_MEDIATOR_inst_spec_humid_height_lowest_1.gif	(nonexistent)
+++ checkout/doc/image/DREV93202-shade_MEDIATOR_inst_spec_humid_height_lowest_1.gif	(revision 94669)

Property changes on: checkout/doc/image/DREV93202-shade_MEDIATOR_inst_spec_humid_height_lowest_1.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-n2_ipe.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-n2_ipe.png
===================================================================
--- checkout/doc/image/DREV76675-n2_ipe.png	(nonexistent)
+++ checkout/doc/image/DREV76675-n2_ipe.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-n2_ipe.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_Reduced_T574-lis_t126_frontrange.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_Reduced_T574-lis_t126_frontrange.png
===================================================================
--- checkout/doc/image/GRID_Reduced_T574-lis_t126_frontrange.png	(nonexistent)
+++ checkout/doc/image/GRID_Reduced_T574-lis_t126_frontrange.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_Reduced_T574-lis_t126_frontrange.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV93202-shade_LIS_Qair_f_tavg_1.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV93202-shade_LIS_Qair_f_tavg_1.gif
===================================================================
--- checkout/doc/image/DREV93202-shade_LIS_Qair_f_tavg_1.gif	(nonexistent)
+++ checkout/doc/image/DREV93202-shade_LIS_Qair_f_tavg_1.gif	(revision 94669)

Property changes on: checkout/doc/image/DREV93202-shade_LIS_Qair_f_tavg_1.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_msk.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_msk.png
===================================================================
--- checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_msk.png	(nonexistent)
+++ checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_msk.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_msk.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV87779-ipe_tec_0320.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV87779-ipe_tec_0320.png
===================================================================
--- checkout/doc/image/DREV87779-ipe_tec_0320.png	(nonexistent)
+++ checkout/doc/image/DREV87779-ipe_tec_0320.png	(revision 94669)

Property changes on: checkout/doc/image/DREV87779-ipe_tec_0320.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_CICE_gx3-lon.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_CICE_gx3-lon.gif
===================================================================
--- checkout/doc/image/GRID_CICE_gx3-lon.gif	(nonexistent)
+++ checkout/doc/image/GRID_CICE_gx3-lon.gif	(revision 94669)

Property changes on: checkout/doc/image/GRID_CICE_gx3-lon.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV87779-ipe_tec_0321.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV87779-ipe_tec_0321.png
===================================================================
--- checkout/doc/image/DREV87779-ipe_tec_0321.png	(nonexistent)
+++ checkout/doc/image/DREV87779-ipe_tec_0321.png	(revision 94669)

Property changes on: checkout/doc/image/DREV87779-ipe_tec_0321.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV84552-sst_d02_01hrs.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV84552-sst_d02_01hrs.png
===================================================================
--- checkout/doc/image/DREV84552-sst_d02_01hrs.png	(nonexistent)
+++ checkout/doc/image/DREV84552-sst_d02_01hrs.png	(revision 94669)

Property changes on: checkout/doc/image/DREV84552-sst_d02_01hrs.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-ewn_nam.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-ewn_nam.png
===================================================================
--- checkout/doc/image/DREV76675-ewn_nam.png	(nonexistent)
+++ checkout/doc/image/DREV76675-ewn_nam.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-ewn_nam.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV89738-validate-1.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV89738-validate-1.png
===================================================================
--- checkout/doc/image/DREV89738-validate-1.png	(nonexistent)
+++ checkout/doc/image/DREV89738-validate-1.png	(revision 94669)

Property changes on: checkout/doc/image/DREV89738-validate-1.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV89738-validate-2.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV89738-validate-2.png
===================================================================
--- checkout/doc/image/DREV89738-validate-2.png	(nonexistent)
+++ checkout/doc/image/DREV89738-validate-2.png	(revision 94669)

Property changes on: checkout/doc/image/DREV89738-validate-2.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV70089-atm2hyd_regrid_inst_down_lw_flx.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV70089-atm2hyd_regrid_inst_down_lw_flx.png
===================================================================
--- checkout/doc/image/DREV70089-atm2hyd_regrid_inst_down_lw_flx.png	(nonexistent)
+++ checkout/doc/image/DREV70089-atm2hyd_regrid_inst_down_lw_flx.png	(revision 94669)

Property changes on: checkout/doc/image/DREV70089-atm2hyd_regrid_inst_down_lw_flx.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_LIS_T126-lis_t126.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_LIS_T126-lis_t126.png
===================================================================
--- checkout/doc/image/GRID_LIS_T126-lis_t126.png	(nonexistent)
+++ checkout/doc/image/GRID_LIS_T126-lis_t126.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_LIS_T126-lis_t126.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV90957_d03_sst_001hrs.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV90957_d03_sst_001hrs.png
===================================================================
--- checkout/doc/image/DREV90957_d03_sst_001hrs.png	(nonexistent)
+++ checkout/doc/image/DREV90957_d03_sst_001hrs.png	(revision 94669)

Property changes on: checkout/doc/image/DREV90957_d03_sst_001hrs.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_Reduced_T574-T574r_frontrange.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_Reduced_T574-T574r_frontrange.png
===================================================================
--- checkout/doc/image/GRID_Reduced_T574-T574r_frontrange.png	(nonexistent)
+++ checkout/doc/image/GRID_Reduced_T574-T574r_frontrange.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_Reduced_T574-T574r_frontrange.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_Reduced_T574-lis_t126_conus.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_Reduced_T574-lis_t126_conus.png
===================================================================
--- checkout/doc/image/GRID_Reduced_T574-lis_t126_conus.png	(nonexistent)
+++ checkout/doc/image/GRID_Reduced_T574-lis_t126_conus.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_Reduced_T574-lis_t126_conus.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV84552-sst_d01_48hrs_b.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV84552-sst_d01_48hrs_b.png
===================================================================
--- checkout/doc/image/DREV84552-sst_d01_48hrs_b.png	(nonexistent)
+++ checkout/doc/image/DREV84552-sst_d01_48hrs_b.png	(revision 94669)

Property changes on: checkout/doc/image/DREV84552-sst_d01_48hrs_b.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV84205-driver-image.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV84205-driver-image.png
===================================================================
--- checkout/doc/image/DREV84205-driver-image.png	(nonexistent)
+++ checkout/doc/image/DREV84205-driver-image.png	(revision 94669)

Property changes on: checkout/doc/image/DREV84205-driver-image.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_lon.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_lon.png
===================================================================
--- checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_lon.png	(nonexistent)
+++ checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_lon.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_HYCOM_POP_glbx1v6-hycom_GLBx_lon.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-o_wam.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-o_wam.png
===================================================================
--- checkout/doc/image/DREV76675-o_wam.png	(nonexistent)
+++ checkout/doc/image/DREV76675-o_wam.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-o_wam.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_mom5_1deg_tripole-ocnlat.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_mom5_1deg_tripole-ocnlat.gif
===================================================================
--- checkout/doc/image/GRID_mom5_1deg_tripole-ocnlat.gif	(nonexistent)
+++ checkout/doc/image/GRID_mom5_1deg_tripole-ocnlat.gif	(revision 94669)

Property changes on: checkout/doc/image/GRID_mom5_1deg_tripole-ocnlat.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/cime-nems-mediator.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/cime-nems-mediator.png
===================================================================
--- checkout/doc/image/cime-nems-mediator.png	(nonexistent)
+++ checkout/doc/image/cime-nems-mediator.png	(revision 94669)

Property changes on: checkout/doc/image/cime-nems-mediator.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV77954-sst_domain01_nonests_48hrs.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV77954-sst_domain01_nonests_48hrs.png
===================================================================
--- checkout/doc/image/DREV77954-sst_domain01_nonests_48hrs.png	(nonexistent)
+++ checkout/doc/image/DREV77954-sst_domain01_nonests_48hrs.png	(revision 94669)

Property changes on: checkout/doc/image/DREV77954-sst_domain01_nonests_48hrs.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/appbuilder-step0.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/appbuilder-step0.png
===================================================================
--- checkout/doc/image/appbuilder-step0.png	(nonexistent)
+++ checkout/doc/image/appbuilder-step0.png	(revision 94669)

Property changes on: checkout/doc/image/appbuilder-step0.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/appbuilder-step1.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/appbuilder-step1.png
===================================================================
--- checkout/doc/image/appbuilder-step1.png	(nonexistent)
+++ checkout/doc/image/appbuilder-step1.png	(revision 94669)

Property changes on: checkout/doc/image/appbuilder-step1.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/appbuilder-step2.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/appbuilder-step2.png
===================================================================
--- checkout/doc/image/appbuilder-step2.png	(nonexistent)
+++ checkout/doc/image/appbuilder-step2.png	(revision 94669)

Property changes on: checkout/doc/image/appbuilder-step2.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-nwn_wam.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-nwn_wam.png
===================================================================
--- checkout/doc/image/DREV76675-nwn_wam.png	(nonexistent)
+++ checkout/doc/image/DREV76675-nwn_wam.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-nwn_wam.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/appbuilder-step3.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/appbuilder-step3.png
===================================================================
--- checkout/doc/image/appbuilder-step3.png	(nonexistent)
+++ checkout/doc/image/appbuilder-step3.png	(revision 94669)

Property changes on: checkout/doc/image/appbuilder-step3.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/appbuilder-step4.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/appbuilder-step4.png
===================================================================
--- checkout/doc/image/appbuilder-step4.png	(nonexistent)
+++ checkout/doc/image/appbuilder-step4.png	(revision 94669)

Property changes on: checkout/doc/image/appbuilder-step4.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV70089-atm_inst_down_lw_flx.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV70089-atm_inst_down_lw_flx.png
===================================================================
--- checkout/doc/image/DREV70089-atm_inst_down_lw_flx.png	(nonexistent)
+++ checkout/doc/image/DREV70089-atm_inst_down_lw_flx.png	(revision 94669)

Property changes on: checkout/doc/image/DREV70089-atm_inst_down_lw_flx.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV53978-IPEtemp100km.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV53978-IPEtemp100km.png
===================================================================
--- checkout/doc/image/DREV53978-IPEtemp100km.png	(nonexistent)
+++ checkout/doc/image/DREV53978-IPEtemp100km.png	(revision 94669)

Property changes on: checkout/doc/image/DREV53978-IPEtemp100km.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/DREV76675-o2_ipe.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/DREV76675-o2_ipe.png
===================================================================
--- checkout/doc/image/DREV76675-o2_ipe.png	(nonexistent)
+++ checkout/doc/image/DREV76675-o2_ipe.png	(revision 94669)

Property changes on: checkout/doc/image/DREV76675-o2_ipe.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_HYCOM_POP_glbx1v6-depth_POP1v6_01.040.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_HYCOM_POP_glbx1v6-depth_POP1v6_01.040.png
===================================================================
--- checkout/doc/image/GRID_HYCOM_POP_glbx1v6-depth_POP1v6_01.040.png	(nonexistent)
+++ checkout/doc/image/GRID_HYCOM_POP_glbx1v6-depth_POP1v6_01.040.png	(revision 94669)

Property changes on: checkout/doc/image/GRID_HYCOM_POP_glbx1v6-depth_POP1v6_01.040.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/GRID_mom5_1deg_tripole-ocnmask.gif
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/GRID_mom5_1deg_tripole-ocnmask.gif
===================================================================
--- checkout/doc/image/GRID_mom5_1deg_tripole-ocnmask.gif	(nonexistent)
+++ checkout/doc/image/GRID_mom5_1deg_tripole-ocnmask.gif	(revision 94669)

Property changes on: checkout/doc/image/GRID_mom5_1deg_tripole-ocnmask.gif
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/image/little-nems-logo.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: checkout/doc/image/little-nems-logo.png
===================================================================
--- checkout/doc/image/little-nems-logo.png	(nonexistent)
+++ checkout/doc/image/little-nems-logo.png	(revision 94669)

Property changes on: checkout/doc/image/little-nems-logo.png
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: checkout/doc/GRID_mom5_0p5deg_tripole.md
===================================================================
--- checkout/doc/GRID_mom5_0p5deg_tripole.md	(nonexistent)
+++ checkout/doc/GRID_mom5_0p5deg_tripole.md	(revision 94669)
@@ -0,0 +1,34 @@
+MOM5/CICE 0.5 degree grid {#GRID_mom5_0p5deg_tripole}
+=========================
+
+Description
+-----------
+
+This page describes a MOM5 tripolar grid at 0.5 degree resolution.
+The grid is built around half degree spacing; the latitudinal grid
+spacing at the equator is 0.25 degrees, and the tripole begins at
+about 65N.
+ 
+| Long Name                     | Name   | Value  |
+| :---------------------------- | :----- | :----- |
+| Number of longitudinal points | N<sub>i</sub>   | 720    |
+| Number of latitudinal points  | N<sub>j</sub>   | 410    |
+| Minimum longitude             | &nbsp; | 0.     |
+| Maximum longitude             | &nbsp; | 360.   |
+| Minimum latitude              | &nbsp; | -80.75 |
+| Maximum latitude              | &nbsp; | 89.75  | 
+
+Longitude Plot
+--------------
+
+\image html GRID_mom5_0p5deg_tripole-mom0.5_lon.gif
+ 
+Latitude/Mask Plot
+------------------
+
+\image html GRID_mom5_0p5deg_tripole-mom0.5_lat_mask.gif
+ 
+Latitudinal Grid Spacing Plot
+-----------------------------
+
+\image html GRID_mom5_0p5deg_tripole-mom0.5_dlat.gif
Index: checkout/doc/DREV88884.md
===================================================================
--- checkout/doc/DREV88884.md	(nonexistent)
+++ checkout/doc/DREV88884.md	(revision 94669)
@@ -0,0 +1,156 @@
+DREV88884: UGCS-Seasonal 0.4  {#milestone_DREV88884}
+============================
+
+\date Last revised: 2/27/2017
+
+Repository URL
+--------------
+
+* https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Seasonal/trunk
+
+Important notes about this revision
+-----------------------------------
+
+\note We have noticed that Gaea front-end nodes are set up
+differently. DREV88884 was tested on gaea10. We recommend using the
+gaea10 front-end node when building on Gaea.
+
+\note In order to run this milestone on Theia you must have a `~/.cshrc`
+file in your home directory that contains the line `limit stacksize
+unlimited`. The `~/.cshrc` with this line must exist no matter what
+shell you are using! If a `~/.tcshrc` file exists, the `limit stacksize
+unlimited` line must be added to it.
+
+Description
+-----------
+
+\todo Link to revisions mentioned below.
+
+This is the port of UGCS-Seasonal milestone 0.3 to Gaea. The revision
+number of this port is DREV88884. DREV88884 is code-wise identical to
+milestone 0.3, and on Theia is expected to behave identically to
+DREV80567 or the updated DREV87736.
+
+Run Sequences
+-------------
+
+UGCS-Seasonal includes two run sequences, a cold start sequence and a
+time integration sequence.
+
+Cold start sequence: The first run sequence, shown below, initializes
+components using a minimal set of files ingested by GSM. The cold
+start sequence only needs to run for a half hour. However, it runs for
+an hour because EMC scripts cannot run for less than an hour.
+
+    runSeq::
+      @1800.0
+        @600.0
+          MED MedPhase_prep_atm
+          MED -> ATM :remapMethod=redist
+          ATM
+          ATM -> MED :remapMethod=redist
+          MED MedPhase_prep_ice
+          MED -> ICE :remapMethod=redist
+          ICE
+          ICE -> MED :remapMethod=redist
+          MED MedPhase_atm_ocn_flux
+          MED MedPhase_accum_fast
+        @
+        MED MedPhase_prep_ocn
+        MED -> OCN :remapMethod=redist
+        OCN
+        OCN -> MED :remapMethod=redist
+      @
+    ::
+
+Time integration sequence: The second run sequence, shown below, is
+for the time integration loop. It is initialized by restart files
+generated by the cold start sequence. As in UGCS-Seasonal 0.1, there
+is a fast and a slow loop, at 10 minutes (the @600.0 second block) and
+30 minutes (the @1800.0 second block), respectively.
+
+    runSeq::
+      @1800.0
+        MED MedPhase_prep_ocn
+        MED -> OCN :remapMethod=redist
+        OCN
+        @600.0
+          MED MedPhase_prep_ice
+          MED MedPhase_prep_atm
+          MED -> ATM :remapMethod=redist
+          MED -> ICE :remapMethod=redist
+          ATM
+          ICE
+          ATM -> MED :remapMethod=redist
+          ICE -> MED :remapMethod=redist
+          MED MedPhase_atm_ocn_flux
+          MED MedPhase_accum_fast
+        @
+        OCN -> MED :remapMethod=redist
+        MED MedPhase_write_restart
+      @
+    ::
+
+Validation
+----------
+
+This revision was shown to be bit-for-bit with DREV80567/87736 on Theia.
+
+Download, Build, Run, and Restart
+---------------------------------
+
+### Download and Build
+
+\todo Add link to relevant portions of NEMS guide
+
+Instructions on how to download and build a NEMS application are
+discussed in the
+\ref documentation "NEMS User's Guide and Reference".
+Running UGCS-Seasonal with a cold start requires additional
+instructions, below.
+
+Please see the "Important notes about this revision" section at the
+top of this page.
+
+### Cold Start and Run
+
+Compsets that can be run with this revision are:
+
+ * `cfsr%20150401_1hr_nems%cold_gsm%slg%T574_cice%0.5_mom5%0.5`
+ * `cfsr%20150401_1day_nems_gsm%slg%T574_cice%0.5_mom5%0.5`
+
+To run compsets, start within the UGCS-Seasonal directory and execute
+the NEMS CompsetRun tool by typing:
+
+    ./NEMS/NEMSCompsetRun -compset <compset name>
+
+If you leave off the `-compset` argument, CompsetRun will read the
+compset list from a local file.
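+
+For example, to run the cold start compset listed above:
+
+    ./NEMS/NEMSCompsetRun -compset cfsr%20150401_1hr_nems%cold_gsm%slg%T574_cice%0.5_mom5%0.5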
+
+To initialize a new case of the UGCS-Seasonal from a cold start, run
+the cold start compset,
+`cfsr%20150401_1hr_nems%cold_gsm%slg%T574_cice%0.5_mom5%0.5`, to
+generate initial mediator restart files.  That compset runs the
+atm/ice/ocean sequentially for 1 hour. It will generate some initial
+mediator restart files consisting of initial values for coupling
+fields consistent with the current atmosphere, ocean, and sea ice
+conditions.  You then use those initial mediator files to start up a
+standard run with the same model initial conditions and initial model
+date as the cold start run.  To do this, run CompsetRun as specified
+above with the standard compset,
+`cfsr%20150401_1day_nems_gsm%slg%T574_cice%0.5_mom5%0.5`.
+
+After running the cold start compset, go into NEMS/NEMSCompsetRun and
+modify `setup_med_nems` to pre-stage the cold start mediator restart
+files instead of whatever files are set by default.  This is done in a
+section that looks like:
+
+    cp -f ${DATADIR}/MED_NEMS/${nemsgrid}${nemsgridinp}/* ${RUNDIR}
+    #  cp -f /scratch3/NCEPDEV/stmp1/Anthony.Craig/UGCS-Seasonal.r72808/20150401short_nemscold_gsm_cice_mom5/mediator*restart* ${RUNDIR}
+
+(You will need to adjust the directory, compset, and revision for this milestone.)
+
+Comment out the first line and uncomment the second line. In the
+second line, set the path to the cold start run directory where the
+cold start case just ran.  This will copy the mediator restart files
+from your cold start run directory into the new run directory.
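+
+For example, after the edit that section might look like the
+following (a sketch; the path shown is a placeholder that must point
+to your own cold start run directory):
+
+    #  cp -f ${DATADIR}/MED_NEMS/${nemsgrid}${nemsgridinp}/* ${RUNDIR}
+    cp -f /path/to/coldstart/rundir/mediator*restart* ${RUNDIR}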
+
+Once the cold start is done and the NEMSCompsetRun is modified, run a
+standard compset like
+`cfsr%20150401_1day_nems_gsm%slg%T574_cice%0.5_mom5%0.5` to advance
+the model from the initial conditions.  The system will start with the
+same atmosphere, ocean, and ice initial conditions as the cold start
+run plus the new mediator restart files, and the model will run
+concurrently.
Index: checkout/doc/DREV87779.md
===================================================================
--- checkout/doc/DREV87779.md	(nonexistent)
+++ checkout/doc/DREV87779.md	(revision 94669)
@@ -0,0 +1,140 @@
+DREV87779: WAM-IPE 0.5 Physically Realistic One-Way WAM to IPE Coupling {#milestone_DREV87779}
+=======================================================================
+
+\date Last revised: 02/03/2017
+
+Repository URL
+--------------
+
+ * https://svnemc.ncep.noaa.gov/projects/ipe/WAM-IPE
+
+Description
+-----------
+
+This milestone is an internal release of a NEMS application with two
+active components. The active components are the Ionosphere
+Plasmasphere Electrodynamics (IPE) model and the Whole Atmosphere
+Model (WAM). All field exchanges in the system occur through the 
+\ref sw_mediator "space weather mediator". 
+This is a scientific milestone to ensure that the WAM-IPE coupled
+system is behaving correctly with data flowing one-way from WAM to
+IPE. In this revision WAM runs on a global 3D reduced Gaussian grid
+(WAM grid). The horizontal resolution is T62. The vertical component
+of this grid is 150 levels in pressure and has to be converted to
+height in order to couple with IPE. Because the relationship between
+pressure and height varies during a run, the actual heights of the
+levels of the WAM grid varies during a run. The maximum height of the
+WAM grid is approximately 800 km.  In this revision IPE runs on an 80
+x 170 flux tube grid (
+\ref GRID_IPE "IPE grid") that extends up to approximately
+360,000 km. Because of the difference in heights, the WAM grid only
+overlaps with the bottom of the IPE grid. The amount of the overlap
+depends on the current height of the WAM grid.
+
+\todo reference WAM-IPE in above paragraph
+
+NUOPC "caps", which are essentially wrappers for the coupling
+interface, are provided for all model components, allowing these
+components to work in NEMS and other NUOPC-compliant systems. For
+example, the "IPE cap" allows the IPE model to work as a part of the
+NEMS system.
+
+In this release only a subset of possible fields are exchanged between
+WAM and IPE. The 
+[coupling fields spreadsheet](https://docs.google.com/a/noaa.gov/spreadsheets/d/1XaQZ-sli7AlZBDLXb30AvNladSuRlTVx5LZb22OMTe4/pubhtml)
+indicates in detail the status of the different coupled fields:
+
+ * The WAM output fields are extracted each time step during the dynamics calculation part of the model code. The extraction and calculation necessary to generate the fields required by IPE are done by the subroutine get_w_z(). For efficiency's sake this is only done when WAM is running coupled to IPE, which is indicated by setting the wam_ipe_coupling namelist variable to true (see the sketch after this list). The fields are stored in the get_variables_for_WAM_IPE_coupling module. From there they are extracted by the WAM cap and passed out to the mediator.
+
+ * The IPE input fields advertised only represent a subset of the full fields used by IPE. The reason for this is that the WAM and IPE grids only overlap at the bottom of the IPE grid. Transferring the entire IPE grid to the mediator would be inefficient, so only the part that potentially overlaps with WAM is transferred and used for regridding. In the IPE cap the fields received from the mediator are copied into a variable (wamfield) which has the full index space of the IPE fields. This transfer is complex because both representations of the data are collapsed to 1D for efficiency's sake. Once in the wamfield variable the data is extrapolated to fill the empty region. If acting as part of a coupled system (as in this milestone), IPE uses the data from wamfield as part of its computations.
+
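+A minimal sketch of the namelist setting described in the first
+bullet above (the enclosing namelist group and input file are not
+named in this document, so only the variable assignment is shown):
+
+    wam_ipe_coupling = .true.   ! enable WAM field extraction for WAM->IPE coupling
+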
+Build & Run
+-----------
+
+Instructions on how to build and run specific code revisions
+(e.g. this milestone revision) and the supported compsets are provided
+on the WAM-IPE Build & Run page.  
+
+\todo reference the wam-ipe build and run page
+
+Run Sequence
+------------
+
+The NEMS run-time configuration for the default Regional configuration
+is provided below.  For details on the run sequence in general please
+refer to the 
+\ref configuring
+and
+\ref architecture
+pages.
+
+    runSeq::
+     @21600.0
+       ATM -> MED :remapMethod=redist
+       MED
+       MED -> IPM :remapMethod=redist
+       ATM
+       IPM
+     @
+    ::
+ 
+Validation
+----------
+
+During this test both components run for a 6-day simulation. IPE is
+driven by WAM fields: neutral temperature and the 3 components of the
+neutral wind. The coupling (calling) frequency is 3 minutes. The run
+was made for 2013-03-16 under the intermediate solar flux condition
+(F107=120).
+
+A test report is available for this validation run: \subpage REPORT-20170204-WAM-IPE-1way
+
+### Comparison with Observations
+
+Figures 1 and 2 show that Total Electron Content (TEC) as calculated
+by IPE has reasonable agreement with TEC obtained from reanalysis data
+for the same date. The reanalysis data is obtained by using TEC
+observed by low earth orbit satellites, including COSMIC, GRACE,
+TerraSAR, Metop-A, Metop-B, Jason-2, and the Global Navigation
+Satellite System (GNSS) TEC [1]. The resolution is 5deg x 10deg x 30km
+and 1 hour. In both images, TEC is plotted as a function of geographic
+latitude and longitude.
+
+\image html DREV87779-reanalysis_tec_0317.png  Figure 1: Total Electron Content for 3/17/2013 from Reanalysis
+
+\image html DREV87779-ipe_tec_0317.png Figure 2: Total Electron Content for 3/17/2013 from IPE model
+
+ 
+### Demonstration of Variability Introduced from WAM
+
+Figures 3-8 show the TEC for each day of the 6-day run as a function
+of geographic longitude and latitude. They demonstrate that the
+temporal and spatial evolution of the variability in TEC has been
+generated by the WAM fields, which include forcing from the
+terrestrial weather (via tides, planetary waves, gravity waves,
+etc.). In particular, the structure of the equatorial ionization
+anomaly (EIA, the density peaks on either side of the magnetic
+equator) shows a considerable amount of day-by-day variation due to
+the WAM winds and temperature alone (even without WAM density and
+electrodynamics).
+
+\image html DREV87779-ipe_tec_0316.png Figure 3: Total Electron Content for 3/16/2013 from IPE model
+
+ 
+
+\image html DREV87779-ipe_tec_0317.png Figure 4: Total Electron Content for 3/17/2013 from IPE model
+
+\image html DREV87779-ipe_tec_0318.png Figure 5: Total Electron Content for 3/18/2013 from IPE model
+ 
+\image html DREV87779-ipe_tec_0319.png Figure 6: Total Electron Content for 3/19/2013 from IPE model
+ 
+\image html DREV87779-ipe_tec_0320.png Figure 7: Total Electron Content for 3/20/2013 from IPE model
+
+\image html DREV87779-ipe_tec_0321.png Figure 8: Total Electron Content for 3/21/2013 from IPE model
+ 
+Limitations and Technical Notes
+-------------------------------
+
+ In this milestone, IPE does not use the density fields (O, O2, N2) coming from WAM. The reason for this is that the version of WAM used here does not include the necessary chemistry for the upper atmosphere. Furthermore, the version of IPE used here does not include self-consistent electrodynamics.
+
+ 
+
+1. Yue X. (2015). "Global ionospheric response during March 17-18, 2013 storm by reanalysis and TIEGCM simulation", presentation given at the Space-Atmosphere Interaction Region (SAIR) workshop. 
\ No newline at end of file
Index: checkout/doc/nemsdoc.md
===================================================================
--- checkout/doc/nemsdoc.md	(nonexistent)
+++ checkout/doc/nemsdoc.md	(revision 94669)
@@ -0,0 +1,444 @@
+Unsorted Content {#nemsguide}
+================
+
+\warning This page will soon be removed.  It is slowly being corrected
+and dismantled.  Its contents will reside in other subpages of the
+"Documentation" family of pages.
+
+6.3    NEMS Compsets and CompsetRun
+--------------------------------------
+
+NEMS uses "component sets," or "compsets," to systematically label run configurations. The labels are associated with scripts that pull together all the files and inputs needed to run the specified configurations. Compset labels include which components and mediators are part of the configuration, whether each component is running in a prognostic or test mode, resolutions, and initial conditions. This approach offers a number of benefits:
+* standard runs can be set up easily and consistently
+* it is a very effective way to implement regression testing across a coupled system with many possible combinations of components
+* easy access to configurations with non-prognostic versions of components facilitates controlled experimentation
+
+Compsets were originated by the [Community Earth System Model (CESM)](http://www.cesm.ucar.edu/models/cesm1.2/cesm/doc/modelnl/compsets.html).
+
+[Supported NEMS compsets are listed here](https://docs.google.com/spreadsheets/d/1v9tJb03YuCbwDsXff4M5i6jz4lvBxUrhdImqaGnK_IE/edit#gid=0).
+
+### 6.3.1 Running NEMS with Compsets
+
+NEMS compsets are available for revision 52376 and later versions of NEMS. The front-end to using the compset feature is provided by the NEMSCompsetRun script located under the NEMS root directory.
+
+From within a modeling application, e.g., UGCS-Seasonal, `NEMSCompsetRun` is typically invoked from the application root directory:
+
+    ./NEMS/NEMSCompsetRun
+
+The script will look for file(s) `*.compsetRun` in the root directory and execute the compsets listed therein.
+
+The script can also be invoked from within the NEMS directory itself:
+
+    ./NEMSCompsetRun [COMPSET_LIST_FILE]
+
+or
+
+    ./NEMSCompsetRun -compset COMPSET_FILE
+
+Running this script without the optional `COMPSET_LIST_FILE` argument will execute the compsets that are listed in file `./compsets/default.compsetRun` under the NEMS directory. An alternative list of compsets can be specified by using the `COMPSET_LIST_FILE` argument. The format of this file is very simple:
+
+-# A line starting with # is a comment.
+-# A line starting with DIR= specifies an alternative search directory for individual compset configuration files (explained in more detail below).
+-# Blank lines are ignored.
+-# All other lines are assumed to list a single compset configuration filename at the beginning of the line. The remainder of the line is ignored and can be used for comments.
+
+For example, here is the content of a sample COMPSET_LIST_FILE:
+
+    ### List of compsets ###
+    #####################
+
+    #DIR=  # optionally set this if not using default ./compsets directory
+
+    2013_sbys_nmmb%glob   	 ! stand-alone global NMM-B
+    AMIP_sbys_gsm         	 ! stand-alone GSM - fake example
+    2009_nems_gsm_cice_mom5   ! NEMS mediator coupled GSM-CICE-MOM5
+    2011_sbys_gsm%wam     	 ! stand-alone GSM run as WAM
+    2011_sbys_gsm%wam%ndsl	 ! stand-alone GSM run as WAM with NDSL
+
+For each compset listed, there must be an associated compset configuration file of the same name. By default the `./compsets` subdirectory under the NEMS root directory is searched for compset configuration files. The search directory can be changed by using the `DIR=` directive in the `COMPSET_LIST_FILE`. The format of the compset configuration files is described below.
+
+Calling `NEMSCompsetRun` with the `-compset COMPSET_FILE` option makes it possible to run a single compset without first creating a `COMPSET_LIST_FILE`. The `COMPSET_FILE` must be specified with a path (absolute or relative) so that the compset file can be found by `NEMSCompsetRun`.
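+
+For example (assuming the compset file lives in the default
+`./compsets` directory under the NEMS root):
+
+    ./NEMSCompsetRun -compset ./compsets/2009_nems_gsm_cice_mom5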
+
+**Starting in April 2016, a more detailed specification for NEMS compsets, shown below, was introduced.** It adds the run duration after the initial condition label and includes grid resolution as part of the specifier for each component.
+
+    cfsr%20150401_1hr_nems%cold_gsm%eul%T126_cice%0.5_mom5%0.5
+    cfsr%20150401_1hr_nems%cold_gsm%slg%T126_cice%0.5_mom5%0.5
+    cfsr%20150401_1day_nems_gsm%eul%T126_cice%0.5_mom5%0.5
+    cfsr%20150401_1day_nems_gsm%slg%T126_cice%0.5_mom5%0.5
+    cfsr%20150401_30day_nems_gsm%eul%T126_cice%0.5_mom5%0.5
+    cfsr%20150401_30day_nems_gsm%slg%T126_cice%0.5_mom5%0.5
+
+### 6.3.2   How to Add a New Compset
+
+A new compset is added to the system by adding a new compset configuration file, either in the default location under `./compsets` or in a different location referenced by the `DIR=` setting as explained above. The name of the new configuration file is that of the new compset. The existing default compset configuration files under the `./compsets` subdirectory can be used as templates for new configurations. The compset configuration files represent the connection between the compset-based approach to labeling configurations and the existing NEMS run scripts. The files are short, simple shell scripts that export a number of variables used by the existing NEMS run scripts. Writing a new compset thus requires some knowledge of the standard NEMS run scripts.
+
+#### 6.3.2.1	Compset Nomenclature
+
+On the machine level the name of a compset is not dissected or interpreted. Instead, compset names are simply matched as literal strings against the names of the available compset configuration files. However, there is a nomenclature to be followed when naming compsets. The purpose of this nomenclature is to help a human reader quickly grasp what a specific compset is testing, and to bring some order into the many possible component configurations. The general naming format is:
+
+`caselabel[%optDescriptor][_optRunDuration]_architecture_model1[%opt1[%opt2[...[%optN]]]]_model2[...]_..._modelN[...]`
+
+The `caselabel` may be as simple as a calendar year, like "2009", indicating a specific set of initial conditions. This label can be used freely to indicate specifics about the case being run. For the sake of uniformity the convention is to keep the case label below 16 characters, avoid special characters like underscores, and compound by camel case instead. It can take an option, for example the start date and/or time of an initial condition.
+
+The `optRunDuration` specifies the duration of the run configured by the compset.
+
+The `architecture` specifies the fundamental coupling mode. Details of the run sequence are set inside the compset file through the `nems_configure` variable (see "Compset Configuration Parameters" below). The currently available options are:
+
+
+
+|Architecture options | Description|
+|---------------------|------------|
+|blocked | Model components are coupled directly through connectors. All connectors are executed in a single block at the beginning of the coupling cycle, followed by executing all of the model components.|
+|leapfrog| Model components are coupled directly through connectors. Connectors and model components are interleaved. As a result, some model components receive forcing data that is at the start time of the next coupling step.|
+|nems    | Model components are coupled through a central mediator component. Most or all interactions are through the mediator. There may be some direct connections between model components. The default NEMS mediator is used.|
+|sbys    | Model components run side-by-side under the same simulation conditions (start/end times), but without any interaction between the individual models. This architecture option also covers standalone mode, where only a single model component is present.|
+|spaceweather | Model components are coupled through a central mediator component. Most or all interactions are through the mediator. There may be some direct connections between model components. The NEMS Spaceweather mediator is used.|
+
+The "model" labels indicate which specific model instances are being run. In order to reduce redundancies introduced by identical permutations, a specific order of model types is used when constructing compset names:
+-# `ATM` - Atmosphere
+-# `LND` - Land
+-# `ICE` - Sea Ice
+-# `OCN` - Ocean
+-# `WAV` - Wave
+-# `IPM` - Ionosphere-Plasmasphere
+-# `HYD` - Hydraulic
+
+Several model instances are available for each model type (instances marked with ** are not yet accessible):
+
+
+| Model type          | Instance options           | 
+| ------------------- | ----------- | 
+| `ATM`               | `satm, xatm, gsm, nmmb`    | 
+| `LND`               | `slnd, xlnd, lis**`          | 
+| `ICE`               | `sice, xice, cice`    | 
+| `OCN`               | `socn, xocn, hycom, mom5` | 
+| `WAV`               | `swav, xwav, ww3**`    | 
+| `IPM`               | `sipm, xipm, ipe` | 
+| `HYD`               | `shyd, xhyd, wrfhydro`    | 
+
+For each model type, the non-active instance options are listed below. The definitions of these two options are similar to their use in CESM:
+- **s<model type>:** ***Stub components*** conform to the NUOPC rules for model components. They do not advertise any fields in their importState or exportState. Their primary use is to test control flow between components in a driver.
+- **x<model type>:** ***Dead components*** conform to the NUOPC rules for model components. They advertise fields in the importState and exportState that are appropriate for the specific model type. Import fields may be ignored internally. Export fields are filled with data that changes during time stepping, but has no scientific relevance. Their primary use is in coupled systems with other dead components to test the data transfers between components.
+
+Only model components that are present in specific compset configurations are listed in the compset name.
+
+The compset nomenclature supports appending option strings after each model component. Each option is introduced by a "%" character. Multiple options are simply concatenated. Each option string should be less than 16 characters long, use no special characters like underscores, and be compounded by camel case.
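+
+For example, the compset name `cfsr%20150401_1hr_nems%cold_gsm%eul%T126_cice%0.5_mom5%0.5` listed earlier parses as:
+
+    caselabel     cfsr%20150401   (CFSR-based case, 2015-04-01 initial conditions)
+    runDuration   1hr
+    architecture  nems%cold       (NEMS mediator, cold start option)
+    model1        gsm%eul%T126    (GSM atmosphere, Eulerian dycore, T126 grid)
+    model2        cice%0.5        (CICE sea ice, 0.5 degree grid)
+    model3        mom5%0.5        (MOM5 ocean, 0.5 degree grid)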
+
+#### 6.3.2.2	Compset Configuration Parameters
+
+Each compset configuration file is a bash script that is sourced within the NEMSCompsetRun script as it iterates through the compset file list.  The variables defined in a compset configuration file are referenced by individual components to determine the configuration for that run.  The set of runtime configuration variables varies by component; please refer to each component's documentation for more information.  A list of variables specific to NEMS is given below.  The compset configuration script is also responsible for setting up the run directory with component-specific runtime configuration and data files.  Setup functions have been defined in NEMSCompsetRun for some components.  Once the runtime configuration is complete, the appropriate regression test script is called (see "Run Script" below).
+
+#### 6.3.2.2.1     Compset Variables
+
+|                      |                         |
+|----------------------|-------------------------|
+| `TEST_DESCR`         | : Compset description   |
+
+
+#### 6.3.2.2.2     NEMS Variables
+These variables are passed to the NEMS configuration file template located at `tests/nems.configure.[NEMS configuration filename].IN` and read at runtime; an illustrative excerpt of such a template follows the table below.
+
+
+|                               |                         |
+|-------------------------------|-------------------------|
+|`nems_configure`               |: Name of the NEMS configuration file template - includes the run sequence pattern|
+|`atm_model`                    |: Atmosphere component ("satm", "xatm", "gsm", "nmmb", "none")|
+|`atm_petlist_bounds`           |: Atmosphere component petlist lower and upper bounds|
+|`lnd_model`                    |: Land component ("slnd", "xlnd", "lis", "none")|
+|`lnd_petlist_bounds`           |: Land component petlist lower and upper bounds|
+|`ice_model`                    |: Sea ice component ("sice", "xice", "cice", "none")|
+|`ice_petlist_bounds`           |: Sea ice component petlist lower and upper bounds|
+|`ocn_model`                    |: Ocean component ("socn", "xocn", "mom5", "hycom", "none")|
+|`ocn_petlist_bounds`           |: Ocean component petlist lower and upper bounds|
+|`wav_model`                    |: Wave component ("swav", "xwav", "ww3", "none")|
+|`wav_petlist_bounds`           |: Wave component petlist lower and upper bounds|
+|`ipm_model`                    |: Space weather component ("sipm", "xipm", "ipe", "none")|
+|`ipm_petlist_bounds`           |: Space weather component petlist lower and upper bounds|
+|`hyd_model`                    |: Hydrology component ("shyd", "xhyd", "wrfhydro", "none")|
+|`hyd_petlist_bounds`           |: Hydrology component petlist lower and upper bounds|
+|`med_model`                    |: Mediator ("nems", "none")|
+|`med_petlist_bounds`           |: Mediator petlist lower and upper bounds|
+|`atm_coupling_interval_sec`    |: Atmosphere run sequence coupling interval|
+|`ocn_coupling_interval_sec`    |: Ocean run sequence coupling interval|
+|`coupling_interval_sec`        |: Run sequence coupling interval|
+|`coupling_interval_slow_sec`   |: Run sequence slow coupling interval|
+|`coupling_interval_fast_sec`   |: Run sequence fast coupling interval|
+
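+A hedged sketch of what such a template might contain (the `_variable_` placeholders are filled in from the compset variables above; the exact contents vary by template):
+
+    # nems.configure.med_atm_ocn_ice.IN (illustrative excerpt)
+    ATM_model:            _atm_model_
+    ATM_petlist_bounds:   _atm_petlist_bounds_
+    OCN_model:            _ocn_model_
+    OCN_petlist_bounds:   _ocn_petlist_bounds_
+    MED_model:            _med_model_
+    MED_petlist_bounds:   _med_petlist_bounds_
+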
+#### 6.3.2.2.3     Component Default Variable Functions
+|                               |                         |
+|-------------------------------|-------------------------|
+|`export_nmm`             |: Set NMM configuration variables to default values |
+|`export_gsm`             |: Set GSM configuration variables to default values |
+|`export_fim `            |: Set FIM configuration variables to default values |
+|`export_nems`            |: Set NEMS configuration variables to default values|
+
+
+#### 6.3.2.2.4     Component Setup Functions
+|                                |                         |
+|--------------------------------|-------------------------|
+|`setup_hycom`                   |: Link HYCOM test case runtime configuration and data files to regression test directory|
+|`setup_mom5cice`                |: Copy MOM5 test case runtime configuration and data files to regression test directory.|
+|`setup_ipe`                     |: Link IPE test case runtime configuration and data files to regression test directory.|
+|`setup_spaceweather`            |: Link SpaceWeather test case runtime configuration and data files to regression test directory.|
+|`setup_wrfhydro \<hydro_namelist_file\> \<parameter_data_folder\> \<namelist_hrldas_file\>` |: Link WRFHydro test case runtime configuration and data files|
+|`setup_lis \<lis_config_file\>` |: Link LIS/Noah test case config and data|
+|`setup_ww3`                     |: Link WaveWatch3 test case config and data|
+
+
+
+#### 6.3.2.2.5     GFS Regression Test Variables
+|                               |                         |
+|-------------------------------|-------------------------|
+|`CNTL_DIR`                     |: Control run directory.  This is the source directory for control run files.|
+|`LIST_FILES`                   |: Control run file list.  Files are compared using the Unix `cmp` utility.|
+
+#### 6.3.2.2.6     Run Script
+|                               |                         |
+|-------------------------------|-------------------------|
+|`RUN_SCRIPT`|: Script to be executed after component variables have been set, e.g. rt_gfs.sh, rt_nmm.sh, ...|
+
+
+
+7.  How to Configure a NEMS Application
+==========================================
+
+7.1    Changing Run Time, Queue, Project Number
+-----------------------------------------------
+
+The wall clock limits are set in the compset files under `NEMS/compsets` via the `WLCLK` variable, for example:
+
+    export WLCLK=30
+
+The value is in minutes and can be set to more than 60. Change the queue and project number in `NEMS/NEMSCompsetRun`, in a section like this:
+
+    elif [ $MACHINE_ID = yellowstone ]; then
+    export DATADIR=/glade/p/work/theurich/NEMS-Data
+    export ACCNR=P35071400
+    # export ACCNR=UCUB0024                                                                             
+    export QUEUE=small
+
+7.2    Changing Run Directory
+---------------------------------
+
+All output goes into the run directory, a unique directory created for each run (e.g. under `/glade/scratch/${user}`) with a name like rt_*.  When you run `./NEMS/NEMSCompsetRun`, you will see output like this:
+
+    RUNDIR:
+    '/scratch3/NCEPDEV/stmp1/Anthony.Craig/rt_128577/20150401short_nem_gsm_cice_mom5'.
+
+
+
+7.3  Model Output Control
+---------------------------
+
+The run directory is where the model runs and where the output data is written. The default is hourly output from all parts of the model, which is a lot of data; you will want to reduce it if you are running 30 or more days. To reduce the output, edit the NEMS configure template, NEMS/tests/nems.configure.med_atm_ocn_ice.IN,
+
+and set
+
+`DumpFields = false`
+
+everywhere `DumpFields` is set.  That will turn off output from the mediator and all the caps.  Alternatively, you can selectively turn off some of the output in the individual caps and the mediator.
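+
+For illustration, a hedged sketch of such an attribute entry in the template (block names and surrounding contents vary by template):
+
+    MED_attributes::
+      DumpFields = false
+    ::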
+
+To change the **GSM output frequency**, edit your compset, for example NEMS/compsets/20150401long_nems_gsm_cice_mom5.
+
+Change to:
+
+    export FHZER=6
+    export FHOUT=6
+
+`FHZER` is the GSM accumulated fields zeroing frequency.
+
+`FHOUT` is the GSM output frequency in hours. 
+
+To change the **MOM output frequency**, edit
+
+    NEMS/NEMSCompsetRun
+
+and under `setup_mom5`, change from 1-hourly to 6-hourly output by changing
+
+    cp ${OCN_INPUT}/diag_table.1hr ${dst_dir}/diag_table
+
+to
+    
+    cp ${OCN_INPUT}/diag_table.6hr ${dst_dir}/diag_table
+
+To change the **CICE output history frequency**, the `ice_in` namelist needs to be modified.
+
+Edit `ice_in_mine` in `$SOMEDIR`:
+
+    histfreq   = 'm','d','h','x','x'
+    histfreq_n = 1,1,6,1,1
+
+This adds a second daily stream and a third 6-hourly stream. Change the 'd' or 'h' to 'x' if you do not want the extra streams. Also, you need to tell the ice model what variables go on what stream.
+
+For example:
+
+    f_hi   = 'mdhxx'
+    f_aice = 'mdhxx'
+    f_Tsfc = 'mdxxx'
+
+Here, ice thickness (hi) and concentration (aice) will go on all three streams, while Tsfc will only go on the first two (monthly and daily) streams. This will create history files under the `history` subdirectory of the form:
+
+    iceh.YYYY-MM.nc or iceh.YYYY-MM-DD.nc, etc.
+
+You can edit the NEMS/NEMSCompsetRun file to make sure it is copying your namelist.
+
+    cp $SOMEDIR/ice_in_mine ${dst_dir}/ice_in
+
+
+7.4  Changing Restart Frequency
+----------------------------------
+
+The restart frequency is controlled in each model separately. 
+
+To modify the **MOM restart frequency**, edit `restart_interval` in the NEMS configure file, NEMS/tests/nems.configure.med_atm_ocn_ice.IN.  `restart_interval` is set in seconds.
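+
+For example, for daily restarts (86400 seconds; illustrative value):
+
+    restart_interval = 86400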
+
+To modify the **GSM restart frequency**:
+
+The GSM does not currently support true restart from restart files. Instead, it is able to restart from the forecast output history files. This is controlled by the variable `NDAYS`, specified in the compset. For instance, if a restart after 30 days is required, set NDAYS to 30 and run the forecast. Two files from this run are needed to restart the GSM, sigf720 and sfcf720. These will be read by the GSM when the run is set up for a restart. When true restart-from-restart-file capability works in the GSM, the restart file frequency will be controlled by the variable `FHRES`, specified in the compset. (For now, FHRES must be set to a number greater than `NDAYS * 24`, otherwise the model will not finish successfully.)
+
+The **CICE restart output frequency** is also controlled by the namelist (`ice_in`). Edit your copy of the namelist, `$SOMEDIR/ice_in_mine`.
+
+Change the variables:
+
+    dumpfreq   = 's'
+    dumpfreq_n = 21600
+
+to have restarts written 4 times daily (every 21600 seconds). Acceptable units for `dumpfreq` are 's', 'd', 'm', and 'y'.
+You can edit the `NEMS/NEMSCompsetRun` file to make sure it is copying your namelist:
+
+    cp $SOMEDIR/ice_in_mine ${dst_dir}/ice_in
+
+**The mediator is currently set up to write restarts at the end of the run** and to overwrite the restarts in the run directory.  To write out restarts during the run, edit the mediator `restart_interval` in the NEMS configure file, NEMS/tests/nems.configure.med_atm_ocn_ice.IN.  `restart_interval` is set in seconds.  Restarts written during the run will have a timestamp prepended to the filename.
+
+
+
+7.5  Changing PET Counts (PE layout)
+--------------------------------------
+
+To change PE counts, first edit the compset (e.g. `NEMS/compsets/cfsr%20150401_1day_nems_gsm%slg%T126_cice%0.5_mom5%0.5`) and change these lines:
+
+    export TASKS=136
+    export atm_petlist_bounds="0 23"     
+    export ocn_petlist_bounds="24 55"  
+    export ice_petlist_bounds="56 75" 	
+    export med_petlist_bounds="76 135"
+
+Those specify the start and end task ID for each component, as well as the total number of TASKS required (here 24 ATM + 32 OCN + 20 ICE + 60 MED = 136).
+
+If you have changed the CICE PE count, then you will need to modify the file `CICE/comp_ice.backend`, change the following lines, and rebuild the model from scratch.
+
+    setenv NTASK 	20   	# total number of processors
+    setenv BLCKX 	72   	# x-dimension of blocks ( not including )
+    setenv BLCKY	205	# y-dimension of blocks (  ghost cells  )
+
+If you have changed the MOM PE count, then you will need to modify the MOM namelist input file, `input.nml`, and set the layout so that the product of the layout dimensions equals the MOM PE count (here 8 x 4 = 32, matching `ocn_petlist_bounds="24 55"`):
+
+    layout = 8,4
+    io_layout = 1,4
+
+If you have changed the GSM PE count, then **\<information needed>**.
+If you have changed the MEDIATOR PE count, the mediator will adapt automatically.
+
+
+
+8.  How to Modify a NEMS Application
+==========================================
+
+
+
+9.  How to Create a NEMS Application
+==========================================
+
+In order to use the AppBuilder, a "project" directory and repository at EMC need to be set up with an AppBuilder configuration file. This can be done either with existing EMC projects or with new projects. The approach is outlined below.
+
+
+9.1  How to create a *new* NEMS application that uses AppBuilder
+-------------------------------------------------------------------
+
+We want to create a project ABC that does not exist yet.  An EMC
+contact first needs to set up a new repository/directory on the EMC
+server: /projects/ABC
+
+* Check out your new ABC repo: 
+
+       svn co https://svnemc.ncep.noaa.gov/projects/ABC
+       cd ABC
+
+* Make the standard subdirectories: 
+
+       svn mkdir trunk branches tags
+       cd trunk
+
+* Edit the externals property of your project root directory: 
+
+       svn propedit svn:externals .
+
+* In the editor, define external links, for example:
+
+       # comments are okay
+       NEMS       -r 123456 https://svnemc.ncep.noaa.gov/projects/nems/trunk
+       WW3        -r 234567 https://svnemc.ncep.noaa.gov/projects/ww3/branches/esmf2/model
+       MOM5       -r 456    https://github.com/feiliuesmf/mom/branches/mom5_5_0_2_nuopc
+       MOM5_CAP   -r 12     https://github.com/feiliuesmf/nems_mom_cap/trunk
+       CICE       -r 65     https://github.com/feiliuesmf/lanl_cice/trunk
+       CICE_CAP   -r 32     https://github.com/feiliuesmf/lanl_cice_cap/trunk
+
+* Commit the svn:externals property:
+
+       svn commit
+
+* After exiting the editor from the previous step, update the working copy:
+
+       svn update
+
+ This will create subdirectories for each of the externals (for
+ example NEMS) and will check out the specified version of each of
+ the external codes.
+
+* Create an AppBuilder configuration file in the current directory (`/projects/ABC/trunk`). The file must have the extension `\<project name>.appbuilder`.  Templates are located in `./NEMS/AppBuilder`. [An example configuration for the Climate Forecast System (now UGCS-Seasonal) is included on this page](http://esgf.esrl.noaa.gov/projects/couplednems/appbuilder). A minimal, hypothetical sketch follows (consult the templates for the authoritative format):
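+
+       # ABC.appbuilder (hypothetical sketch): list the components
+       # this application builds; see ./NEMS/AppBuilder templates
+       COMPONENTS=( GSM MOM5 CICE )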
+
+* Build in this directory by using the AppBuilder script,
+  `./NEMS/NEMSAppBuilder`. If a component is already built on the
+  platform (i.e. its executable exists in the expected directory), it
+  will not be rebuilt. Currently there is no mechanism to point to an
+  executable in a different location, but one could be added.
+
+* Component configurations (compsets) composed of the components
+  identified in the AppBuilder file can now be run. Compsets that
+  include components not identified in the project AppBuilder file
+  will abort. There are standard NEMS compsets in the directory
+  `./NEMS/compsets/`; to run one:
+
+       ./NEMS/NEMSCompsetRun -compset ./NEMS/compsets/20150401short_nems_gsm_cice_mom5
+
+People can add to this global collection of compsets, or can create
+compsets locally. For local compsets, we recommend adding a compset
+directory to the current directory (`projects/ABC/trunk/`). [This page
+discusses compset syntax]().
+
+* For regression testing, or other cases where there is a desire to
+   perform multiple runs in sequence, you can create a compset run
+   file. It needs to have a `*.compsetRun` extension. This is
+   basically just a list of compsets, one per line. The whole list
+   (say `abc.compsetRun`) can be run with the command
+
+       ./NEMS/NEMSCompsetRun abc.compsetRun
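+
+   For illustration, `abc.compsetRun` (hypothetical) might contain:
+
+       # one compset per line
+       cfsr%20150401_1hr_nems%cold_gsm%slg%T126_cice%0.5_mom5%0.5
+       cfsr%20150401_1day_nems_gsm%slg%T126_cice%0.5_mom5%0.5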
+
+
+9.2  How to create a NEMS application that uses AppBuilder for an existing EMC project
+--------------------------------------------------------------------------------------
+
+We recommend creating a new directory that sits at the same level as
+the current trunk, e.g. transition_trunk, and treating that
+transition_trunk as the current directory. Associated directories
+might be called transition_branches, transition_tags, etc. Eventually
+these should be renamed to trunk, branches, tags once the transition
+is complete and the previous trunk can be removed.
+
+
+
+
Index: checkout/doc/GRID_CICE_gx3.md
===================================================================
--- checkout/doc/GRID_CICE_gx3.md	(nonexistent)
+++ checkout/doc/GRID_CICE_gx3.md	(revision 94669)
@@ -0,0 +1,44 @@
+CICE Grid gx3 {#GRID_CICE_gx3}
+=============
+
+Description
+-----------
+
+This page describes a CICE 3 degree global lat-lon POP grid (gx3). The
+gx3 grid is a regular spherical grid in both hemispheres.
+ 
+| Long Name                     | Name   | Value   |
+| :---------------------------- | :----- | :------ |
+| Number of longitudinal points | N<sub>i</sub>   | 100     |
+| Number of latitudinal points  | N<sub>j</sub>   | 116     |
+| Minimum longitude             | &nbsp; | 0.0     |
+| Maximum longitude             | &nbsp; | 360.0   |
+| Minimum latitude              | &nbsp; | -78.007 |
+| Maximum latitude              | &nbsp; | 89.90   |
+ 
+Longitude Plot
+--------------
+
+\image html GRID_CICE_gx3-lon.gif
+
+Latitude Plot
+-------------
+
+\image html GRID_CICE_gx3-lat.gif
+ 
+Mask Plot
+---------
+
+  Not yet available
+
+ 
+Data Decomposition
+------------------
+
+The CICE grid is decomposed into regular 2-dimensional blocks.  These
+blocks are then distributed to the PETs.  The size of the blocks is a
+compile-time setting, while the distribution of the blocks is a
+run-time namelist setting.  The CICE model threads over blocks.  For
+example (illustrative numbers), a 25 x 29 block size on this 100 x 116
+grid yields 4 x 4 = 16 blocks to distribute over the PETs.  The
+optimum decomposition depends on a number of things, including the
+cost of halo updates and the location of sea ice.  The user has quite
+a bit of flexibility in setting the CICE decomposition.
\ No newline at end of file
Index: checkout/doc/GRID_mom5_lonlat.md
===================================================================
--- checkout/doc/GRID_mom5_lonlat.md	(nonexistent)
+++ checkout/doc/GRID_mom5_lonlat.md	(revision 94669)
@@ -0,0 +1,61 @@
+MOM5 1 degree global tripolar grid coordinate values {#GRID_mom5_lonlat}
+====================================================
+
+Longitude values
+----------------
+
+| &deg; lon   | &deg; lon   | &deg; lon   | &deg; lon   | &deg; lon   | &deg; lon   | &deg; lon   | &deg; lon   | &deg; lon   | &deg; lon   | &deg; lon   | &deg; lon   |
+| :---------: | :---------: | :---------: | :---------: | :---------: | :---------: | :---------: | :---------: | :---------: | :---------: | :---------: | :---------: |
+| -279.5000   | -278.5000   | -277.5000   | -276.5000   | -275.5000   | -274.5000   | -273.5000   | -272.5000   | -271.5000   | -270.5000   | -269.5000   | -268.5000   |
+| -267.5000   | -266.5000   | -265.5000   | -264.5000   | -263.5000   | -262.5000   | -261.5000   | -260.5000   | -259.5000   | -258.5000   | -257.5000   | -256.5000   |
+| -255.5000   | -254.5000   | -253.5000   | -252.5000   | -251.5000   | -250.5000   | -249.5000   | -248.5000   | -247.5000   | -246.5000   | -245.5000   | -244.5000   |
+| -243.5000   | -242.5000   | -241.5000   | -240.5000   | -239.5000   | -238.5000   | -237.5000   | -236.5000   | -235.5000   | -234.5000   | -233.5000   | -232.5000   |
+| -231.5000   | -230.5000   | -229.5000   | -228.5000   | -227.5000   | -226.5000   | -225.5000   | -224.5000   | -223.5000   | -222.5000   | -221.5000   | -220.5000   |
+| -219.5000   | -218.5000   | -217.5000   | -216.5000   | -215.5000   | -214.5000   | -213.5000   | -212.5000   | -211.5000   | -210.5000   | -209.5000   | -208.5000   |
+| -207.5000   | -206.5000   | -205.5000   | -204.5000   | -203.5000   | -202.5000   | -201.5000   | -200.5000   | -199.5000   | -198.5000   | -197.5000   | -196.5000   |
+| -195.5000   | -194.5000   | -193.5000   | -192.5000   | -191.5000   | -190.5000   | -189.5000   | -188.5000   | -187.5000   | -186.5000   | -185.5000   | -184.5000   |
+| -183.5000   | -182.5000   | -181.5000   | -180.5000   | -179.5000   | -178.5000   | -177.5000   | -176.5000   | -175.5000   | -174.5000   | -173.5000   | -172.5000   |
+| -171.5000   | -170.5000   | -169.5000   | -168.5000   | -167.5000   | -166.5000   | -165.5000   | -164.5000   | -163.5000   | -162.5000   | -161.5000   | -160.5000   |
+| -159.5000   | -158.5000   | -157.5000   | -156.5000   | -155.5000   | -154.5000   | -153.5000   | -152.5000   | -151.5000   | -150.5000   | -149.5000   | -148.5000   |
+| -147.5000   | -146.5000   | -145.5000   | -144.5000   | -143.5000   | -142.5000   | -141.5000   | -140.5000   | -139.5000   | -138.5000   | -137.5000   | -136.5000   |
+| -135.5000   | -134.5000   | -133.5000   | -132.5000   | -131.5000   | -130.5000   | -129.5000   | -128.5000   | -127.5000   | -126.5000   | -125.5000   | -124.5000   |
+| -123.5000   | -122.5000   | -121.5000   | -120.5000   | -119.5000   | -118.5000   | -117.5000   | -116.5000   | -115.5000   | -114.5000   | -113.5000   | -112.5000   |
+| -111.5000   | -110.5000   | -109.5000   | -108.5000   | -107.5000   | -106.5000   | -105.5000   | -104.5000   | -103.5000   | -102.5000   | -101.5000   | -100.5000   |
+| -99.50000   | -98.50000   | -97.50000   | -96.50000   | -95.50000   | -94.50000   | -93.50000   | -92.50000   | -91.50000   | -90.50000   | -89.50000   | -88.50000   |
+| -87.50000   | -86.50000   | -85.50000   | -84.50000   | -83.50000   | -82.50000   | -81.50000   | -80.50000   | -79.50000   | -78.50000   | -77.50000   | -76.50000   |
+| -75.50000   | -74.50000   | -73.50000   | -72.50000   | -71.50000   | -70.50000   | -69.50000   | -68.50000   | -67.50000   | -66.50000   | -65.50000   | -64.50000   |
+| -63.50000   | -62.50000   | -61.50000   | -60.50000   | -59.50000   | -58.50000   | -57.50000   | -56.50000   | -55.50000   | -54.50000   | -53.50000   | -52.50000   |
+| -51.50000   | -50.50000   | -49.50000   | -48.50000   | -47.50000   | -46.50000   | -45.50000   | -44.50000   | -43.50000   | -42.50000   | -41.50000   | -40.50000   |
+| -39.50000   | -38.50000   | -37.50000   | -36.50000   | -35.50000   | -34.50000   | -33.50000   | -32.50000   | -31.50000   | -30.50000   | -29.50000   | -28.50000   |
+| -27.50000   | -26.50000   | -25.50000   | -24.50000   | -23.50000   | -22.50000   | -21.50000   | -20.50000   | -19.50000   | -18.50000   | -17.50000   | -16.50000   |
+| -15.50000   | -14.50000   | -13.50000   | -12.50000   | -11.50000   | -10.50000   | -9.500000   | -8.500000   | -7.500000   | -6.500000   | -5.500000   | -4.500000   |
+| -3.500000   | -2.500000   | -1.500000   | -0.5000000  | 0.5000000   | 1.500000    | 2.500000    | 3.500000    | 4.500000    | 5.500000    | 6.500000    | 7.500000    |
+| 8.500000    | 9.500000    | 10.50000    | 11.50000    | 12.50000    | 13.50000    | 14.50000    | 15.50000    | 16.50000    | 17.50000    | 18.50000    | 19.50000    |
+| 20.50000    | 21.50000    | 22.50000    | 23.50000    | 24.50000    | 25.50000    | 26.50000    | 27.50000    | 28.50000    | 29.50000    | 30.50000    | 31.50000    |
+| 32.50000    | 33.50000    | 34.50000    | 35.50000    | 36.50000    | 37.50000    | 38.50000    | 39.50000    | 40.50000    | 41.50000    | 42.50000    | 43.50000    |
+| 44.50000    | 45.50000    | 46.50000    | 47.50000    | 48.50000    | 49.50000    | 50.50000    | 51.50000    | 52.50000    | 53.50000    | 54.50000    | 55.50000    |
+| 56.50000    | 57.50000    | 58.50000    | 59.50000    | 60.50000    | 61.50000    | 62.50000    | 63.50000    | 64.50000    | 65.50000    | 66.50000    | 67.50000    |
+| 68.50000    | 69.50000    | 70.50000    | 71.50000    | 72.50000    | 73.50000    | 74.50000    | 75.50000    | 76.50000    | 77.50000    | 78.50000    | 79.50000    |
+
+Latitude values
+---------------
+
+| &deg; lat   | &deg; lat   | &deg; lat   | &deg; lat   | &deg; lat   | &deg; lat   | &deg; lat   | &deg; lat   | &deg; lat   | &deg; lat   | &deg; lat   | &deg; lat   |
+| :---------: | :---------: | :---------: | :---------: | :---------: | :---------: | :---------: | :---------: | :---------: | :---------: | :---------: | :---------: |
+| -81.50000   | -80.50000   | -79.50000   | -78.50000   | -77.50000   | -76.50000   | -75.50000   | -74.50000   | -73.50000   | -72.50000   | -71.50000   | -70.50000   |
+| -69.50000   | -68.50000   | -67.50000   | -66.50000   | -65.50000   | -64.50000   | -63.50000   | -62.50000   | -61.50000   | -60.50000   | -59.50000   | -58.50000   |
+| -57.50000   | -56.50000   | -55.50000   | -54.50000   | -53.50000   | -52.50000   | -51.50000   | -50.50000   | -49.50000   | -48.50000   | -47.50000   | -46.50000   |
+| -45.50000   | -44.50000   | -43.50000   | -42.50000   | -41.50000   | -40.50000   | -39.50000   | -38.50000   | -37.50000   | -36.50000   | -35.50000   | -34.50000   |
+| -33.50000   | -32.50000   | -31.50000   | -30.50000   | -29.50000   | -28.50143   | -27.50710   | -26.51979   | -25.54212   | -24.57656   | -23.62538   | -22.69058   |
+| -21.77392   | -20.87680   | -20.00033   | -19.14525   | -18.31191   | -17.50033   | -16.71014   | -15.94058   | -15.19058   | -14.45871   | -13.74323   | -13.04212   |
+| -12.35312   | -11.67377   | -11.00143   | -10.33333   | -9.666666   | -9.002051   | -8.343542   | -7.695041   | -7.060205   | -6.442354   | -5.844389   | -5.268724   |
+| -4.717221   | -4.191149   | -3.691149   | -3.217221   | -2.768724   | -2.344389   | -1.942354   | -1.560205   | -1.195041   | -0.8435420  | -0.5020515  | -0.1666663  |
+| 0.1666670   | 0.5020523   | 0.8435428   | 1.195042    | 1.560206    | 1.942354    | 2.344390    | 2.768725    | 3.217222    | 3.691150    | 4.191150    | 4.717222    |
+| 5.268725    | 5.844390    | 6.442354    | 7.060205    | 7.695042    | 8.343543    | 9.002052    | 9.666667    | 10.33333    | 11.00143    | 11.67377    | 12.35313    |
+| 13.04212    | 13.74323    | 14.45871    | 15.19058    | 15.94058    | 16.71014    | 17.50033    | 18.31191    | 19.14525    | 20.00033    | 20.87680    | 21.77392    |
+| 22.69058    | 23.62538    | 24.57656    | 25.54212    | 26.51979    | 27.50710    | 28.50143    | 29.50000    | 30.50000    | 31.50000    | 32.50000    | 33.50000    |
+| 34.50000    | 35.50000    | 36.50000    | 37.50000    | 38.50000    | 39.50000    | 40.50000    | 41.50000    | 42.50000    | 43.50000    | 44.50000    | 45.50000    |
+| 46.50000    | 47.50000    | 48.50000    | 49.50000    | 50.50000    | 51.50000    | 52.50000    | 53.50000    | 54.50000    | 55.50000    | 56.50000    | 57.50000    |
+| 58.50000    | 59.50000    | 60.50000    | 61.50000    | 62.50000    | 63.50000    | 64.50000    | 65.50000    | 66.50000    | 67.50000    | 68.50000    | 69.50000    |
+| 70.50000    | 71.50000    | 72.50000    | 73.50000    | 74.50000    | 75.50000    | 76.50000    | 77.50000    | 78.50000    | 79.50000    | 80.50000    | 81.50000    |
+| 82.50000    | 83.50000    | 84.50000    | 85.50000    | 86.50000    | 87.50000    | 88.50000    | 89.50000    | &nbsp;      | &nbsp;      | &nbsp;      | &nbsp;      |
Index: checkout/doc/DREV93202.md
===================================================================
--- checkout/doc/DREV93202.md	(nonexistent)
+++ checkout/doc/DREV93202.md	(revision 94669)
@@ -0,0 +1,346 @@
+DREV93202: UGCS-Seasonal 0.5 {#milestone_DREV93202}
+============================
+
+\date 5/24/2017
+
+Repository URL
+--------------
+
+ * https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Seasonal/branches/external_land
+
+Subversion Revision: r93202
+
+Description
+-----------
+
+This milestone is a version of the atmosphere-ocean-ice coupled
+UGCS-Seasonal application with the addition of external land surface
+(LIS/Noah 3.3) and hydrology (WRF-Hydro) components. The land surface
+and hydrology components are one-way coupled, receiving their import
+states from the NEMS Mediator. LIS/Noah 3.3 runs on a global T574
+domain and WRF-Hydro runs on a 
+\ref GRID_Front_Range_Regional "small regional grid". The ATM-OCN-ICE
+components are initialized with April 1, 2015 CFSR-based initial
+conditions. No spin-up run has been completed for the land surface and
+hydrology components, so both LIS and WRF-Hydro are initialized with
+cold start conditions.
+
+\todo reference grids
+
+This is a follow-on to the 
+\ref milestone_DREV70089 "Reg-Hydro 0.2 release" with the following changes:
+
+* The separate atmosphere layer (NEMS/src/atmos) originally in NEMS
+  has been removed and GSM is using its own NUOPC cap. This was
+  accomplished by branching off of the ATM-refactor branch.
+
+* A complete set of coupling fields are sent from the Mediator to LIS
+  and WRF-Hydro. Previously, only a subset of fields were transferred
+  to validate grid interpolation.
+
+* GSM is in semi-Lagrangian mode on the T574 grid.  The previous
+  milestone used the Eulerian dycore on the T126 grid.
+
+* CICE and MOM use 
+  \ref GRID_mom5_0p5deg_tripole "0.5 degree tripolar grids".  The
+  previous milestone used 1 degree tripolar grids.
+
+* LIS is running on the reduced T574 grid with coldstart initial
+  values.  The previous milestone ran on the T126 grid.  New
+  configuration options have been added to the LIS cap including: the
+  ability to write NUOPC state restart files, log optimizations,
+  compilation with debugging.
+
+* WRF-Hydro continues to run on the Front Range regional domain with
+  coldstart initial values.  New configuration options have been added
+  to the WRF-Hydro cap including: the ability to write NUOPC state
+  restart files, log optimizations, and compilation with debugging
+
+The milestone has been validated by (1) ensuring that the original
+three-way coupled system produces physically realistic mediator
+restart files, (2) validating the quality of the regridding between
+ATM->LND and LND->HYD in the Mediator, and (3) verifying that the
+LIS/Noah 3.3 and WRF-Hydro produce physically realistic output from
+cold start conditions after 1 day and 5 day runs.
+
+The release is available on the "external_land" branch listed
+above. No effort was made to merge back to the trunk due to recent
+significant NEMS build/run infrastructure changes.
+
+Run Sequences
+-------------
+
+UGCS-Seasonal includes two run sequences, a cold start sequence and a
+time integration sequence.
+
+<b>Cold start sequence</b>: The cold start sequence initializes
+components using a minimal set of files ingested by GSM. The cold
+start sequence only needs to run for half an hour. However, it runs
+for an hour because there is a limitation in the EMC scripts on
+running for less than an hour.
+
+    runSeq::
+      @1800.0
+        @600.0
+          MED MedPhase_prep_atm
+          MED -> ATM :remapMethod=redist
+          ATM
+          ATM -> MED :remapMethod=redist
+          MED MedPhase_prep_lnd
+          MED -> LND :remapMethod=redist
+          LND
+          LND -> MED :remapMethod=redist
+          MED MedPhase_prep_hyd
+          MED -> HYD :remapMethod=redist
+          HYD
+          HYD -> MED :remapMethod=redist
+          MED MedPhase_prep_ice
+          MED -> ICE :remapMethod=redist
+          ICE
+          ICE -> MED :remapMethod=redist
+          MED MedPhase_atm_ocn_flux
+          MED MedPhase_accum_fast
+        @
+        MED MedPhase_prep_ocn
+        MED -> OCN :remapMethod=redist
+        OCN
+        OCN -> MED :remapMethod=redist
+      @
+    ::
+
+<b>Time integration sequence</b>: The second run sequence, shown
+below, is for the time integration loop. It is initialized by restart
+files generated by the cold start sequence. There is a fast and a slow
+loop, at 10 minutes and 30 minutes, respectively.
+
+    runSeq::
+      @1800.0
+        MED MedPhase_prep_ocn
+        MED -> OCN :remapMethod=redist
+        OCN
+        @600.0
+          MED MedPhase_prep_ice
+          MED MedPhase_prep_hyd
+          MED MedPhase_prep_lnd
+          MED MedPhase_prep_atm
+          MED -> ATM :remapMethod=redist
+          MED -> LND :remapMethod=redist
+          MED -> HYD :remapMethod=redist
+          MED -> ICE :remapMethod=redist
+          ATM
+          LND
+          HYD
+          ICE
+          ATM -> MED :remapMethod=redist
+          LND -> MED :remapMethod=redist
+          HYD -> MED :remapMethod=redist
+          ICE -> MED :remapMethod=redist
+          MED MedPhase_atm_ocn_flux
+          MED MedPhase_accum_fast
+        @
+        OCN -> MED :remapMethod=redist
+        MED MedPhase_write_restart
+      @
+    ::
+
+New Coupling Fields
+-------------------
+
+### Mediator to LIS
+
+| Standard Name                 | Units      | Interpolation Method |
+| :---------------------------- | :--------- | :------------------- |
+| inst_down_lw_flx              | W m-2      | conservative         |
+| inst_down_sw_flx              | W m-2      | conservative         |
+| inst_merid_wind_height_lowest | m s-1      | bilinear             |
+| inst_pres_height_surface      | Pa         | bilinear             |
+| inst_spec_humid_height_lowest | kg kg-1    | bilinear             |
+| inst_temp_height_lowest       | K          | bilinear             |
+| inst_zonal_wind_height_lowest | m s-1      | bilinear             |
+| mean_prec_rate                | kg m-2 s-1 | conservative         |
+
+
+### LIS to Mediator
+
+
+| Standard Name                        | Units | Interpolation Method |
+| :----------------------------------- | :---- | :------------------- |
+| temperature_of_soil_layer_1          | K     | bilinear             |
+| temperature_of_soil_layer_2          | K     | bilinear             |
+| temperature_of_soil_layer_3          | K     | bilinear             |
+| temperature_of_soil_layer_4          | K     | bilinear             |
+| moisture_content_of_soil_layer_1     | m m-1 | bilinear             |
+| moisture_content_of_soil_layer_2     | m m-1 | bilinear             |
+| moisture_content_of_soil_layer_3     | m m-1 | bilinear             |
+| moisture_content_of_soil_layer_4     | m m-1 | bilinear             |
+| liquid_water_content_of_soil_layer_1 | m m-1 | bilinear             |
+| liquid_water_content_of_soil_layer_2 | m m-1 | bilinear             |
+| liquid_water_content_of_soil_layer_3 | m m-1 | bilinear             |
+| liquid_water_content_of_soil_layer_4 | m m-1 | bilinear             |
+| surface_runoff_flux                  | m s-1 | conservative         |
+| subsurface_runoff_flux               | m s-1 | conservative         |
+
+### Mediator to WRF-Hydro
+ 
+| Standard Name                        | Units | Interpolation Method |
+| :----------------------------------- | :---- | :------------------- |
+| temperature_of_soil_layer_1          | K     | bilinear             |
+| temperature_of_soil_layer_2          | K     | bilinear             |
+| temperature_of_soil_layer_3          | K     | bilinear             |
+| temperature_of_soil_layer_4          | K     | bilinear             |
+| moisture_content_of_soil_layer_1     | m m-1 | bilinear             |
+| moisture_content_of_soil_layer_2     | m m-1 | bilinear             |
+| moisture_content_of_soil_layer_3     | m m-1 | bilinear             |
+| moisture_content_of_soil_layer_4     | m m-1 | bilinear             |
+| liquid_water_content_of_soil_layer_1 | m m-1 | bilinear             |
+| liquid_water_content_of_soil_layer_2 | m m-1 | bilinear             |
+| liquid_water_content_of_soil_layer_3 | m m-1 | bilinear             |
+| liquid_water_content_of_soil_layer_4 | m m-1 | bilinear             |
+| surface_runoff_flux                  | m s-1 | conservative         |
+| subsurface_runoff_flux               | m s-1 | conservative         |
+
+Validation
+----------
+
+UGCS-Seasonal 0.5 did not bit-for-bit reproduce the NEMS mediator
+restart files generated from the Atm Refactor work or the NEMS
+mediator restart files generated from 
+\ref milestone_DREV88884 "UGCS-Seasonal 0.4".  Note that
+the Atm Refactor work also did not bit-for-bit reproduce the NEMS
+mediator restart files generated from 
+\ref milestone_DREV88884 "UGCS-Seasonal 0.4".  Changes made to the
+NEMS run system likely link in different initial conditions.
+Milestone validation was carried out by:
+
+
+1. Producing data plots of all ATM-LND and LND-HYD coupled fields and
+   inspecting the regridding.
+
+2. Producing data plots of LIS output including all eight Noah 3.3
+   meteorological forcing fields, soil temperatures, soil moisture
+   content, liquid fraction of soil moisture content, and runoff.
+
+3. Producing data plots of WRF-Hydro output including streamflow.
+
+###  NEMS Mediator Regridding Samples (2015-04-01:00:01:00)
+
+\image html DREV93202-shade_MED_FROM_ATM_inst_temp_height_lowest_4.gif
+\image html DREV93202-shade_MED_TO_HYD_temperature_of_soil_layer_1_4.gif
+
+### NEMS Mediator End of Run Samples (2015-04-06:00:00:00)
+
+\image html DREV93202-shade_MEDIATOR_inst_spec_humid_height_lowest_1.gif
+\image html DREV93202-shade_MEDIATOR_moisture_content_of_soil_layer_1_1.gif
+
+###  LIS Output End of Run Samples (2015-04-06:00:00:00)
+
+\image html DREV93202-shade_LIS_Qair_f_tavg_1.gif
+\image html DREV93202-shade_LIS_SoilMoist_tavg_1.gif
+
+###  WRF-Hydro Output End of Run Samples (2015-04-06:00:00:00)
+
+\image html DREV93202-streamflow.gif
+
+Sample Plots: https://esgf.esrl.noaa.gov/projects/couplednems/ugcs-seasonal_0_5_plots
+
+Download, Build, Run, and Restart
+---------------------------------
+
+### Download and Build
+
+To check out and build the application:
+
+    svn co -r92981 https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Seasonal/branches/external_land UGCS_seasonal_0_5
+    cd UGCS_seasonal_0_5
+    ./NEMS/NEMSAppBuilder
+
+General instructions on how to download and build a NEMS application
+are discussed in sections 1.1.1 and 1.1.2 of the NEMS User's Guide and
+Reference.
+
+\todo Update guide reference when the guide is finished
+
+### Cold Start, 1 Day Run, and 5 Day Run
+
+Compsets that can be run with this revision are:
+
+Coupled
+
+    cfsr%20150401_1hr_nems%cold_gsm%slg%T574_lis%T574_wrfhydro%frontrange_cice%0.5_mom5%0.5
+    cfsr%20150401_1day_nems_gsm%slg%T574_lis%T574_wrfhydro%frontrange_cice%0.5_mom5%0.5
+    cfsr%20150401_5day_nems_gsm%slg%T574_lis%T574_wrfhydro%frontrange_cice%0.5_mom5%0.5
+
+Side-by-Side/Standalone
+
+    cfsr%20150401_1day_sbys_lis%T574
+    cfsr%20150401_1day_sbys_wrfhydro%frontrange
+
+To run the default UGCS-Seasonal compset list, use this command from the app directory:
+
+    ./NEMS/NEMSCompsetRun
+
+Note: The default UGCS-Seasonal compset list includes the 1 hour coldstart and the 1 day restart. In order to run the 5 day or 30 day restart, please change the QUEUE value set in `NEMS/NEMSCompsetRun`.
+
+To run a specific compset:
+
+    ./NEMS/NEMSCompsetRun -compset <compset name>
+
+Mediator Restart Files
+----------------------
+
+The NEMS Mediator is capable of reading in a set of restart files
+containing initial values of coupling fields. This is required in
+order to run components concurrently.
+
+\note NEMS Mediator restart files are provided on Theia as part of
+this release, so it is not required to generate new restart
+files. However, the process is documented below in case new restarts
+need to be generated. Note this is the same process as is documented
+in the 
+\ref milestone_DREV88884 "UGCS-Seasonal 0.4" release.
+
+To initialize a new case of the UGCS-Seasonal from a cold start, run
+the cold start compset,
+`cfsr%20150401_1hr_nems%cold_gsm%slg%T574_lis%T574_wrfhydro%frontrange_cice%0.5_mom5%0.5`,
+to generate initial mediator restart files.  This compset runs the
+atm/land/hyd/ice/ocean components sequentially for 1 hour and
+generates initial mediator restart files consisting of initial values
+for coupling fields consistent with the current atmosphere, ocean, sea
+ice, land, and hydrology conditions.  You then use those initial
+mediator files to start up a standard run, such as
+`cfsr%20150401_1day_nems_gsm%slg%T574_lis%T574_wrfhydro%frontrange_cice%0.5_mom5%0.5`,
+with the same model initial conditions and initial model date as the
+cold start run, as described below.
+
+After running the cold start compset, go into NEMS/NEMSCompsetRun and
+modify "setup_med_nems" to pre-stage the cold start mediator restart
+files instead of whatever files are set by default.  This is done in a
+section that looks like:
+
+    cp -f ${DATADIR}/MED_NEMS/${nemsgrid}${nemsgridinp}/* ${RUNDIR}
+    # cp -f /scratch3/NCEPDEV/stmp1/Anthony.Craig/UGCS-Seasonal.r72808/20150401short_nemscold_gsm_cice_mom5/mediator*restart* ${RUNDIR}
+
+(You will need to adjust the copy-from path to point to your coldstart run directory.)
+
+Comment out the first line and uncomment the second line. In the
+second line, set the path to the cold start run directory where the
+cold start case just ran.  This will copy the mediator restart files
+from your cold start run directory into the new run directory.
+
+Once the cold start is done and NEMSCompsetRun is modified, run a
+standard compset like
+`cfsr%20150401_1day_nems_gsm%slg%T574_lis%T574_wrfhydro%frontrange_cice%0.5_mom5%0.5`
+to advance the model from the initial conditions.  The system will
+start with the same atmosphere, ocean, and ice initial conditions as
+the cold start run plus the new mediator restart files, and the model
+will run concurrently.
\ No newline at end of file
Index: checkout/doc/prep_inputs.py
===================================================================
--- checkout/doc/prep_inputs.py	(nonexistent)
+++ checkout/doc/prep_inputs.py	(revision 94669)
@@ -0,0 +1,227 @@
+#! /usr/bin/env python
+
+import re, subprocess, sys, getopt, datetime, glob, os
+
+bad_flag=False # set to true if errors occur
+
+FLAG_FILE='success-flag-file.txt'
+NEMS_PROJECT_NAME='NEMS'
+NEMS_MAIN_PAGE_MD='nemsmain.md'
+APP_DOC_MAIN='doc/README.md'  # app-level main doc file relative to app-level checkout
+
+def main():
+    app_mode, nems_path, app_paths = scan_args(sys.argv[1:])
+
+    # app_mode = True: we are running within an app checkout via "make app_doc"
+    # app_mode = False: nems checkout via "make nems_doc"
+
+    nems_rev, nems_loc = scan_nems(nems_path)
+    app_info=[ list(scan_app(d)) for d in app_paths ]
+    main_page=NEMS_MAIN_PAGE_MD
+
+    # Determine project name (eg. "NEMS")
+    # and project number (eg. "branches/update-docs at 93510")
+    if app_mode:
+        if len(app_info)!=1:
+            error('Running in app_doc mode but no app is present.\n')
+            sys.exit(1)
+
+        # use the app checkout path and the app-level main doc constant
+        app0_readme=os.path.join(app_paths[0],APP_DOC_MAIN)
+        if os.path.exists(app0_readme):
+            main_page=app0_readme
+
+        app_rev, app_name, app_loc = app_info[0]
+
+        project_number=app_loc+'@'+app_rev
+        project_name='NEMS App '+app_name
+    else:
+        project_number=nems_loc+'@'+nems_rev
+        project_name=NEMS_PROJECT_NAME
+
+    with open('Doxyfile','wt') as dw:
+        with open('Doxyfile.IN','rt') as dr:
+            for line in dr:
+                dw.write(line.replace('--PROJECT_NUMBER--',project_number)
+                         .replace('--PROJECT_NAME--',project_name)
+                         .replace('--MAIN_PAGE--',main_page)
+                         .replace('--CWD--',os.path.realpath(os.getcwd())))
+
+    with open('milestones.dox','wt') as dw:
+        with open('milestones.dox.IN','rt') as dr:
+            for line in dr:
+                if line.find('--MILESTONES GO HERE')>=0:
+                    dw.write(get_milestones())
+                else:
+                    dw.write(line)
+
+    if not bad_flag:
+        with open(FLAG_FILE,'wt') as successf:
+            successf.write('Doxygen setup completed at %s\n'%(
+                datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),))
+    elif os.path.exists(FLAG_FILE):
+        os.remove(FLAG_FILE)
+        error('Errors detected.  Please fix them and rerun.\n')
+        sys.exit(1)
+
+# ----------------------------------------------------------------------
+
+def error(what):
+    global bad_flag
+    bad_flag=True
+    sys.stderr.write('prep_inputs.py error: '+what.strip()+'\n')
+
+# ----------------------------------------------------------------------
+
+def scan_args(arglist):
+    try:
+        opt,args=getopt.getopt(arglist,'a')
+    except getopt.GetoptError as e:
+        sys.stderr.write(str(e)+'\n')
+        sys.exit(2)
+    return ( ('-a' in opt), args[0], args[1:] )
+
+# ----------------------------------------------------------------------
+
+def check_output(cmd):
+    # Workaround for lack of subprocess.check_output in 2.6
+    p=subprocess.Popen(['sh','-c',cmd],executable='/bin/sh',stdout=subprocess.PIPE)
+    (out,err)=p.communicate()
+    if p.poll():
+        raise Exception('%s: non-zero exit status'%(cmd,))
+
+    print '%s => %s --- %s'%(repr(cmd),repr(out),repr(err))
+    return out
+
+# ----------------------------------------------------------------------
+
+def scan_nems(nems_dir):
+    nems_rev=None
+    nems_loc=None
+
+    svn_info=check_output('svn info '+nems_dir)
+    for line in svn_info.splitlines():
+        r=re.match('''(?x) (?:
+              ^ Revision: \s+ (?P<revision> \d+ )
+            | ^ URL: .*? (?P<loc> branches/\S+ | tags/\S+ | [^/]+ )$ 
+        ) ''', line.strip())
+        if not r: 
+            continue
+        if r.group('revision'):   nems_rev=r.group('revision')
+        if r.group('loc'):        nems_loc=r.group('loc')
+
+    if not nems_rev:
+        error('%s: could not get svn revision\n'%(nems_dir,))
+        nems_rev='unknown'
+
+    if not nems_loc:
+        error('%s: could not get nems location '
+              'relative to top of project\n'%(nems_dir,))
+        nems_loc='nems'
+
+    return nems_rev, nems_loc
+
+# ----------------------------------------------------------------------
+
+def scan_app(app_dir):
+    app_rev=None
+    app_name=None
+    app_loc=None
+    
+    svn_info=check_output('svn info '+app_dir)
+    for line in svn_info.splitlines():
+        r=re.match('''(?x) (?:
+              ^ Revision: \s+ (?P<revision> \d+ )
+            | ^ URL: https://.*?/apps/ (?P<name>[^/]+)
+            | ^ URL: .*? (?P<loc> branches/\S+ | tags/\S+ | [^/]+) $
+            )''', line.strip())
+        if not r: continue
+        if r.group('revision'):   app_rev=r.group('revision')
+        if r.group('loc'):        app_loc=r.group('loc')
+        if r.group('name'):       app_name=r.group('name')
+
+    if not app_rev:
+        error('%s: could not get svn revision\n'%(app_dir,))
+        app_rev='unknown'
+
+    if not app_name:
+        error('%s: could not get app name\n'%(app_dir,))
+        app_name='app'
+
+    if not app_loc:
+        error('%s: could not get svn location '
+              'relative to app directory\n'%(app_dir,))
+        app_loc='app'
+
+    return app_rev, app_name, app_loc
+
+# ----------------------------------------------------------------------
+
+def order_milestones(a,b):
+    # Sort milestones by decreasing number.
+    return -cmp( re.sub('[a-zA-Z]+','',a),
+                 re.sub('[a-zA-Z]+','',b) )
+
+def get_milestones():
+    milestones=list()
+
+    md_file_list=set(glob.glob('DREV*.md'))|set(glob.glob('R[0-9]*.md'))
+    md_file_list=sorted(md_file_list,order_milestones)
+
+    print 'Reverse-sorting milestones:'
+
+    for mdfile in md_file_list:
+        mdbase=mdfile.replace('.md','')
+        expected_header_id='milestone_'+mdbase
+
+        # We're looking for this at the top of the mdfile:
+        #
+        # My Fancy Title     {#header_id}
+        # ==============
+
+        # Get the first non-blank line:
+        with open(mdfile,'rt') as mdf:
+            for line in mdf.readlines():
+                if line.split():
+                    break
+
+        # Get the header id:
+        m=re.search('\{#([A-Za-z][A-Za-z0-9_-]+)\}',line)
+        if not m:
+            error('%s: does not have a header with id {#%s} '
+                  'on first line\n'%(mdfile,expected_header_id))
+            continue
+        header_id=m.group(1)
+        if header_id != expected_header_id:
+            error('%s: header id %s should be %s'%(
+                mdfile,header_id,expected_header_id))
+            continue
+
+        # Get the title:
+        m=re.match('^(.*?)\s+\{#',line)
+        if not m:
+            error('%s: cannot get title from first line'%(mdfile,))
+            continue
+        title=m.group(1)
+        if not title:
+            error('%s: cannot get title from first line'%(mdfile,))
+            continue
+
+        print '%s = %s'%(header_id,title)
+
+        milestones.append('  + @subpage %s'%(header_id,))
+
+    if not milestones:
+        return ''
+
+    # 
+    return '\n'.join(milestones)+'\n'
+
+# ----------------------------------------------------------------------
+
+def get_app_list(app_info):
+    return '' # FIXME: implement this
+
+# ----------------------------------------------------------------------
+
+if __name__=='__main__':
+    main()

Property changes on: checkout/doc/prep_inputs.py
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: checkout/doc/structure.md
===================================================================
--- checkout/doc/structure.md	(nonexistent)
+++ checkout/doc/structure.md	(revision 94669)
@@ -0,0 +1,211 @@
+Repository Structure and Versioning {#structure}
+===================================
+
+NEMS modeling applications are multi-component coupled systems that
+must pull together, in a systematic way, components from different
+locations. The NEMS AppBuilder enables users to construct a specific,
+versioned modeling application from a versioned set of model
+components and configuration files. The AppBuilder approach also helps
+to ensure that changes made to the different applications are
+coordinated as they get checked back into the NEMS repository.
+
+The NEMS AppBuilder introduces two levels of configuration control:
+the modeling application level and the NEMS level. This split provides
+the model developer with full version control over all aspects of the
+modeling application code, while hiding many technical details about
+the infrastructure at the NEMS level.
+
+Exposed to/changeable by the user:
+
+* Version of NEMS.
+* List of external components (e.g. MOM5, CICE, ...) required by the application.
+* Version of each required external component.
+* Location of source and installation directory for NEMS and each external component.
+* Compiler and 3rd party library versions.
+
+Mostly hidden from the user:
+
+* Knowledge base of how to build each of the supported external components.
+* Infrastructure to configure and build NEMS according to the
+  information specified by the application layer.
+* Coupling system based on ESMF/NUOPC.
+
+Repository Version Tracking
+---------------------------
+
+NEMS uses Subversion (SVN) and Git in combination for many-component
+modeling systems. In this approach, the code associated with a
+versioned, coupled modeling system is a collection of external
+repository tags that are accessed through the svn:external
+property. External components and other external elements that reside
+in other Subversion or GitHub repositories can easily be included in
+this process, since GitHub provides Subversion access to Git
+repository tags.
+
+Both NEMS and CESM use this approach. With the AppBuilder, each
+application (e.g. HYCOM-GSM-CICE, NEMSfv3gfs, ...) is under its own SVN
+revision controlled directory. This directory contains the
+application-specific AppBuilder configuration file and a number of
+svn:external links. It is through these links that specific revisions
+of every component, and NEMS itself, are checked out when the
+application directory is checked out. The actual source code (for each
+component and NEMS) then lives in its own SVN or Git
+repository. Appendix A shows in detail how this is done in CESM.
+
+\todo Add appendix A
+
+The recommended best management practice for implementing the
+application version control is with SVN using externals. SVN externals
+allow the application to point to specific versions of NEMS and
+specific versions of each of the external components. Only external
+components actually used by the application need to be referenced.
+
+Overall Repository Structure
+----------------------------
+
+The NEMS repository is divided into three types of areas:
+
+ * NEMS Framework - This repository contains the mediator, build
+   system, compset runner, and this documentation
+
+ * Model components and caps - These reside in other repositories
+
+ * NEMS Applications - A group of repositories, one for each NEMS
+   application.  They contain Subversion Externals to the NEMS
+   Framework, and each component.
+
+Application Repository Directories
+----------------------------------
+
+Following the above recommendation, the application directory
+structure under SVN control looks like this:
+
+ * GSM-MOM5-CICE5 
+    
+    * GSM-MOM5-CICE5.appBuilder - application-specific AppBuilder
+      file, listing required external components
+
+    * ... other *.appBuilder files ...
+
+    * conf/ - build configuration files
+
+    * modulefiles/ - `module` files to load external dependencies such as NetCDF
+
+       * theia/ - `module` files for Theia machine
+
+       * wcoss.cray/ - `module` files for the Cray partition of WCOSS
+
+       * ... more platform support ...
+    
+    * compsets/ - definition of compsets for this application
+
+    * parm/ - small parameter files for compsets.  Large files are
+      located outside of the repository.
+
+    * log/ - subdirectory for log files from execution and compilation.
+      These are placed in the repository log/ directory so that they can
+      be used to track changes, by re-using compsets as regression tests.
+    
+    * NEMS  - subversion external to NEMS
+    
+       * \ref building "NEMSAppBuilder"
+
+       * \ref running "NEMSCompsetRun"
+
+       * exe/ - Built executables are placed here
+
+       * src/
+
+          * ... NEMS code located here ...
+
+       * test/ - Underlying implementation of the NEMS compset runner
+
+    * CICE - Subversion external to CICE model
+
+       * ... CICE code located here ...
+
+    * MOM5 - Subversion external to MOM5 model
+
+       * ... MOM5 code located here ...
+
+    * MOM5_CAP - Subversion external to the NUOPC CAP of MOM5
+
+       * ... NUOPC CAP code for MOM5 located here ...
+
+    * ... other components ...
+
+This structure gives full control of versioning of all of the model
+code to the application.  One can obtain the entire structure,
+including components, by checking out the application from Subversion.
+For a hypothetical `SomeApp` app,
+
+    svn co https://svnemc.ncep.noaa.gov/projects/nems/apps/SomeApp/trunk SomeApp
+
+This checks out everything needed to build the initial revision of the
+`SomeApp` application prototype from source into a directory called
+`SomeApp`. 
+
+Compilation instructions can be found in this page: 
+
+ * \ref building
+
+
+Location of the Modeling Application Directory
+----------------------------------------------
+
+The application directory can be located anywhere under revision
+control. However, most of the NCEP applications are expected to be
+located on the EMC SVN server.  It is very simple to relocate a
+directory that is under SVN control to a different location at any
+time (or to rename it), without losing any history. For this reason we
+have set up a staging area here:
+
+* https://svnemc.ncep.noaa.gov/projects/nems/apps
+
+where applications may first be placed before a final location is
+identified. We recommend creating the canonical trunk, tags, branches
+directory triplet under each application for consistency.  For
+example, the HYCOM-GSM-CICE coupling project has an application here:
+
+ * /projects/nems/apps/HYCOM-GSM-CICE
+   * branches
+   * tags
+   * trunk
+
+At any point in the future the HYCOM-GSM-CICE directory can be renamed
+and/or copied to a different location on the SVN server. None of the
+revision history will be lost.  This setup provides a very structured
+approach for early development work, without committing to anything
+that cannot be changed in the future.
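+
+A minimal sketch of such a relocation (the destination name is
+hypothetical); the server-side move preserves all history:
+
+    svn move -m "Relocate application" \
+        https://svnemc.ncep.noaa.gov/projects/nems/apps/HYCOM-GSM-CICE \
+        https://svnemc.ncep.noaa.gov/projects/nems/apps/SomeNewName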
+
+Handling External Components
+----------------------------
+
+NEMS includes some external components whose primary development
+repository is outside of EMC. The application level provides the
+necessary control over which version of a specific external component
+is used in the modeling application.
+
+External components can be located anywhere under revision
+control. There is a staging area:
+
+ * https://svnemc.ncep.noaa.gov/projects/nems/external_components
+
+where external components may first be placed before a final location
+is identified (in some cases back at the parent organization). We
+recommend creating the canonical trunk, tags, branches directory
+triplet under each external component for consistency.  For example,
+at this point the future XYZ component has this location and
+directory structure:
+
+ * /projects/nems/external_components/XYZ
+   * branches
+   * tags
+   * trunk
+
+At any point in the future the XYZ directory can be renamed and/or
+moved to a different location on the SVN server. None of the revision
+history will be lost.  This setup provides a very structured approach
+for early development work, without committing to anything that cannot
+be changed in the future.
+
+For example, the FV3 component is here:
+
+ * /projects/fv3/trunk
Index: checkout/doc/DREV84205.md
===================================================================
--- checkout/doc/DREV84205.md	(nonexistent)
+++ checkout/doc/DREV84205.md	(revision 94669)
@@ -0,0 +1,111 @@
+DREV84205: Wave 0.1 Two-Way Wave-Atmosphere Coupling  {#milestone_DREV84205}
+====================================================
+
+\date Revision date: 11/8/2016
+
+Repository URL
+--------------
+
+* https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Seasonal/branches/twoWayWW3
+
+Description
+-----------
+
+Wave 0.1 (DREV84205) is a two way coupled configuration of the 
+[Global Spectral Model (GSM)](http://www.emc.ncep.noaa.gov/index.php?branch=GFS)
+and WAVEWATCH III wave model. GSM runs on a 
+\ref GRID_gsm "T126 grid".
+WAVEWATCH III runs on a T188 grid, which uses the same
+latitude/longitude points as the GSM grid. GSM passes 10 m wind speeds [m/s] to
+WAVEWATCH III and WAVEWATCH III passes the roughness length z0 [m] to
+GSM. GSM's physics has been modified to update its internal roughness
+length values over the ocean to values accepted from WAVEWATCH
+III. Initialization for GSM is performed using Climate Forecast System
+Reanalysis (CFSR) data for April 1, 2015 and WAVEWATCH III is
+initialized from rest.
+
+\todo Add grid pages and reference them
+
+This revision is a technical illustration of the two-way field
+exchange between GSM and WAVEWATCH III on the same grid.  It has been
+tested successfully on the NOAA research platform Theia and the EMC
+platform WCOSS.  This revision has been run for 10 days and exhibits
+behavior that is roughly physically reasonable.  Further updates and
+study on higher resolution grids are required before further
+validation can be made.
+
+Run Sequences
+-------------
+
+A leapfrog coupling sequence is used with a 30 minute coupling time step.
+
+    runSeq::
+      @1800.0
+        WAV -> ATM :srcMaskValues=1
+        ATM 
+        ATM -> WAV
+        WAV
+      @
+    ::
+ 
+Validation
+----------
+
+Comparisons of the significant wave height (HS), 10 m wind speeds, and
+roughness length (z0) between the two way (GSM<->WW3) and the one-way
+coupled (GSM->WW3) systems are shown below.
+
+------------------------------------------------------------------------
+
+### NEMS Driver
+
+\image html DREV84205-driver-image.png "Figure 1: Driver overview"
+
+------------------------------------------------------------------------
+
+### Time 8 day +00:00 differences = (GSM\<->WW3) - (GSM->WW3)
+
+\note These results are from a development system and do not represent
+a NOAA prediction or product.
+
+\image html DREV84205-plot-page-1.png "Figure 2: Time 8 day +00:00 differences = (GSM\<->WW3) - (GSM->WW3)"
+
+------------------------------------------------------------------------
+
+### Time 8 day +01:00 differences = (GSM\<->WW3) - (GSM->WW3)
+
+\note These results are from a development system and do not represent
+a NOAA prediction or product.
+
+\image html DREV84205-plot-page-2.png "Figure 3: Time 8 day +01:00 differences = (GSM\<->WW3) - (GSM->WW3)"
+
+------------------------------------------------------------------------
+
+Build and Run
+-------------
+
+### Download and Build
+
+Instructions on how to download and build a NEMS application are in
+the
+\ref documentation "NEMS User's Guide and Reference".
+To download this particular revision,
+
+     svn co -r 84205 https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Seasonal/branches/twoWayWW3 UGCS-Seasonal-twoWayWW3
+
+### Run
+
+Compsets that can be run with this revision are:
+
+  * `cfsr%20150401_10day_leapfrog_gsm%slg%T126_ww3%t188`
+  * `cfsr%20150401_10day_leapfrog%oneway_gsm%slg%T126_ww3%t188`
+
+The first compset is for two-way coupling between GSM and WW3 and the
+second is for one-way coupling from GSM to WW3. To run these compsets,
+start within the UGCS-Seasonal-twoWayWW3 directory and execute the
+NEMSCompsetRun tool with the following command:
+ 
+    ./NEMS/NEMSCompsetRun -compset NEMS/compset/<compset> 
\ No newline at end of file
Index: checkout/doc/documentation.dox
===================================================================
--- checkout/doc/documentation.dox	(nonexistent)
+++ checkout/doc/documentation.dox	(revision 94669)
@@ -0,0 +1,17 @@
+/**@page documentation User's Guide and Reference
+
+These pages contain general documentation for NEMS, such as
+guides, how-tos, and system descriptions.
+
+  + @subpage introduction
+  + @subpage architecture
+  + @subpage structure
+  + @subpage building
+  + @subpage configuring
+  + @subpage running
+  + @subpage standards
+  + @subpage mediator
+  + @subpage sw_mediator
+  + @subpage cap-page
+
+*/
\ No newline at end of file
Index: checkout/doc/DREV58214.md
===================================================================
--- checkout/doc/DREV58214.md	(nonexistent)
+++ checkout/doc/DREV58214.md	(revision 94669)
@@ -0,0 +1,163 @@
+DREV58214: UGCS-Seasonal 0.1  {#milestone_DREV58214}
+============================
+
+\date 6/30/2015
+
+Description
+-----------
+
+DREV58214: UGCS-Seasonal 0.1 is an internal delivery of the three-way
+coupled GSM-CICE-MOM5 seasonal modeling application. Three-way
+coupling is implemented through the central NEMS mediator.
+ 
+In this revision, CICE receives from GSM dynamic fields including wind
+and stress, and thermodynamic fields such as lowest level temperature,
+specific humidity, height, radiation, precipitation, and derived air
+density. It also receives currents and sea surface temperature from
+MOM5. CICE sends its momentum flux to the mediator where it is merged
+with the GSM momentum flux and sent to MOM5. CICE ice fraction and
+masking information is sent to GSM. Other fields that should be sent
+from CICE to GSM are exported but are not used by the atmosphere model
+in this revision.
+ 
+MOM5 receives surface pressure, merged momentum flux, mean net
+longwave and banded shortwave radiation, precipitation, and sensible
+heat flux from GSM. MOM5 sends ocean currents to CICE and sea surface
+temperature to both GSM and CICE.
+ 
+[The full set of exchange fields for this revision is listed here.](https://docs.google.com/spreadsheets/d/11t0TqbYfEqH7lmTZ7dYe1DSCh6vOUFgX-3qvXgce-q0/edit#gid=0)
+The fields exchanged are those that have a "C" (complete) in column D
+of the sheet.
+
+\todo Move exchange field list to repo
+ 
+The UGCS-Seasonal modeling application in this revision is not yet
+initialized properly.  This means that during spin-up, data fields
+initialized to zero are sent into the models, and it is not until the
+second coupling step that actual data are exchanged.  The model
+appears technically stable even under this condition.
+ 
+Model Grids
+-----------
+
+\b GSM uses the global Gaussian T126 grid as documented on the 
+\ref GRID_gsm "GSM Grid"
+page. Internally GSM uses a reduced representation with shuffled
+latitudes. In this milestone revision, the import and export fields
+are defined on a non-reduced, unshuffled T126 grid, without masks. The
+coordinates of the import/export grid are written to files
+array_gsm_grid_coord1.nc and array_gsm_grid_coord2.nc during
+initialization and from the mediator to files
+array_med_atm_grid_coord1.nc and atm_med_atm_grid_coord2.nc.
+
+\b MOM5 uses a 1 degree tripolar grid documented on the 
+\ref GRID_mom5_1deg_tripole "MOM5 grid"
+page.  The coordinates are written from the mediator to the files
+array_med_ocn_grid_coord1.nc and array_med_ocn_grid_coord2.nc.
+
+\b CICE uses the 1 degree tripolar 
+\ref GRID_mom5_1deg_tripole "MOM5 grid".
+
+\todo Link to grid pages
+
+Coupling and Mediator Details
+-----------------------------
+
+\ref mediator "A general overview of the NEMS mediator is here."
+The mediator receives the model grids at initialization and creates a
+decomposition of those grids on the mediator PETs. The generic
+NUOPC_Connector redistributes the data from the model PETs and
+decomposition to the mediator PETs and decomposition.  The fields are
+transferred on their native model grids.
+
+The generic NUOPC_Connectors used in this milestone revision employ:
+
+    regridmethod: redist
+
+The mediator regridding is done using 
+
+    regridmethod: bilinear
+    polemethod: teeth
+    srcMaskValues = 0
+    dstMaskValues = 0
+    unmappedaction: ignore
+
+for state variables, and
+
+    regridmethod: conserve
+    polemethod: n/a
+    srcMaskValues = 0
+    dstMaskValues = 0
+    unmappedaction: ignore
+
+for flux variables and ice_fraction.
+
+Coupling Accumulation
+---------------------
+
+The mediator accumulates and averages all fields passed between models
+over each coupling interval.  The ocean is coupled every 2 hours and
+the atmosphere and ice models are coupled every 1 hour.
+
+Run Sequence
+------------
+
+The NEMS run-time configuration for the default UGCS-Seasonal
+configuration is provided below. For details please refer to the
+\ref configuring
+and
+\ref architecture
+pages.
+
+              CICE  
+               | 
+    GSM <-> Mediator <-> MOM5
+    
+    #############################################
+    ####  NEMS Run-Time Configuration File  #####
+    #############################################
+    
+    # MED #
+    med_model:                      nems
+    med_petlist_bounds:             60 65
+    
+    # ATM #
+    atm_model:                      gsm
+    atm_petlist_bounds:             0 31
+    
+    # OCN #
+    ocn_model:                      mom5
+    ocn_petlist_bounds:             32 55
+    
+    # ICE #
+    ice_model:                      cice
+    ice_petlist_bounds:             56 59
+    
+    # Run Sequence #
+    runSeq::
+      @7200.0
+        OCN -> MED
+        MED MedPhase_slow
+        MED -> OCN
+        OCN
+        @3600.0
+          MED MedPhase_fast_before
+          MED -> ATM
+          MED -> ICE
+          ATM
+          ICE
+          ATM -> MED
+          ICE -> MED
+          MED MedPhase_fast_after
+        @
+      @
+    ::
+
+Build & Run
+-----------
+
+Instructions on how to build and run specific code revisions
+(e.g. this milestone revision) are provided in: 
+
+* \ref building
+* \ref running
\ No newline at end of file
Index: checkout/doc/DREV73436.md
===================================================================
--- checkout/doc/DREV73436.md	(nonexistent)
+++ checkout/doc/DREV73436.md	(revision 94669)
@@ -0,0 +1,177 @@
+DREV73436: WAM-IPE 0.2 Send Analytical Fields from WAM to IPE {#milestone_DREV73436}
+=============================================================
+
+\date 04/04/2016
+
+Repository URL
+--------------
+
+* https://svnemc.ncep.noaa.gov/projects/ipe/WAM-IPE
+
+Description
+-----------
+
+\todo make IPE pages
+
+\todo make WAM pages
+
+This milestone is an internal release of a NEMS application with one
+active component and one data component. The active component is the
+[Ionosphere Plasmasphere Electrodynamics (IPE)](https://esgf.esrl.noaa.gov/projects/wam_ipe/IPE)
+model. The data component is the data version of the 
+[Whole Atmosphere Model (WAM)](https://esgf.esrl.noaa.gov/projects/wam_ipe/WAM). 
+The name of the data component is DATAWAM.  All field exchanges in the
+system occur through the 
+\ref sw_mediator "space weather mediator". This is a technical
+(non-scientific) milestone to ensure that field data is passed
+correctly from WAM through the mediator and then down through the IPE
+cap to the model itself. In this milestone, analytic fields are used
+to validate the transfer of data, so the data coming into IPE from
+the mediator isn't physically correct. Therefore, the output from IPE is
+not expected to be realistic. In this revision DATAWAM runs on a
+global 3D reduced Gaussian grid (\ref GRID_wam "WAM grid").
+The horizontal resolution is T62. The vertical component of this grid
+is 150 levels in pressure and has to be converted to height in order
+to couple with IPE. Because the relationship between pressure and
+height varies during a run, the actual heights of the levels of the
+WAM grid vary during a run. The maximum height of the WAM grid is
+approximately 800 km.  In this revision IPE runs on an 80 x 170 flux
+tube grid (\ref GRID_IPE "IPE grid") that extends up to approximately 360,000
+km. Because of the difference in heights, the WAM grid only overlaps
+with the bottom of the IPE grid. The amount of the overlap depends on
+the current height of the WAM grid.
+
+NUOPC "caps", which are essentially wrappers for the coupling
+interface, are provided for all model components allowing these
+components to work in NEMS and other NUOPC-compliant systems.  The
+version of each component used is:
+
+* DATAWAM
+* IPE
+
+\todo add pages for DATAWAM and IPE
+
+In this release only a subset of possible fields are exchanged between
+DATAWAM and IPE. The 
+[coupling fields spreadsheet](http://docs.google.com/spreadsheets/d/1ThbEHNh2ZV7PMdcsitEt4wz5CZkojlANK4hszkLdNAs/edit#gid=0)
+indicates in detail the status of the different coupled fields.
+
+ * DATAWAM fields are built to simulate output from WAM. To do this
+   they are built on a 2D unstructured mesh which represents the 2D
+   shuffled reduced-Gaussian grid of the WAM model. The 2D WAM
+   information (e.g. coordinates) used to construct this grid are read
+   in from a file. DATAWAM exports height information as a 3D field
+   built on the 2D grid. The WAM model's vertical coordinate is at
+   fixed pressure levels, so its actual height varies time-step by
+   time-step. To simulate this variation, DATAWAM's height field is
+   generated analytically every time-step using a random number
+   generator. The data fields output by DATAWAM are filled with an
+   analytic function so that the quality of the transfer can be
+   checked in IPE.
+     
+ * The IPE input fields advertised only represent a subset of the full
+   fields used by IPE. The reason for this is that the WAM and IPE
+   grids only overlap at the bottom of the IPE grid. Transferring the
+   entire IPE grid to the mediator would be inefficient, so only the
+   part that potentially overlaps with WAM is transferred and used for
+   regridding. In the IPE cap the fields received from the mediator
+   are copied into a variable (wamfield) which has the full index
+   space of the IPE fields. This transfer is complex because both
+   representations of the data are collapsed to 1D for efficiency's
+   sake. Once in the wamfield variable the data is extrapolated to
+   fill the empty region. If acting as part of a coupled system, IPE
+   uses the data from wamfield as part of its computations. In this
+   technical milestone, we will examine the data in wamfield to verify
+   that it was transferred correctly from DATAWAM.
+     
+
+Build & Run
+-----------
+
+Instructions on how to build and run specific code revisions
+(e.g. this milestone revision) and the supported compsets are provided
+on the 
+[WAM-IPE Build & Run](https://esgf.esrl.noaa.gov/projects/wam_ipe/build_run)
+page.  
+
+\todo migrate WAM-IPE Build & Run page
+
+Run Sequence
+------------
+
+The NEMS run-time configuration for the default Regional configuration
+is provided below.  For details on the run sequence in general please
+refer to the 
+\ref configuring
+and
+\ref architecture
+pages.
+
+    runSeq::
+     @21600.0
+       ATM -> MED :remapMethod=redist
+       MED
+       MED -> IPM :remapMethod=redist
+       ATM
+       IPM
+     @
+    ::
+
+Validation
+----------
+
+The validation procedure for this milestone is to verify the correct
+transfer of analytic fields from DATAWAM to IPE.  This transfer
+includes redistribution from DATAWAM to the mediator, regridding in
+the mediator, redistribution from the mediator to the IPE cap and then
+the connection from the IPE cap to the model itself.  To verify the
+connection, the average relative error is computed between the
+transferred field in the IPE model and the exact analytic value that
+it was set to in DATAWAM. If this error is equal to or smaller than
+the expected maximum error (1.0E-4), then the transfer is considered
+correct.
+
+The analytic function used for the validation is
+F = cos(lat)^2 * cos(2*lon) + 0.004*height + 1.0, where (lat,lon) are
+the latitude and longitude coordinates of the points and height is the
+height of the point in km.
+
+During this test both components run for a two day simulation,
+coupling every six hours.
+
+A test report is available for the validation run:
+
+ * [Space Weather One-Way Connection to IPE Test Report.](https://esgf.esrl.noaa.gov/projects/couplednems/IPE_Cap_Integration_Test_Report)
+
+\todo move Space Weather One-Way Connection to IPE Test Report to vlab
+
+The following table summarizes the test results for each field. 
+
+| Field                  |     Source Grid               |     Destination Grid        | Average Relative Error | Status |
+| :--------------------- | :---------------------------: | :-------------------------: | :--------------------: | :----: |
+| northward_wind_neutral | WAM T62 Reduced Gaussian Grid | IPE 80 x 170 Flux Tube Grid | 7.96E-5                | OK     |
+| eastward_wind_neutral  | WAM T62 Reduced Gaussian Grid | IPE 80 x 170 Flux Tube Grid | 7.96E-5                | OK     |
+| temp_neutral           | WAM T62 Reduced Gaussian Grid | IPE 80 x 170 Flux Tube Grid | 7.96E-5                | OK     |
+
+\todo reference the above grids
+
+In addition to analytically validating the transfer of all the fields,
+a visual verification was performed on one field
+(northward_wind_neutral) as a sanity check. The following two images
+are from that verification. The top image is of the field at a height
+of 104 km in WAM. The bottom image is of the same field at the same
+height in IPE after being transferred from WAM. Each image is a plot
+of the points making up the field with the color indicating the
+field's values. This type of plot was used to show the difference in
+the grids as well as the values.  Because of the different positions
+of the points in the two plots, there is the appearance of some
+distortion in the fields. However, after examining the images, the
+conclusion was that the transfer happened correctly.
+
+\image html DREV73436-WAM_Field.png
+
+\image html DREV73436-IPE_Field.png
+
+Limitations and Technical Notes
+-------------------------------
+
+This milestone is purely a verification of the transfer of data. Since
+the data being transferred into the IPE model isn't physically
+correct, IPE isn't expected to produce realistic output.
\ No newline at end of file
Index: checkout/doc/configuring.md
===================================================================
--- checkout/doc/configuring.md	(nonexistent)
+++ checkout/doc/configuring.md	(revision 94669)
@@ -0,0 +1,314 @@
+Configuring {#configuring}
+===========
+
+The coupled NEMS application is highly configurable. During
+build-time, i.e. when the NEMS executable is being compiled and
+linked, choices are made as to which model and mediator components are
+built into the system. The built-in components become accessible
+during run-time, allowing the execution of different run
+configurations without the need to rebuild NEMS.
+
+Often the build and run configurability of NEMS is hidden from the
+user by application or test scripts. However, there are times where it
+is useful to understand the technical details on the level of the NEMS
+executable.
+
+Build Configuration
+-------------------
+
+The NEMS build configuration is accessible through the GNU Make based build system. 
+
+The atmosphere, land, ocean, sea-ice, ionosphere-plasmasphere,
+hydraulic, and wave components that are built into the NEMS executable
+are picked via the ATM, LND, OCN, ICE, IPM, HYD, and WAV variables,
+respectively. Each of these variables is a comma-separated list of
+models that can be specified on the make command line. The currently
+available options for each variable are shown below:
+
+*  `ATM = satm, xatm`
+*  `LND = slnd, xlnd, lis`
+*  `OCN = socn, xocn, mom5, hycom, pom`
+*  `ICE = sice, xice, cice`
+*  `WAV = swav, xwav, ww3`
+*  `IPM = sipm, xipm, ipe`
+*  `HYD = shyd, xhyd, wrfhydro`
+
+For each model type the current non-active instance options (stub and
+dead components) are listed below. The definition of these two options
+is similar to their use in the
+[Community Earth System Model (CESM)](http://www2.cesm.ucar.edu/):
+
++ **Stub components** conform to the
+  [NUOPC rules for model components](https://earthsystemcog.org/projects/nuopc/compliance_testing).
+  They do not advertise any fields in their `importState` or
+  `exportState`. Their primary use is to test control flow between
+  components in a driver.
+
++ **Dead components** conform to the NUOPC rules for model
+components. They advertise fields in the `importState` and `exportState`
+that are appropriate for the specific model type. Import fields may be
+ignored internally. Export fields are filled with data that changes
+during time stepping, but has no scientific relevance. Their primary
+use is in coupled systems with other dead components to test the data
+transfers between components.
+
+All of the variables support the specification of multiple options via
+comma separated list on the right-hand side. The default target of the
+NEMS makefile (i.e. no target specified) will print out a list of
+options with explanation to assist the user with the build
+configuration.
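+
+As a hedged sketch (the exact make target may differ between
+applications), an executable with the MOM5 ocean, CICE sea ice, and
+stub wave components might be selected like this:
+
+    make OCN=mom5 ICE=cice WAV=swav nems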
+
+Run Configuration
+-----------------
+
+At run-time, the NEMS executable accesses a file called
+nems.configure, which it expects to find in the run directory. This
+file specifies the dynamic component selection, and the exact run
+sequence to be used. Only models built into the executable at
+build-time are accessible during run-time. An error will be triggered
+if an unavailable model is selected in nems.configure. The component
+selection is based on two variables:
+
+    xxx_model:          abc
+    xxx_petlist_bounds: lower upper
+
+Here `xxx` can be `atm`, `ocn`, `ice`, `ipm`, `med`. The `abc`
+stands for the actual instance name, e.g. `fv3` or `mom5`. The
+lower and upper bounds of the petList specification are integer PET
+numbers.
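+
+For illustration (a hedged fragment; the component choices and PET
+ranges here are arbitrary), a nems.configure file might contain:
+
+    atm_model:          fv3
+    atm_petlist_bounds: 0 31
+    ocn_model:          mom5
+    ocn_petlist_bounds: 32 63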
+
+The specification of the run sequence provides the flexibility needed
+to cover different coupling scenarios. The format is line based
+between special tags:
+
+    runSeq::
+        line1
+        line2
+        ...
+        lineN
+    ::
+
+There are a number of format options for each line:
+
+* A time loop is introduced by a `@` symbol, followed immediately by
+  the number of seconds of the associated time step.
+
+* A time loop is closed by a single `@` symbol on a line.
+
+* The `RUN` method of model component `xxx` is called by specifying
+  just `xxx` on a line. The supported values of `xxx` are the same
+  as for the model specification above. A specific RUN phase can be
+  provided by adding the phase label to the same line, following the
+  model string.
+
+* A connector going from component `xxx` to component `yyy` is
+  specified by a line like this: `xxx -> yyy`. An additional
+  argument on the same line can be used to specify connection options
+  for all of the fields handled by the connector. The format and
+  supported values of the connection options are documented in the
+  NUOPC reference manual.
+
+Here is an example of a run sequence specification with two time scales:
+
+    # Run Sequence #
+    runSeq::
+        @7200.0
+        OCN -> MED
+        MED MedPhase_slow
+        MED -> OCN
+        OCN
+        @3600.0
+            MED MedPhase_fast_before
+            MED -> ATM
+            ATM
+            ATM -> MED
+            MED MedPhase_fast_after
+          @
+        @
+    ::
+
+Anything on a line after the `#` symbol is treated as a comment and
+ignored. Indentation in the formatting does not change the meaning of
+a line, but is purely used to increase readability.
+
+Adding a model grid with a different resolution
+-----------------------------------------------
+
+Changing resolutions in coupled applications is far from an automated process at this point. There are a few reasons for this. One is that each of the components has its own procedure for changing grids, and there is no common interface. There is a constraint that ocean and sea ice grids are the same, which requires a utility to generate ice grids from the ocean grid. Additional utilities are required to generate appropriate initial conditions for all the components on the new grids. Finally, the land masks used by atmosphere, ocean, and sea ice components need to be consistent. This is achieved by having the mediator interpolate the land mask used by the ocean and ice components to the atmosphere grid.
+
+The sequence of steps looks like this:
+-# For any component that requires a grid change, use its native procedure to incorporate the new grid. If the ocean resolution changes, generate a new sea ice grid to match the ocean grid.
+-# For any component that requires a grid change, generate new initial conditions consistent with the grid.
+-# Create a standalone (in NEMS, called side-by-side) compset for the component if one does not exist. Verify correct operation of any component with a new grid and initial conditions running the standalone compset. Create a compset for the target coupled configuration.
+-# Run the coupled configuration compset through initialization in order to generate a mask file on the atmosphere grid that is consistent with ocean and ice mask and grids.
+-# Use the atmosphere mask file to generate correct initial condition files for the atmosphere using the chgres and orographic utilities.
+
+A run can now be performed with the changed resolution, using the compset for the coupled configuration.
+
+The steps are explained in more detail below.
+
+### Change component grids using their native procedures.
+
+**FV3**	***(Information is needed from FV3 team)***.
+
+**GSM**	***(Information is needed from GSM team)***.
+
+**MOM**
+
+-# Use MOM5's built-in ocean grid generation utility `MOM5/src/preprocessing/generate_grids/ocean/ocean_grid_generator.csh` to generate the ocean grid specification file (grid_spec.nc) with the user's own resolution setting.
+-# Generate ocean initial conditions.
+-# Copy the grid_spec.nc file and ocean initial conditions to the INPUT directory.
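+
+A minimal sketch of these steps (the $RUNDIR path is an assumption,
+and the resolution settings are edited inside the script itself):
+
+    cd MOM5/src/preprocessing/generate_grids/ocean
+    ./ocean_grid_generator.csh          # writes grid_spec.nc
+    cp grid_spec.nc $RUNDIR/INPUT/      # $RUNDIR is a hypothetical run directory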
+
+**CICE**
+
+-# CICE will use the same grid as the ocean component.
+-# CICE requires a grid_file and a kmt_file.
+-# The grid file contains the latitudes and longitudes of the corner points as well as the lengths of the North and East faces of the gridcells and the rotation angle from the curvilinear grid to true latitude-longitude grid.
+-# The kmt_file contains the land/ocean mask where 1s are ocean and 0s are land.
+-# A Fortran code (generateCICEfromFMSgridspec.F90) will take the MOM grid definitions and generate the grid and kmt file for CICE.
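+
+A hedged sketch of building and running that Fortran tool (the
+compiler, NetCDF paths, and command-line argument are assumptions):
+
+    gfortran generateCICEfromFMSgridspec.F90 -o generateCICEfromFMSgridspec \
+        -I$NETCDF/include -L$NETCDF/lib -lnetcdff -lnetcdf
+    # grid_spec.nc as input is an assumed convention; writes the grid and kmt files
+    ./generateCICEfromFMSgridspec grid_spec.nc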
+
+### Generate new initial conditions for any components with new grids.
+
+**FV3**	***(Information is needed from FV3 team)***.
+
+**GSM**	***(Information is needed from GSM team)***.
+
+**MOM**
+
+Use ?cvtR4p0To5p1? utility to convert NCEP CFSv2 initial conditions (MOM4 based) to   UGCS compatible initial conditions (MOM5 based).
+
+**CICE**
+
+-# Use the NCAR Command Language (NCL) script to take an initial state from CFS and make it CICE compatible: `ncl convert_cice5.ncl`
+-# The script expects ice_model.res.nc as input; the output is cice5_model.res.nc.
+
+### Use or create side-by-side (standalone) compsets for any components with new grids and verify correct operation. Create a compset for the target coupled configuration.
+
+[This sheet shows the compsets that already exist](https://docs.google.com/spreadsheets/d/1v9tJb03YuCbwDsXff4M5i6jz4lvBxUrhdImqaGnK_IE/edit#gid=0). (The ones for T574 and T1534 GSM do not yet exist.)
+To create a new compset, see the section called How to Add a New Compset.
+To use CompsetRun, see How to Build & Run.
+
+### Generate a land mask for the atmosphere.
+
+Masks from the ocean/ice models are interpolated to the atmosphere grid using a local area conservation scheme in the NEMS mediator. The result is a fractional land mask file, field_med_atm_a_land_mask.nc. The mediator sends this mask to the GSM. Grid cells with values equal to 1 can be considered "all land". Grid cells with values between 0 and 1 will eventually be part land, part ocean/ice.  However, the GSM atmosphere cannot currently handle cells that are part land, part ocean/ice and must create a binary mask representation using a GSM cutoff parameter. This is hardcoded in the routine GSM/phys/do_physics_one_step.f in the following place:
+
+    !  -> Mask
+        fldname='land_mask'
+        findex = QueryFieldList(ImportFieldsList,fldname)
+        if (importFieldsValid(findex) .and.
+     &      importData(1,1,findex) > -99999.0) then
+          do j = 1, lats_node_r
+            do i = 1, lonr
+              aoi_fld%slimskin(i,j) = 1.0
+              if (importData(i,j,findex) < 0.01) then
+                aoi_fld%FICEIN(i,j) = 0.0
+                aoi_fld%slimskin(i,j) = 3.0
+              endif
+            enddo
+          enddo
+        endif
+
+Any cells with a value greater than the cutoff parameter will be land. Currently (for revisions after DREV64441) GSM sets anything greater than 0.01 to land.  In GSM, the points that are associated with ocean and ice following the binary land mask and cutoff parameter receive and use ocean and ice exchange field data from those components.
+
+To generate the field_med_atm_a_land_mask.nc file, you must run the coupled system.  The mediator will generate that file during initialization.  The models must be configured to run on the grids and masks of interest.  The initial conditions and other scientific aspects of the model are not important at this point.  In this case, it's best if no mediator restarts are used to initialize the system.  The goal is to set up a run with the models on the correct grids and masks, to have the coupled system initialize, to have the mediator generate conservative mapping files between grids, for the mediator to interpolate the ocean mask to the atm grid conservatively, and to have that data written to a file.  This will all happen during initialization of the system. The model can abort once initialization is complete. 
+
+In the previous step, a compset that included the coupled target components and grids was created. Use CompsetRun to run this compset through initialization following [How to Build and Run](https://esgf.esrl.noaa.gov/projects/couplednems/quick_build_run). You may need to set up new PET layouts and decompositions for the new grids. You also need to check the mediator output grid files carefully after this short initial run is complete, to verify that the grids in the mediator are consistent with the new grids defined by the models. After the compset runs, you should have the field_med_atm_a_land_mask.nc file in your run directory.
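+
+A minimal sketch of this step (the compset name is a placeholder for
+the coupled configuration created above):
+
+    ./NEMS/NEMSCompsetRun -compset NEMS/compsets/<coupled compset>
+    # written by the mediator during initialization:
+    ls field_med_atm_a_land_mask.nc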
+
+### Run chgres and orographic utilities to get appropriate input files for GSM.
+
+Once the land mask for the atmosphere is available, it is necessary to run the chgres and orography utilities in order to get appropriate input files for GSM. The inputs generated by chgres are siganl and sfcanl, which are initial conditions for the atmospheric model. The siganl file is a binary file containing the sigma-level spherical harmonic spectral coefficients of the surface pressure, orography, and the remaining 3D model variables and tracers. The sfcanl file contains surface and land surface fields in physical space (Gaussian grid) used by the physics component.
+
+In the directory of the files is an ieee file and a GrADS control file that reads and plots the ieee file.  The second record of the ieee file is the sea-land mask made by the code.
+
+The chgres utility uses the sea-land mask (slm) GRIB file output by the orography code, if it is present, with the chgres SLMASK export variable pointing to the Gaussian GRIB slm file. That file, in turn, was made from the landmast.txt input to the orography code, which comes from the field_med_atm_a_land_mask.nc netcdf file.  If the export variable SLMASK is not set explicitly, chgres will look for the sea-land mask Gaussian file in the model fix fields directory, in which case it is made from the USGS 30" elevations and the UMD 30" ocean/lake/land mask file.  The same applies to the orography file if the export variable OROGRAPHY is not set to a new orography file or the file is not present. The cutoff rule parameter can only be changed by recompiling the orography code.
+
+The sea land mask cutoff parameter (what is the actual parameter name and where is it located?) used to create the set of mask files received on 10/5/15 is believed to be 0, so any cells with value greater than 0 were considered land. This is what has been used to run DREV64441.
+
+After generating the 10/5/15 files, the sea land mask cutoff parameter was set to 0.5, so any cells with a value greater than 0.5 in the fractional netcdf ocean file would be considered land. A new set of mask files was received on 11/17/15 corresponding to the 0.5 cutoff parameter.
+
+Additional files generated by the orography utility and chgres are:
+
+* mtnvar14_126, which is a gwd/mtn blocking file needed for GSM run time fortran number unit 24
+* slmgb126, which is the land model file orography code output
+* orogb126, which is the orography itself
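+
+A hedged sketch of pointing chgres at the newly generated files (the
+paths and the driver script name are assumptions; only the SLMASK and
+OROGRAPHY export variables are described above):
+
+    export SLMASK=/path/to/slmgb126       # Gaussian GRIB sea-land mask
+    export OROGRAPHY=/path/to/orogb126    # new orography file
+    ./run_chgres.sh                       # hypothetical chgres driver script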
+
+### File Format
+
+The headers for the siganl/sfcanl file inputs are of the form:
+
+    sighdr siganl.gfs.2015040100
+    jcap
+    126
+    lonb
+    384
+    latb
+    190
+    idate
+    2015041500
+    stop
+    global_sighdr ENDING ?
+    
+and
+
+    sfchdr sfcanl.gfs.2015040100
+    lonb
+    384
+    latb
+    190
+    idate
+    2015041500
+
+###  Potential improvements
+
+*Generating atmosphere land mask offline.* The entire "run the coupled system through initialization" process could be substituted with a preprocessing step that leverages ESMF to generate a conservative mapping file offline to interpolate the ocean mask to the atm grid offline.  That would require the ability to generate ESMF compatible grid files offline for any grids of interest, including corner points in a way that the grid information is identical to the information in the models. Right now, we are using the coupled system to take care of this preprocessing step. 
+
+*Consistency improvement.* It would be preferable if the atmosphere did not choose or determine mask values. The mediator would determine ocean/ice and land grid cells, and send appropriate data. Ideally the mask used by the mediator would be automatically consistent with the mask used for chgres and orography file generation.
+
+*Fractional cells.* The atmospheric model should be able to handle grid cells that are divided into ice/ocean/land portions.
Index: checkout/doc/Makefile
===================================================================
--- checkout/doc/Makefile	(revision 93212)
+++ checkout/doc/Makefile	(revision 94669)
@@ -1,24 +1,56 @@
-NEMSDOC=markdown.md BUILD.md NEWTEST.md OLDTEST.md
-#MODELDOC=modeldoc.md  README.GFS.md README.NMM.md
+LOCAL_DOCS=$(wildcard *.md) $(wildcard *.dox)
+CONTROL_FILES=Makefile prep_inputs.py Doxyfile.IN DoxygenLayout.xml.IN
 
-ifneq ($(wildcard ../../doc/*.md),)
-APPDOC=../../doc/*.md
-else
-APPDOC=
+TARGET=strahan@emcrzdm:/home/www/emc/htdocs/projects/nems-sample-v4
+
+# Find Doxygen
+
+ifndef DOXYGEN
+DOXYGEN=doxygen
 endif
 
-ALLDOC=$(APPDOC) $(NEMSDOC) # $(MODELDOC)
+ifeq ("$(DOXYGEN)","doxygen")
+  THEIA_DOXYGEN=/scratch3/NCEPDEV/hwrf/save/Samuel.Trahan/doxygen-1.8.10/bin/doxygen
+  JET_DOXYGEN=/contrib/doxygen/1.8.10/bin/doxygen
+  LUNA_DOXYGEN=/gpfs/hps/emc/hwrf/noscrub/soft/doxygen-1.8.10/bin/doxygen
+  TIDE_DOXYGEN=/hwrf/noscrub/soft/doxygen-1.8.10/bin/doxygen
 
+  ifneq ($(wildcard $(THEIA_DOXYGEN)*),)
+    DOXYGEN=$(THEIA_DOXYGEN)
+  endif
 
-all: README.html
+  ifneq ($(wildcard $(JET_DOXYGEN)*),)
+    DOXYGEN=$(JET_DOXYGEN)
+  endif
 
+  ifneq ($(wildcard $(LUNA_DOXYGEN)*),)
+    DOXYGEN=$(LUNA_DOXYGEN)
+  endif
+
+  ifneq ($(wildcard $(TIDE_DOXYGEN)*),)
+    DOXYGEN=$(TIDE_DOXYGEN)
+  endif
+endif
+
+default:
+	@echo Specify build mode:
+	@echo doc        = build documentation just for this directory
+	@echo nems-doc   = build full documentation website, including all apps
+	@echo clean      = delete outputs
+	@echo deliver    = copy to website
+	exit 19
+
 clean:
-	rm -f README.html
+	rm -rf html webpage.tar.gz repo_info.sh.inc Doxyfile DoxygenLayout.xml
 
-README.html: $(ALLDOC) Makefile
-	./md2html.py $(ALLDOC) README.html
-	head README.html
+nems-doc:
+	echo "Oopes!  Sam has not implemented this yet."
 
-deliver: README.html README.css
-	scp README.{css,html} \
-	   samuel.trahan at dmzgw.ncep.noaa.gov:/home/www/emc/htdocs/projects/rt/doc/.
\ No newline at end of file
+doc: clean
+	./prep_inputs.py ..
+	set -x ; $(DOXYGEN)
+	tar -cpzf webpage.tar.gz html Doxyfile
+
+deliver: html
+	cd html && rsync -arv . "$(TARGET)/."
+
Index: checkout/doc/DREV90957.md
===================================================================
--- checkout/doc/DREV90957.md	(nonexistent)
+++ checkout/doc/DREV90957.md	(revision 94669)
@@ -0,0 +1,143 @@
+DREV90957: Regional-Nest 0.3   {#milestone_DREV90957}
+============================
+
+\date Last revised: 04/06/2017
+
+Description
+-----------
+
+Regional-Nest 0.3 (DREV 90957) is a two-way configuration of the
+Nonhydrostatic Mesoscale Model on the B Grid (NMMB) and a regional
+configuration of the HYbrid Coordinate Ocean Model (HYCOM). The main
+features of this milestone release include 1) two-way coupling between
+HYCOM and NMMB with moving nest support; and 2) performance enhancements.
+
+This revision has been run for 2 days using initial and boundary
+conditions based on Hurricane Patricia and exhibits behavior
+that is Earth-like. The initial condition starts at 2015 10 20 12:00
+hours. This is the starting time for HYCOM initialization and
+integration. This regional HYCOM has a 1/12th degree resolution with
+1284x516 data points spanning a geographical region (-179.76, 2.48) to
+(-77.12, 39.98). HYCOM works on a regular lat-lon grid over this
+geographic region. The regional NMMB grid has a single parent domain
+roughly at 0.18 degree resolution with 451x451x42 data points spanning
+a geographic region (-40.5, -40.5) with central location (-94.3,
+18.4). Resolution of the moving outer nest is 231x201x42, 1/3rd of the
+parent domain. Resolution of the moving inner nest is 381x345x42,
+1/9th of the parent domain.  The NMMB grids are Lambertian
+curvilinear. They can also be thought of as rotated lat-lon grids.
+
+SST from HYCOM is interpolated from parent domain to the outer and
+inner nest through a combination of interpolation methods: PATCH in
+the ocean and nearest neighbor along the coastal line.
+
+Field Exchange
+--------------
+
+Currently all fields are transferred using bilinear interpolation. The
+following flux fields are transferred between NMMB and HYCOM:
+
+| NMMB->HYCOM              |   HYCOM->NMMB                     |
+| ------------------------ | --------------------------------- |
+| latent heat flux         | sea surface temperature           |
+| sensible heat flux       |                                   |      
+| net longwave             |                                   |   
+| net shortwave            |                                   |
+| zonal momentum flux      |                                   |
+| meridional momentum flux |                                   |
+| precipitation rate       |                                   |
+ 
+Processor Layout and Run Sequences
+----------------------------------
+
+The coupled system runs NMMB and HYCOM concurrently. The processor
+layout and run sequence are detailed below. HYCOM integrates using
+the Patricia initial condition. HYCOM ignores mediator input on its
+first time step. HYCOM then uses mediator input in subsequent time
+steps, after NMMB has integrated and provided valid precipitation,
+radiative fluxes, heat fluxes, and momentum fluxes at coupling
+intervals.
+
+| Component  | Processor Layout   |
+| ---------- | ------------------ |
+| NMMB       | 0-249              |
+| HYCOM      | 250-393            |
+| MEDIATOR   | 394-453            |
+
+    runSeq::
+      @1800.0
+        MED MedPhase_slow
+        MED -> OCN :remapMethod=redist
+        @600.0
+          MED MedPhase_fast_before
+          MED -> ATM :remapMethod=redist
+          ATM
+          ATM -> MED :remapMethod=redist
+          MED MedPhase_fast_after
+        @
+        OCN
+        OCN -> MED :remapMethod=redist
+      @
+    ::
+ 
+Validation
+----------
+
+###  Parent Domain
+
+\image html "DREV90957_d01_sst_001hrs.png" "Plots of SST ingested in NMMB from HYCOM are shown here at 01 hr, 48 hr"
+
+\image html "DREV90957_d01_sst_048hrs.png" "SST received by NMMB parent nest after one hour model simulation time. By now HYCOM has run 2 time steps and sends updated SST to NMMB."
+
+SST received by NMMB parent nest at 48 hr of model simulation.
+
+### Moving Outer Nest
+
+\image html "DREV90957_d02_sst_001hrs.png" "SST received by NMMB outer nest after one hour model simulation time."
+
+\image html "DREV90957_sst_d02_48hrs.png" "SST received by NMMB outer nest at 48 hr of model simulation."
+
+### Moving Inner Nest
+
+\image html "DREV90957_d03_sst_001hrs.png" "SST received by NMMB inner nest after one hour model simulation time."
+
+\image html "DREV90957_d03_sst_048hrs.png" "SST received by NMMB inner nest at 48 hr of model simulation."
+
+Limitation of this milestone release
+------------------------------------
+ 
+Download and Build
+------------------
+
+This revision can be downloaded with the following command:
+
+    svn co -r 90957 https://svnemc.ncep.noaa.gov/projects/nems/apps/Regional-Nest
+
+Instructions on how to download and build a NEMS application are
+discussed in sections 1.1.1 and 1.1.2 of the NEMS User's Guide and
+Reference.
+
+The coupled system can be built with the following command after
+download is complete:
+
+    ./NEMS/NEMSAppBuilder
+
+Running the Patricia moving nest compset
+----------------------------------------
+
+Compsets that can be run with this revision are:
+
+ +  cfsr%2015102012_48hr%nest_nmm_hycom%1_12th
+
+To run compsets, start within the Regional-Nest directory and execute
+the NEMS CompsetRun tool by typing:
+
+    ./NEMS/NEMSCompsetRun -compset NEMS/compsets/cfsr%2015102012_48hr%nest_nmm_hycom%1_12th
+
+Currently, the data files are only set up on Theia. The data files for HYCOM can be found at:
+
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/HYCOM/REGIONAL_HEP20/
+
+Data files for NMMB can be found at:
+
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/RT-Baselines/NMMB_patricia_nests
Index: checkout/doc/GRID_HYCOM_GLBa0p24.md
===================================================================
--- checkout/doc/GRID_HYCOM_GLBa0p24.md	(nonexistent)
+++ checkout/doc/GRID_HYCOM_GLBa0p24.md	(revision 94669)
@@ -0,0 +1,45 @@
+HYbrid Coordinate Ocean Model (HYCOM) Grid GLBa0.24 {#GRID_HYCOM_GLBa0p24}
+===================================================
+
+Description
+-----------
+
+HYCOM runs on a 0.24 degree tri-polar global grid (GLBa0.24). The
+GLBa0.24 tri-polar grid is a regular spherical grid south of 46.9°N
+and bipolar north of 46.9°N. Longitude runs from 74.24° to 434.0°
+with exact 0.24 degree spacing, and latitude runs from -78.6080° to
+89.9332°.
+ 
+| Long Name                     | Name   | Value    |
+| :---------------------------- | :----- | :------- |
+| Number of longitudinal points | N<sub>i</sub>   | 1500     |
+| Number of latitudinal points  | N<sub>j</sub>   | 1100     |
+| Minimum longitude             | &nbsp; | 74.24    |
+| Maximum longitude             | &nbsp; | 434      |
+| Minimum latitude              | &nbsp; | -78.6080 |
+| Maximum latitude              | &nbsp; | 89.9332  |
+ 
+Data Decomposition
+------------------
+
+The grid and data decomposition is done in the following manner:
+
+1. The latitudes are regularly decomposed into jqr=10 bands, leading
+to 110 latitude values per band.
+
+2. Each latitude band is decomposed into blocks along the
+longitude. The actual size of each block, and the number of blocks is
+flexible within some limits in order to allow for load balancing. The
+limits are set by iqr=20, the maximum number of blocks in each band,
+and idm=150, the maximum number of longitudes per block.
+
+3. Every PET (persistent execution thread, i.e. MPI rank) is
+associated with exactly one lat-lon block. Not all blocks need to be
+associated with PETs, which makes it possible to map out blocks that
+are fully over land.
+
+\image html GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_lon.png
+\image html GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_lat.png
+\image html GRID_HYCOM_GLBa0p24-hycom_GLBa0.24_msk.png
+
+ 
Index: checkout/doc/DREV89738.md
===================================================================
--- checkout/doc/DREV89738.md	(nonexistent)
+++ checkout/doc/DREV89738.md	(revision 94669)
@@ -0,0 +1,170 @@
+DREV89738: HYCOM-GSM-CICE {#milestone_DREV89738}
+=========================
+
+\date Last revised: 3/15/2017
+
+Repository URL
+--------------
+
+https://svnemc.ncep.noaa.gov/projects/nems/apps/HYCOM-GSM-CICE/trunk
+
+Description
+-----------
+
+HYCOM-GSM-CICE 0.1 (DREV89738) is a three-way configuration of the
+Global Spectral Model (GSM), HYCOM (v2.2.98), and the Los Alamos Sea
+Ice Model (CICE5.0). GSM runs on a T126 grid, and HYCOM and CICE run
+on a 0.24 degree tripolar global grid.
+
+Field Exchange
+--------------
+
+The NEMS mediator receives the model grids at initialization and
+creates a decomposition of those grids on the mediator PETs. The
+generic NUOPC Connector redistributes the data from the model PETs and
+decomposition to the mediator PETs and decomposition. The fields are
+transferred on their native model grids. A full set of fields is
+transferred among components using the mediator.
+
+| From Mediator to HYCOM | From Mediator to GSM | From Mediator to CICE |
+| ---------------------- | -------------------- | --------------------- |
+| mean net shortwave flux (including penetrating shortwave flux through the base of sea-ice) | sea surface temperature | atmosphere lowest level height |
+| mean downward longwave flux | surface temperature (SST merged with sea ice temperature) | zonal wind velocity at the lowest level height |
+| mean upward longwave flux | mean upward longwave flux | meridional wind velocity at the lowest level height |
+| zonal momentum flux (including flux from sea ice) | mean latent heat flux | specific humidity at the lowest level height |
+| meridional momentum flux (including flux from sea ice) | mean sensible heat flux | air density at the lowest level height |
+| sensible heat flux | mean evaporation rate | atmosphere pressure at the lowest level height |
+| latent heat flux | zonal momentum flux | air temperature at the lowest level height |
+| net heat flux from sea ice | meridional momentum flux | mean downward shortwave flux (vis, dir) |
+| net fresh water flux from sea ice | mean ice volume from CICE | mean downward shortwave flux (vis, dif) |
+| precipitation rate | mean snow volume from CICE | mean downward shortwave flux (ir, dir) |
+| sea ice fraction | sea ice fraction | mean downward shortwave flux (ir, dif) |
+| salt flux | sea ice albedo (vis, dir) | mean downward longwave flux |
+| &nbsp; | sea ice albedo (vis, dif) | rainfall rate |
+| &nbsp; | sea ice albedo (ir, dir) | snowfall rate |
+| &nbsp; | sea ice albedo (ir, dif) | freezing/melting potential |
+| &nbsp; | &nbsp; | sea surface temperature |
+| &nbsp; | &nbsp; | sea surface salinity |
+| &nbsp; | &nbsp; | sea surface slope |
+| &nbsp; | &nbsp; | zonal ocean current |
+| &nbsp; | &nbsp; | meridional ocean current |
+
+Validation
+----------
+
+This revision has been run for more than 5 days and exhibits behavior
+that is Earth-like. The plots below show SST and sea surface salinity
+after 5 days of model integration.
+
+\image html DREV89738-validate-1.png
+
+\image html DREV89738-validate-2.png
+
+Run Sequences
+-------------
+
+HYCOM-GSM-CICE includes two run sequences, a cold start sequence and a time integration sequence.
+
+### Cold start sequence
+
+The cold start sequence initializes components using a minimal set of
+files needed by GSM. The cold start sequence runs for an hour to
+generate restart files for the mediator. There is a fast and a slow
+loop, at 15 minutes and 30 minutes, respectively.
+
+    runSeq::
+      @1800.0
+        @900.0
+          MED MedPhase_prep_atm
+          MED -> ATM :remapMethod=redist
+          ATM
+          ATM -> MED :remapMethod=redist
+          MED MedPhase_prep_ice
+          MED -> ICE :remapMethod=redist
+          ICE
+          ICE -> MED :remapMethod=redist
+          MED MedPhase_atm_ocn_flux
+          MED MedPhase_accum_fast
+        @
+        MED MedPhase_prep_ocn
+        MED -> OCN :remapMethod=redist
+        OCN
+        OCN -> MED :remapMethod=redist
+      @
+    ::
+
+### Time integration sequence
+
+The time integration sequence is initialized by mediator restart files
+generated by the cold start sequence.
+
+    runSeq::
+      @1800.0
+        MED MedPhase_prep_ocn
+        MED -> OCN :remapMethod=redist
+        OCN
+        @900.0
+          MED MedPhase_prep_ice
+          MED MedPhase_prep_atm
+          MED -> ATM :remapMethod=redist
+          MED -> ICE :remapMethod=redist
+          ATM
+          ICE
+          ATM -> MED :remapMethod=redist
+          ICE -> MED :remapMethod=redist
+          MED MedPhase_atm_ocn_flux
+          MED MedPhase_accum_fast
+        @
+        OCN -> MED :remapMethod=redist
+        MED MedPhase_write_restart
+      @
+    ::
+
+Download, Build, Run, and Restart
+=================================
+
+Download and Build
+------------------
+
+    svn co -r 89738  https://svnemc.ncep.noaa.gov/projects/nems/apps/HYCOM-GSM-CICE/trunk  work
+    cd work/HYCOM/config
+    cp Global/Aintelrelo_nems_nuopc  Aintelrelo_nuopc
+    cd ../..
+    ./NEMS/NEMSAppBuilder
+    cp NEMS/exe/NEMS.x  run_dir/NEMS/exe
+
+Cold Start Run and Restart Run
+------------------------------
+
+Compsets are located in the work/run_dir/NEMS/compsets directory. The compsets that can be run with this revision are:
+
+* `cfsr%20150401_1hr_nems%cold_gsm%slg%T126_cice%0.24_hycom%0.24`
+* `cfsr%20150401_1day_nems_gsm%slg%T126_cice%0.24_hycom%0.24`
+* `cfsr%20150401_5days_nems_gsm%slg%T126_cice%0.24_hycom%0.24`
+
+To run compsets, start within the run_dir directory and execute
+NEMSCompsetRun:
+
+    cd work/run_dir
+    ./NEMS/NEMSCompsetRun -compset <compset name>
+
+If you leave off the `-compset` argument, CompsetRun will read the
+compset list from a local file called `HYCOM-GSM-CICE.compsetRun`.
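+
+Such a file is presumably just a list of compsets to run, e.g. (a hypothetical sketch; check the file shipped with the app for the exact format):
+
+    cfsr%20150401_1day_nems_gsm%slg%T126_cice%0.24_hycom%0.24
+    cfsr%20150401_5days_nems_gsm%slg%T126_cice%0.24_hycom%0.24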
+
+To initialize a new case of the HYCOM-GSM-CICE from a cold start, run
+the cold start compset,
+`cfsr%20150401_1hr_nems%cold_gsm%slg%T126_cice%0.24_hycom%0.24`, to
+generate initial mediator restart files. That compset runs the
+atm/ice/ocean sequentially for 1 hour. It will generate some initial
+mediator restart files consisting of initial values for coupling
+fields.
+
+After running the cold start compset, edit `NEMS/NEMSCompsetRun` by
+modifying `MED_INPUT_DIR` in "setup_med_nems" to change the location
+of the cold start mediator restart files. This will copy the mediator
+restart files from your cold start run directory into the new run
+directory.
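+
+For example, the relevant line in "setup_med_nems" might end up looking like this (a sketch; the path is illustrative):
+
+    MED_INPUT_DIR=/path/to/your/coldstart/run_dir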
+
+Once the NEMSCompsetRun is modified, run a restart compset like
+`cfsr%20150401_1day_nems_gsm%slg%T126_cice%0.24_hycom%0.24` to advance
+the model from the initial conditions. The system will start with the
+same atmosphere, ocean, and ice initial conditions as the cold start
+run plus the new mediator restart files, and the components will run
+concurrently.
\ No newline at end of file
Index: checkout/doc/scan_repo.pl
===================================================================
--- checkout/doc/scan_repo.pl	(nonexistent)
+++ checkout/doc/scan_repo.pl	(revision 94669)
@@ -0,0 +1,34 @@
+#! /usr/bin/env perl
+
+my $nems_rev='unknown';
+my $app_rev='unknown';
+my $nems_loc='nems';
+my $app_name='app';
+my $app_loc='app';
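+
+# Scan the enclosing SVN working copies: '..' is the NEMS checkout and
+# '../..' is the application checkout.  The results are printed as
+# shell variable assignments (presumably to be eval'd by build scripts).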
+
+if( open(FH,'svn info .. |') ) {
+    while(defined($_=<FH>)) {
+        chomp;
+        /^Revision: (\d+)/ and $nems_rev=$1;
+        # URL: https://svnemc.ncep.noaa.gov/projects/nems/branches/update-docs
+        m(^URL: .*?(branches/\S+|tags/\S+|[^/]+)$) and $nems_loc=$1;
+    }
+}
+
+if( open(FH,'svn info ../.. |') ) {
+    while(defined($_=<FH>)) {
+        chomp;
+        /^Revision: (\d+)/ and $app_rev=$1;
+        #URL: https://svnemc.ncep.noaa.gov/projects/nems/apps/NEMSGSM/trunk
+        m(^URL: https://.*?/apps/([^/]+)) and $app_name=$1;
+        m(^URL: .*?(branches/\S+|tags/\S+|[^/]+)$) and $app_loc=$1;
+    }
+}
+
+print(
+"nems_rev=\"$nems_rev\"
+app_rev=\"$app_rev\"
+nems_loc=\"$nems_loc\"
+app_loc=\"$app_loc\"
+app_name=\"$app_name\"
+")

Property changes on: checkout/doc/scan_repo.pl
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: checkout/doc/HowTo_OldToNewStruct_buildrun.md
===================================================================
--- checkout/doc/HowTo_OldToNewStruct_buildrun.md	(nonexistent)
+++ checkout/doc/HowTo_OldToNewStruct_buildrun.md	(revision 94669)
@@ -0,0 +1,197 @@
+Step 5. Build, Run and Document  {#HowToOldToNewBuildRun}
+==========================
+
+ 
+Step 5.1:
+---------
+
+Try to build your component. To do this, make sure to run the proper
+command for your shell.  To find out which shell you are using:
+
+    echo $0
+
+In tcsh:
+
+    ./NEMS/NEMSAppBuilder app=coupledGSM_WW3 >& coupledbuild.log &
+
+In bash or ksh:
+
+    ./NEMS/NEMSAppBuilder app=coupledGSM_WW3 > coupledbuild.log 2>&1 &
+
+Alternatively, you could use the GUI mode:
+
+    ./NEMS/NEMSAppBuilder
+
+Make sure you select your new app (coupledGSM_WW3 in our case). You can
+find more detailed information on the AppBuilder here.
+ 
+You will be able to tell whether your build was successful by checking
+whether the NEMS.x executable exists in the NEMS/exe folder.  If the
+executable was not built successfully, coupledbuild.log will give you
+information about the build and any error messages.
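+
+For example (a sketch; the grep pattern is just a heuristic):
+
+    ls -l NEMS/exe/NEMS.x          # should exist after a successful build
+    grep -i error coupledbuild.log # scan the build log for failure messages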
+
+\todo link to the help for build not running 
+ 
+Step 5.2:
+---------
+
+Now try to run your compset.  From your top level directory: 
+
+    ./NEMS/NEMSCompsetRun -f
+ 
+For more information on the NEMSCompsetRun go to: 
+
+\todo link to doc NEMSCompsetRun  options
+\todo link to doc  How to restart one job
+\todo link to doc  How to interpret results and find log files for each job
+(note the rtreport doesn't have the log file for the failed jobs listed) 
+ \todo link to doc on CompsetRun, rewind, resume etc. 
+\todo Where do we change the 
+\todo how to only run a single compset 
+\todo how to change the account (nems, marine-cpu, etc)
+\todo how to change the queue 
+
+
+Step 5.3: 
+-------- 
+
+Hopefully, the system is now running.  For this example you can see where
+the app is at the following tag: 
+* https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Weather/tags/NEMSTutorial/StepLast
+
+If you would like you can try this app: 
+
+    svn co https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Weather/tags/NEMSTutorial/StepLast UpAppEx
+    cd UpAppEx 
+    ./NEMS/NEMSCompsetRun -f 
+
+
+All options for the NEMSCompsetRun can be found at 
+\todo link to all options for the NEMS compset run, including on how to set/change the account (nems, marine-cpu, etc) and how to change the queue
+
+The above command should then give the following output: 
+
+    $ ./NEMS/NEMSCompsetRun -f
+    06/19 12:33:17Z NEMSCompsetRun-INFO:  Test suite subset = *
+    06/19 12:33:17Z NEMSCompsetRun-INFO:  Starting: exe('account_params')
+    06/19 12:33:17Z NEMSCompsetRun-INFO:    - and will capture output.
+    Processing Unix group file /etc/group
+    Processing Allocation file /home/admin/userdb/theia_allocations.csv
+    06/19 12:33:25Z NEMSCompsetRun-INFO:  Auto-chosen project for job submission is 'fv3-cpu'
+    06/19 12:33:25Z NEMSCompsetRun-INFO:  Auto-chosen ptmp is '/scratch4/NCEPDEV/stmp4/Jessica.Meixner'
+    06/19 12:33:25Z NEMSCompsetRun-INFO:  Parsing compset descriptions.
+    06/19 12:33:25Z NEMSCompsetRun-INFO:  Verifying repo fingerprint against data fingerprint.
+    06/19 12:33:25Z NEMSCompsetRun-INFO:  Baseline fingerprint matches repo fingerprint. Rejoice.
+    06/19 12:33:25Z NEMSCompsetRun-INFO:    Baseline fingerprint file: /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531/REGTEST-FINGERPRINT.md
+    06/19 12:33:25Z NEMSCompsetRun-INFO:    Repository fingerprint file: /scratch4/NCEPDEV/nems/noscrub/Jessica.Meixner/StepLast/parm/REGTEST-FINGERPRINT.md
+    06/19 12:33:25Z NEMSCompsetRun-INFO:  Generating workflow with id 60503.
+    06/19 12:33:25Z NEMSCompsetRun-INFO:  Requested test has been generated.
+    06/19 12:33:25Z rtrun INFO: check dependencies and submit jobs...
+    06/19 12:33:26Z rtrun INFO: check status...
+    06/19 12:33:26Z rtrun INFO: workflow is still running and no jobs have failed.
+    06/19 12:33:26Z rtrun INFO: sleep 2
+    06/19 12:33:28Z rtrun INFO: get queue information
+     Job ID   Reserv.     Queue    Procs ST Queue Time  Stdout Location
+    -------- -------- ------------ ----- -- ----------- ------------------------------------
+    25340171          batch            1 Q  06/19 12:33 /scratch4/NCEPDEV/stmp4/Jessica.Meixner/rtgen.60503/tmp/log/build_gsm_ww3.x.log
+    From qstat -x  job list (age 0 sec.)
+    06/19 12:33:28Z rtrun INFO: sleep 100
+    06/19 12:35:08Z rtrun INFO: check dependencies and submit jobs...
+    06/19 12:35:09Z rtrun INFO: check status...
+    06/19 12:35:09Z rtrun INFO: workflow is still running and no jobs have failed.
+    06/19 12:35:09Z rtrun INFO: sleep 2
+    06/19 12:35:11Z rtrun INFO: get queue information
+     Job ID   Reserv.     Queue    Procs ST Queue Time  Stdout Location
+    -------- -------- ------------ ----- -- ----------- ------------------------------------
+    25340171          batch            1 R  06/19 12:33 /scratch4/NCEPDEV/stmp4/Jessica.Meixner/rtgen.60503/tmp/log/build_gsm_ww3.x.log
+    From qstat -x  job list (age 0 sec.)
+    06/19 12:35:11Z rtrun INFO: sleep 100
+        ...
+    06/19 12:59:10Z rtrun INFO: check dependencies and submit jobs...
+    06/19 12:59:10Z rtrun INFO: check status...
+    06/19 12:59:10Z rtrun INFO: workflow is still running and no jobs have failed.
+    06/19 12:59:11Z rtrun INFO: sleep 2
+    06/19 12:59:13Z rtrun INFO: get queue information
+     Job ID   Reserv.     Queue    Procs ST Queue Time  Stdout Location
+    -------- -------- ------------ ----- -- ----------- ------------------------------------
+    25340210          debug           60 R  06/19 12:48 /scratch4/NCEPDEV/stmp4/Jessica.Meixner/rtgen.60503/tmp/log/test_cfsr@20150401_1day_blocked_gsm@slg@T126_ww3@t188.log
+    From qstat -x  job list (age 0 sec.)
+    06/19 12:59:13Z rtrun INFO: sleep 100
+    06/19 13:00:53Z rtrun INFO: check dependencies and submit jobs...
+    06/19 13:00:53Z rtrun INFO: check status...
+    06/19 13:00:53Z rtrun INFO: workflow is still running and no jobs have failed.
+    06/19 13:00:54Z rtrun INFO: sleep 2
+    06/19 13:00:56Z rtrun INFO: get queue information
+     Job ID   Reserv.     Queue    Procs ST Queue Time  Stdout Location
+    -------- -------- ------------ ----- -- ----------- ------------------------------------
+    25340210          debug           60 R  06/19 12:48 /scratch4/NCEPDEV/stmp4/Jessica.Meixner/rtgen.60503/tmp/log/test_cfsr@20150401_1day_blocked_gsm@slg@T126_ww3@t188.log
+    From qstat -x  job list (age 0 sec.)
+    06/19 13:00:56Z rtrun INFO: sleep 100
+    06/19 13:02:36Z rtrun INFO: check dependencies and submit jobs...
+    06/19 13:02:36Z rtrun INFO: check status...
+    06/19 13:02:36Z rtrun INFO: workflow is complete and all jobs succeeded.
+    06/19 13:02:37Z NEMSCompsetRun-INFO:  generate report
+    06/19 13:02:37Z NEMSCompsetRun-INFO:  copy build logs to /scratch4/NCEPDEV/nems/noscrub/Jessica.Meixner/StepLast/NEMS/tests/../../log/report-theia-log
+    Report says test succeeded.
+    TEST RESULT: PASS
+
+
+Note from this output we can see which project (queue) was chosen, where the output will go 
+(the ptmp directory), the workflow id, and more. It also gives you the paths to the log files for the 
+different parts of the execution.  
+
+In this case, the log file for the build is /scratch4/NCEPDEV/stmp4/Jessica.Meixner/rtgen.60503/tmp/log/build_gsm_ww3.x.log
+and the log file for the compset is /scratch4/NCEPDEV/stmp4/Jessica.Meixner/rtgen.60503/tmp/log/test_cfsr@20150401_1day_blocked_gsm@slg@T126_ww3@t188.log.  The output of each compset is located in the tmp/<compsetname> directory, so for this case it would be: /scratch4/NCEPDEV/stmp4/Jessica.Meixner/rtgen.60503/tmp/cfsr@20150401_1day_blocked_gsm@slg@T126_ww3@t188. 
+
+For general information about the generated work directories and workflow see: 
+\todo link to description of regtest output folder/workflow directories. 
+
+If your compset fails, you can rerun the compset by going to the workflow directory, in this case 
+/scratch4/NCEPDEV/stmp4/Jessica.Meixner/rtgen.60503/, and rewinding the job with the following command: 
+
+    ./rtrewind <compset name> 
+
+For more information on rtrewind, see: 
+\todo link do doc on rtrewind 
+
+Then, go back to your source code directory and resume the job.  This requires knowing the path to your workflow directory; again, in this case the workflow directory is /scratch4/NCEPDEV/stmp4/Jessica.Meixner/rtgen.60503/. The command is: 
+
+    ./NEMS/NEMSCompsetRun --resume <path to workflow directory> 
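+
+Putting the two steps together for this example (a sketch; the paths and compset name come from the run above):
+
+    cd /scratch4/NCEPDEV/stmp4/Jessica.Meixner/rtgen.60503
+    ./rtrewind cfsr@20150401_1day_blocked_gsm@slg@T126_ww3@t188
+    cd /scratch4/NCEPDEV/nems/noscrub/Jessica.Meixner/StepLast
+    ./NEMS/NEMSCompsetRun --resume /scratch4/NCEPDEV/stmp4/Jessica.Meixner/rtgen.60503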
+
+If your compset fails, some things to look for:
+- Is it just because your baseline data does not exist or is not in the right location?
+- Are all variables set in the compset and in the run_exglobal_fcst section?
+- Are you using the new versions of your branches?
+- Double check that all old changes were migrated to new branches.
+\todo other items to add here? 
+
+
+\todo how to run a subset of the all.input based on the keywords 
+\todo can you run the workflow without a terminal open?  
+\todo is there an option to only run one compset? 
+ 
+Step 5.4:
+-------
+
+Add documentation for your application; this should include a basic 
+description of your application and any other application-specific
+information you think should be included. Note that documentation of 
+individual compsets occurs within the compsets themselves.  Put this
+documentation in app/doc/README.md. Additional files can be 
+added, but the README is the main page.  For this application the 
+documentation is simply the following: 
+
+
+    UGCS-Weather Application
+    ========================
+    This site documents the UGCS Weather application, which
+    is for coupling ATM<->WAV models.
+    The available component for ATM is GSM and for WAV
+    is WAVEWATCH III.
+    The goal is to determine the impact of including
+    sea-state dependent feedback from the WAV model on
+    both the ATM and WAV models.
+
+
+
Index: checkout/doc/HowTo_OldToNewStruct_appbuilder.md
===================================================================
--- checkout/doc/HowTo_OldToNewStruct_appbuilder.md	(nonexistent)
+++ checkout/doc/HowTo_OldToNewStruct_appbuilder.md	(revision 94669)
@@ -0,0 +1,130 @@
+Step 2. Updating AppBuilder  {#HowToOldToNewAppBuilder}
+======================
+ 
+Step 2.1: 
+---------
+
+The next step is to figure out which modules are needed for the new
+app. The first thing to do is to determine which module files the
+copied app is using. To do this, open the existing *.appBuilder files.
+ 
+    $ ls *.appBuilder
+    standaloneGSM.appBuilder  standaloneGSM%gocart.appBuilder
+ 
+For this case there are two choices and the relevant part of the file is: 
+ 
+    case "$FULL_MACHINE_ID" in
+        yellowstone|gaea)
+            CHOSEN_MODULE=$FULL_MACHINE_ID/ESMF_NUOPC
+            ;;
+        wcoss*|theia)
+            CHOSEN_MODULE=$FULL_MACHINE_ID/ESMF_700_gsm
+            CONFOPT="gsm_intel_${MACHINE_ID:?}"
+            ;;
+    esac
+     
+The variable `CHOSEN_MODULE` corresponds to a file in the
+`modulefiles/<FULL_MACHINE_ID>` directory.
+ 
+In this case, we'll use Theia as our first target.  Theia is using
+ESMF_700_gsm (therefore the modules listed in
+`modulefiles/theia/ESMF_700_gsm`).
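+
+To see exactly which modules that file loads, you can inspect it directly:
+
+    cat modulefiles/theia/ESMF_700_gsm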
+ 
+\note A similar procedure would be used for other platforms.  Note
+that wcoss.phase2 is simply a link from wcoss.phase1.
+ 
+Step 2.2:
+-------- 
+
+Copy the modulefile used in Step 2.1 to start a new modulefile for your app:
+
+    svn cp modulefiles/theia/ESMF_700_gsm   modulefiles/theia/ESMF_700_gsm_ww3 
+ 
+Step 2.3: 
+---------
+
+Compare the modules in your new file (in this case
+`modulefiles/theia/ESMF_700_gsm_ww3`) to the module files used in your
+old app.  In your old app structure, you can determine which modules
+were used by looking in the `*.appBuilder` file in the
+`environment_<platform>()` functions.  Update the module file with any
+needed modules.  You will need to check for the following things:
+
+The first line must be `#%Module#`. This can be followed by additional `#` characters, e.g.: 
+
+    #%Module#####################################################################
+
+There should be no source commands, for example: 
+
+    source /etc/profile 
+
+Make sure there is no bash-specific code, such as export statements. 
+Avoid using user-installed module files when possible, for example: 
+
+    module use /scratch4/NCEPDEV/nems/save/USER.NAME/Modulefiles
+ 
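+Once these checks pass, the top of the cleaned-up modulefile might look like this (a minimal sketch; the module names here are illustrative, not the actual Theia list):
+
+    #%Module#####################################################################
+    # Modules needed to build the coupled GSM-WW3 app
+    module load intel
+    module load impi
+    module load netcdf
+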
+\todo These should be put somewhere in documentation about module files and then we 
+should link to that location 
+ 
+Step 2.4:  
+---------
+
+Check the `*.appBuilder` file to see if there is a variable `CONFOPT`
+specified, for example, in the NEMSfv3gfs:
+
+    CONFOPT=configure.fv3.$FULL_MACHINE_ID
+
+This configure file is located in the top level directory conf/.  If
+no CONFOPT is specified, the default is to use
+
+    configure.nems.<platform>.<compiler>
+
+Most likely, the default configure file is what you will want to use,
+but you should confirm that the settings are correct for your
+component. In general, these store compilation and link time
+variables.
+ 
+To see the current state of the new app, see the following tag: 
+
+* https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Weather/tags/NEMSTutorial/Step11
+ 
+Step 2.5: 
+---------
+
+We need to create a new *.appBuilder file.  To do this, start by
+copying an existing *.appBuilder file that is as similar as possible
+to your target application:
+
+    svn copy standaloneGSM.appBuilder coupledGSM_WW3.appBuilder
+ 
+Step 2.6: 
+---------
+
+Now you need to update the new `*.appBuilder` file to include your
+desired components. For this app, that's adding WW3, which includes the
+following steps:
+ 
+1. Added WW3 to the list of components. Note there must be a space
+   after the component name and before the ), ie:
+
+       COMPONENTS=( GSM WW3 )
+ 
+2. Add the install and source directories, ie. set the `<APP>_BINDIR`
+   and `<APP>_SRCDIR` variables.  You should find the locations of
+   these files in your old app structure's `*.appBuilder` file. In
+   this case, for WW3:
+
+       # WW3
+       WW3_SRCDIR=$ROOTDIR/WW3
+       WW3_BINDIR=$ROOTDIR/WW3-INSTALL
+ 
+3. Make sure you are pointing to the correct module files for each
+   platform that you have created a new module file for.  In this case,
+   we have updated for Theia (see the consolidated sketch below).
+
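+Putting these pieces together, the relevant lines of the new `coupledGSM_WW3.appBuilder` might look like this (a sketch; the component list and WW3 paths come from the steps above, the module name from Step 2.2, and the rest is illustrative):
+
+    COMPONENTS=( GSM WW3 )
+
+    # WW3
+    WW3_SRCDIR=$ROOTDIR/WW3
+    WW3_BINDIR=$ROOTDIR/WW3-INSTALL
+
+    case "$FULL_MACHINE_ID" in
+        theia)
+            CHOSEN_MODULE=$FULL_MACHINE_ID/ESMF_700_gsm_ww3
+            ;;
+    esac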
+
+To see the current state, see the tag: 
+
+ * https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Weather/tags/NEMSTutorial/Step13
+
+
Index: checkout/doc/GRID_Front_Range_Regional.md
===================================================================
--- checkout/doc/GRID_Front_Range_Regional.md	(nonexistent)
+++ checkout/doc/GRID_Front_Range_Regional.md	(revision 94669)
@@ -0,0 +1,30 @@
+Front Range Grid (Regional) {#GRID_Front_Range_Regional}
+===========================
+
+Description
+-----------
+
+The regional Front Range grid is a Lambert Conformal projection over
+the Front Range in Colorado, USA.
+
+| Long Name                     | Name    | Value      |
+| :---------------------------- | :------ | :--------- |
+| Number of longitudinal points | N<sub>i</sub>    | 268        |
+| Number of latitudinal points  | N<sub>j</sub>    | 260        |
+| Northernmost latitude         | La<sub>1</sub>   | 40.9416 N  |
+| Easternmost longitude         | Lo<sub>1</sub>   | 106.6576 W |
+| Southernmost latitude         | La<sub>2</sub>   | 38.5997 N  |
+| Westernmost longitude         | Lo<sub>2</sub>   | 103.5234 W |
+| Longitudinal increment        | D<sub>i</sub>    | \~1 KM     |
+ 
+Regional Location (Google Earth)
+--------------------------------
+
+\image html GRID_Front_Range_Regional-frontrange_google_earth.jpg
+ 
+Grid Cell Plot (NCL)
+--------------------
+
+\image html GRID_Front_Range_Regional-frontrange_regional.png
+\image html GRID_Front_Range_Regional-frontrange_conus.png
+\image html GRID_Front_Range_Regional-frontrange_global.png
Index: checkout/doc/HowTo_OldToNewStruct_compsets.md
===================================================================
--- checkout/doc/HowTo_OldToNewStruct_compsets.md	(nonexistent)
+++ checkout/doc/HowTo_OldToNewStruct_compsets.md	(revision 94669)
@@ -0,0 +1,466 @@
+Step 4. Transition to new Compsets  {#HowToOldToNewCompsets}
+=============================
+ 
+ 
+Step 4.1:  
+---------
+
+Now it is time to start transitioning to the new
+compsets. Additional documentation on compsets can be found here.  To
+create a new compset, start by copying the compsets/*.input file that is the
+most similar to what you will be running, which is compsets/gsm.input
+in this example:
+ 
+    cd compsets
+    svn cp gsm.input gsm_ww3.input
+
+\todo link to compsets above step 4.1
+ 
+Step 4.2:
+---------
+
+In your new compset file (compsets/gsm_ww3.input), you will want to
+delete all unneeded tests/compsets, keeping only the one that you will update for your
+case.  If using gsm.input, delete everything except the gfs_slg or
+gfs_eulerian test and change "`test gfs_<test>`" to "`compset
+your_compset_name`".  Note, any `%` signs in the compset name should be
+replaced with `@` signs.
+ 
+In this example, we are converting the old compset
+[cfsr%20150401_1day_leapfrog_gsm%slg%T126_ww3%t188](https://svnemc.ncep.noaa.gov/projects/nems/branches/UGCS-Seasonal/twoWayWW3_from_r80562/compsets/cfsr%2520150401_1day_leapfrog_gsm%25slg%25T126_ww3%25t188)
+ 
+So we will change `test gfs_slg`, to:
+
+    compset cfsr at 20150401_1day_blocked_gsm@slg at T126_ww3@t188 
+ 
+Step 4.3: 
+---------
+
+The next step is to delete any builds you do not need and create any
+builds that you do need. In this example, we do the following:
+ 
+* We are starting with the gsm.x build, as we do not need gocart.
+
+* Next we rename gsm.x to gsm_ww3.x.  This has to be changed in the
+  `build gsm.x` command as well as in the variables NEMS.x and
+  modules.nems.
+
+* We also need to update the name of the app= in the build
+  variable.  This is the app name that appears in the appBuilder file
+  (`<appname>.appBuilder`) that you created in the top level of your
+  directory.  For this case, this is coupledGSM_WW3.
+
+* Lastly, delete any other builds you do not need, which in this case is the build gsm_gocart.x. 
+ 
+Step 4.4: 
+---------
+
+Connect the new build to the compset.  This is done in two places:
+in the top line, where we replace gsm.x with gsm_ww3.x, and in the
+build=gsm.x line, which becomes build=gsm_ww3.x.  The first place is
+the dependency list:
+ 
+    compset cfsr@20150401_1day_blocked_gsm@slg@T126_ww3@t188: gsm_ww3.x {
+ 
+The second is the build variable:
+ 
+    build=gsm_ww3.x
+ 
+Step 4.5: 
+---------
+
+Update the compset description in the variable 
+
+    TEST_DESCR=
+
+which can simply be copied from the old compset.
+ 
+Step 4.6: 
+---------
+
+Update the CNTL_NAME variable, which should be the name of the 
+subdirectory that contains the baseline data (potentially 
+the CNTL_DIR directory name in the old compset): 
+ 
+    CNTL_NAME='cfsr@20150401_1day_blocked_gsm@slg@T126_ww3@t188'
+ 
+Step 4.7: 
+---------
+
+Set up the initial condition and baseline directory path variables.
+To do this, we first look at the old compset where we have
+
+    export IC_DIR=$DATADIR/GSM/T126_CFSR_mask%0.5
+ 
+In the new compset, we now set: 
+
+    GSM_IC_NAME="GSM/T126_CFSR_mask%0.5"
+    GSM_IC_DIR="@[plat%BASELINE]/@[GSM_IC_NAME]"
+    CNTL="@[plat%BASELINE]/RT-Baselines/@[CNTL_NAME]"
+ 
+Note that the "`@[plat%BASELINE]`" replaces the `$DATADIR` in the old
+compset and gives you the platform specific folders. Note `BASELINE` for
+each platform is defined in the params.input file in the compsets directory. 
+ 
+Step 4.8: 
+---------
+
+Now we need to update all.input, replacing `gsm.input` with the name of
+the file you created (in this case gsm_ww3.input), ie:
+
+    load 'gsm.input'
+
+becomes
+
+    load 'gsm_ww3.input'
+ 
+Now we update the following line: 
+
+    run gfs_slg                @ gfs, standard, baseline, slg
+
+to correspond to the compset we just created: 
+
+    run cfsr@20150401_1day_blocked_gsm@slg@T126_ww3@t188  @ gfs, standard, baseline, slg
+ 
+After the @ sign in the above line are keywords that allow you to
+define sets of compsets.  For any given set of compsets that have
+bitwise identical output (and hence the same CNTL variable), one and
+only one of the compsets should have the keyword "baseline".  In
+general, this means that all compsets should have "baseline".  The
+only exceptions are when several compsets intentionally have identical
+output, such as when testing that thread or decomposition changes
+produce bitwise identical output.
+ 
+For this case, we will use "gfs, ww3, standard, baseline, slg" for keywords: 
+
+    run cfsr@20150401_1day_blocked_gsm@slg@T126_ww3@t188  @ gfs, ww3, standard, baseline, slg
+ 
+All other run lines that correspond to deleted compsets in gsm_ww3.input are
+also removed here.
+ 
+Step 4.9: 
+---------
+
+Next, make the input directory.  You will need to start with the
+NEMSGSM data directory for your platform because it contains the
+latest FIXGLOBAL and other files.  If your platform doesn't have that
+data, you need to copy it from a platform that does.  You can find the
+directories in platforms.input, jet.input, and wcoss.input in the
+compsets/ directory.  For example, for Theia:
+
+    BASELINE="/scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/NEMSGSM/trunk-20170213/data"
+
+\cond 
+and for wcoss: 
+
+    BASELINE="/nems/noscrub/emc.nemspara/RT/NEMSGSM/trunk-20170213/data"
+\endcond 
+ 
+Make a copy of that directory, from the trunk-(date) level, in your application-specific 
+area of the RT directory.  The directory structure looks like this:
+ 
+    RT/(AppName)/(Branch-or-trunk)-(revision-or-date)
+ 
+The RT directory exists at these locations:
+ 
+    Theia: /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT
+    WCOSS 1 & 2: /nems/noscrub/emc.nemspara/RT
+    WCOSS Cray: /gpfs/hps/emc/nems/noscrub/emc.nemspara/RT
+    Jet: /lfs3/projects/hfv3gfs/emc.nemspara/RT/
+
+\todo confirm that Jet's RT is /lfs3/projects/hfv3gfs/emc.nemspara/RT/ 
+
+In our case, we place our data here:
+
+    /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531
+
+\cond
+    wcoss: /nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531
+\endcond 
+
+If you do not have emc.nemspara access, then place the data in your
+personal area and have an application code manager copy it for you.
+
+Now, copy the data: 
+
+\cond
+Theia:
+\endcond 
+
+    mkdir -p /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531
+    cd /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531
+    rsync -arv /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/NEMSGSM/trunk-20170213/data/. .
+    mkdir RT-Baselines
+
+\cond 
+Wcoss: 
+
+    mkdir -p /nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531
+    cd /nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531
+    rsync -arv /nems/noscrub/emc.nemspara/RT/NEMSGSM/trunk-20170213/. .
+    mkdir RT-Baselines
+\endcond 
+
+Next, update the BASELINE and BASELINE_TEMPLATE in compsets/platforms.input: 
+
+    BASELINE="/scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531"
+    BASELINE_TEMPLATE="@[BASELINE]"
+
+Note, compsets/platforms.input gets information for wcoss from compsets/wcoss.input. 
+ 
+Step 4.10:
+---------
+
+Within the baseline directory that you just copied, there is a file
+called REGTEST-FINGERPRINT.md which must be updated.  This file
+documents the compset input and baseline data stored outside the
+repository.  It also serves as a verification method to ensure the
+data directory in use matches the one desired by this version of the
+application. Therefore, anytime there is an update to the compset
+inputs or baseline data, this file needs to be updated in the data
+storage area and in the repository.
+ 
+So the two files that must match are: 
+
+    /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531/REGTEST-FINGERPRINT.md
+    /<path to svn app directory>/parm/REGTEST-FINGERPRINT.md
+ 
+We updated the file in
+
+    /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531/REGTEST-FINGERPRINT.md
+
+and copied those changes to our local copy of /parm/REGTEST-FINGERPRINT.md.
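+
+A quick way to verify that the two copies match (a sketch using the Theia path above; run from your app checkout):
+
+    diff /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531/REGTEST-FINGERPRINT.md parm/REGTEST-FINGERPRINT.md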
+ 
+Step 4.11:  
+---------
+
+Now we need to copy ALL of the initial conditions and other
+inputs needed from GSM for the compset into the input directory that we
+specified in gsm_ww3.input.  In this case, we have:
+
+    GSM_IC_NAME="GSM/T126_CFSR_mask%0.5"
+    GSM_IC_DIR="@[plat%BASELINE]/@[GSM_IC_NAME]"
+
+ 
+So for theia, this would correspond to: 
+
+    /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531/GSM/T126_CFSR_mask%0.5
+\cond
+Or wcoss
+
+    /nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531/GSM/T126_CFSR_mask%0.5
+\endcond
+ 
+So first, we make this directory: 
+
+\cond
+theia:
+\endcond
+
+    mkdir -p /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531/GSM/T126_CFSR_mask%0.5
+    cd /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531/GSM/T126_CFSR_mask%0.5
+ 
+\cond
+wcoss:
+
+    mkdir -p /nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531/GSM/T126_CFSR_mask%0.5
+    cd /nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531/GSM/T126_CFSR_mask%0.5
+\endcond 
+
+To find where your initial condition data was originally stored, look in your original compset 
+
+    export IC_DIR=$DATADIR/GSM/T126_CFSR_mask%0.5
+
+We find the value of DATADIR in NEMS/NEMSCompsetRun by looking in the correct platform definition, so for Theia: 
+
+    export DATADIR=/scratch4/NCEPDEV/nems/noscrub/NEMS-Data
+
+\cond
+and for wcoss: 
+
+    export DATADIR=/climate/noscrub/emc.climpara/NEMS-Data
+\endcond 
+ 
+Then copy the data: 
+
+\cond
+Theia:
+\endcond
+
+    rsync -arv /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/GSM/T126_CFSR_mask%0.5/. .
+ 
+\cond
+wcoss:
+
+    rsync -arv /climate/noscrub/emc.climpara/NEMS-Data/GSM/T126_CFSR_mask%0.5/. .
+\endcond  
+
+Step 4.12: 
+---------
+
+The next step is to get initial conditions and inputs for each of your
+other components.  You can find where these files are using your old
+app, compset and NEMSCompsetRun.  First, look in your old compset for
+component specific set up calls, ie:
+ 
+    # - component specific setup calls ---
+    setup_ww3Case2
+ 
+And then find the corresponding code in the old NEMS/NEMSCompsetRun: 
+
+    setup_ww3Case2(){
+      if [ $MACHINE_ID = theia ] ; then
+        cp -r /scratch4/NCEPDEV/nems/noscrub/Jessica.Meixner/esmf_files/Case2_20160831/* ${RUNDIR}/.
+      elif [ $MACHINE_ID = wcoss ] ; then
+        cp -r /marine/noscrub/Jessica.Meixner/esmf_files/Case2_20170323/* ${RUNDIR}/.
+      fi
+    }
+ 
+Then, copy this information to a folder in your RT directory
+`RT/<AppName>/<branch-or-trunk>-<date>/<component>/<description>` ie:
+
+    cp -r /scratch4/NCEPDEV/nems/noscrub/Jessica.Meixner/esmf_files/Case2_20160831/* /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531/WW3/Case2 
+ 
+\cond
+wcoss:
+
+    cp -r  /marine/noscrub/Jessica.Meixner/esmf_files/grid/Case2_20170531/* /nems/noscrub/emc.nemspara/RT/UGCS-Weather/UpdateStructure-20170531/WW3/Case2 
+\endcond  
+ 
+Step 4.13: 
+---------
+
+Now, within your new compset, add lines to the filters input block to copy the newly created 
+directory of component-specific setup files. This is the line below "# WW3 Specific":
+
+    filters input {
+      # Parse any files that need variables from other scopes, and do
+      # not need fancy scripting.  Presently, this is just the NEMS
+      # and mediator init files.
+      #           WORK FILE <=method==  SOURCE
+      'atmos.configure'     <=atparse=  "@[CONF]/atmos.configure_gfs"
+      'nems.configure'      <=atparse=  "@[CONF]/nems.configure.@[nems_configure].IN"
+      # WW3 Specific
+                        '*' <=copydir= "@[plat%BASELINE]/WW3/Case2"
+    }
+ 
+Step 4.14:
+---------
+
+Now it is time to go through your original compset and update/add appropriate values to
+variables in your new compset. Start with the following variables: 
+
+    NTHREADS=1
+    TASKS=60
+    wallclock=1800 #in seconds 
+
+NOTE: wallclock time is now in seconds and NOT minutes. 
+ 
+
+Step 4.15: 
+---------
+
+Update variables in the prerun=run_exglobal_fcst section.  Variable 
+names in the new scripts match the ones in the
+exglobal_fcst.sh.  In the old system, there were one or more aliases
+for the same variable and some variables set in the compset did not
+actually do anything.  That has been corrected now, but you do need to
+find the correct variable names.  The list of supported variables is
+in exglobal_fcst.input. If a variable is not in exglobal_fcst.input,
+check the master list of GFS variables, maintained by Kate Howard, to 
+see the meaning of your variable and any possible synonyms. The variables
+in your old compset to look at are likely in the gsm configure section. 
+Note that you should put quotes around most variable values. 
+
+In this section you should also define NHRS, the number of hours of 
+simulation, instead of NDAYS as before. This is because NDAYS is
+subsequently internally defined as NDAYS=NHRS/24. 
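+
+For example, for a 1-day run (a sketch; quoting per the note above):
+
+    NHRS='24'   # 24 hours of simulation; NDAYS is derived internally as NHRS/24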
+
+To add a new variable that is not already supported in 
+exglobal_fcst.input, include it in the prerun section as well as defining
+it within exglobal_fcst.input. 
+
+ 
+Step 4.16:
+---------
+
+Next, we need to make the `atmos.configure` and `nems.configure` files and
+set the appropriate coupling variables.  In this case, the
+atmos.configure file that is used (`@[CONF]/atmos.configure_gfs`) is
+`parm/atmos.configure_gfs`:
+
+    core: gfs
+
+    atm_model:                      @[atm_model]
+    atm_coupling_interval_sec:      @[coupling_interval_fast_sec]
+
+The `@[variable_names]` get substituted with values defined in your
+`*.input` file compset block.  Some of these variables might have
+default values such as atm_model.
+
+
+For the nems.configure file, make sure to add all the needed 
+`@[variable_names]` into your new compset as well.  Most likely, this 
+will mean copying the definition of the variables under the "nems.configure" 
+section of your old compset, such as: 
+
+    # - nems.configure ---
+    export_nems
+    export nems_configure=blocked_atm_wav
+    export atm_model=gsm
+    export atm_petlist_bounds="0 47"    
+    export wav_model=ww3
+    export wav_petlist_bounds="47 59"  
+    export coupling_interval_sec=1800.0  # 30min coupling
+     
+First, note that the nems_configure variable determines which template file to use, 
+ie. `@[CONF]/nems.configure.@[nems_configure].IN`, which corresponds to 
+parm/nems.configure.blocked_atm_wav.IN in this case.
+ 
+In the old structure, the template files were located in the `NEMS/test/`
+directory; they are now located at the app level in the `parm/`
+directory. If there were any updates to these files, be sure to
+carry those updates over.
+ 
+The `@[variable_names]` get substituted with values defined in your
+`*.input` file compset block. Some of these variables might have
+default values, such as atm_model. Make sure that any non-default
+`@[variable_names]` used by the template file are defined in the
+new compset.
+
+Now add the needed variables in the new compset, making sure to put quotes 
+around the variable values. For this example, we add: 
+ 
+    # - nems.configure ---
+    nems_configure='blocked_atm_wav'
+    atm_model='gsm'
+    atm_petlist_bounds="0 47"
+    wav_model='ww3'
+    wav_petlist_bounds="47 59"
+    coupling_interval_sec=1800.0  # 30min coupling
+
+Step 4.17:
+---------
+
+Add a comment block at the top of the compset section to describe your compset.
+This will be automatically parsed and put with application level documentation. 
+For this compset we added the following: 
+
+    compset cfsr@20150401_1day_blocked_gsm@slg@T126_ww3@t188: gsm_ww3.x {
+        ##! GSM-WW3 coupling with 30 minute intervals
+        #
+        # This compset couples:
+        #
+        # * Semilagrangian GSM using the T126 grid with
+        # * Wavewatch3 using the t188 grid
+        #
+        # using a blocked coupling scheme with a  
+        # 30 minute timestep.
+
+\todo   are there any other specifics for compset documentation? 
Index: checkout/doc/grids.dox
===================================================================
--- checkout/doc/grids.dox	(nonexistent)
+++ checkout/doc/grids.dox	(revision 94669)
@@ -0,0 +1,20 @@
+/** @page grids Grids
+
+This is a list of commonly used grids in the NEMS system.  Note that
+this list is not comprehensive; other grids are in use and new ones
+are always considered.  Some applications even generate grids on the fly.
+
++ \subpage GRID_gsm
++ \subpage GRID_wam
++ \subpage GRID_mom5_1deg_tripole
++ \subpage GRID_mom5_0p5deg_tripole
++ \subpage GRID_CICE_gx3
++ \subpage Note_on_MOM-CICE_Grids
++ \subpage GRID_HYCOM_GLBa0p24
++ \subpage GRID_HYCOM_POP_glbx1v6
++ \subpage GRID_IPE
++ \subpage GRID_Front_Range_Regional
++ \subpage GRID_LIS_T126
++ \subpage GRID_Reduced_T574
+
+*/
\ No newline at end of file
Index: checkout/doc/Note_on_MOM-CICE_Grids.md
===================================================================
--- checkout/doc/Note_on_MOM-CICE_Grids.md	(nonexistent)
+++ checkout/doc/Note_on_MOM-CICE_Grids.md	(revision 94669)
@@ -0,0 +1,70 @@
+Note on MOM-CICE Grids {#Note_on_MOM-CICE_Grids}
+======================
+
+This page is up-to-date with respect to the version: 
+\ref milestone_DREV58214
+
+There have been several discussions about the ice and ocean grids.
+Several have suggested that the ice and ocean grids should generally
+be the same.  As part of that effort, a CICE grid file was created for
+the 360x200 MOM5 grid. When the models pass the grids to the mediator,
+the masks are identical, but the grids are not.  A closer examination
+was carried out, with the conclusion that the differences are small and
+probably expected.
+
+These are ~1 degree grids, 360x200.  Both center and corner
+information is being sent to the mediator.  The ocean and ice grids
+sent to the mediator indicate that the corner points are identical to
+about single precision.  The center points are different in the
+ocean and ice grids by less than 0.015 degrees which is much, much
+less than half a grid box (which would be about 0.5 degrees).
+
+The following appears to be happening.  The MOM grid file defines
+center and corner lons and lats for its grid as a starting point.  The
+CICE model is implemented so the corner points are read in and the
+center points are computed on the fly internally.  A separate tool was
+created that converted the MOM corner points and mask to a CICE grid
+file at single precision.  So the fact that the corner points match up
+in the mediator between the ocean and ice at single precision is what
+we'd expect and is correct.  This could be improved by doing this in
+double precision, but it wouldn't have much impact on the quality of
+the science or the differences we're seeing in the center points.
+
+After CICE reads in the corner points, it computes center lons and
+lats internally.  Those points are different from the MOM values by <
+0.015 degrees as mentioned above.  That is much larger than
+roundoff, but much smaller than the grid dx, and is probably pretty
+reasonable in this case.  Where the MOM and CICE grids are probably NOT
+identical is in how their grids are defined.  Each model has its own
+idea of the relationship between the corners, the centers, the edge
+lengths, and other gridcell metrics.  A simple way to think about that
+is that one model might place the centers at the average of the
+corners and another model might require that the corners be at the
+average of the centers.  With non-uniform grids, two grids defined in
+this way can never have both matching corner and center lons and lats.
+The grid definition is an inherent part of how models define and
+discretize the equations.  If this is all true, then what it means is
+that MOM and CICE can never have identical values for both centers and
+corners for most grids.
+
+In CESM, the POP and CICE grid centers and corners are identical
+because they both were developed with the goal of having them both
+operate on exactly the same grid.  So that makes life a little easier,
+but the differences in the MOM and CICE grid should not be a problem.
+
+So in summary, I believe the way we've generated the 1 degree grid for
+CICE is completely reasonable and that the differences we're seeing
+between the ocean and ice grids in the mediator, even though the grids
+are "the same", are also reasonable.  I think the next question is how
+do we want to deal with this in the mediator and that's a science
+question. In the current implementation, the mediator treats these as
+unique grids and computes mapping weights between the grids.  To a
+large degree, the grids are almost exactly overlapping so that mapping
+is close to a copy, but not exactly.  I think that's probably the
+right way to do it.  We could also choose to force that fields NOT be
+mapped between the ocean and ice when the grid sizes are the same and
+artificially force the grids to be treated as if they were identical
+in the mediator.  That's also viable in this situation but will
+generally be less accurate.
+
+From notes by A. Craig.
\ No newline at end of file
Index: checkout/doc/GRID_Reduced_T574.md
===================================================================
--- checkout/doc/GRID_Reduced_T574.md	(nonexistent)
+++ checkout/doc/GRID_Reduced_T574.md	(revision 94669)
@@ -0,0 +1,31 @@
+Reduced T574 Grid {#GRID_Reduced_T574}
+=================
+
+Description
+-----------
+ 
+| Long Name                     | Name           | Value  |
+| :---------------------------- | :------------- | :----- |
+| Number of longitudinal points | N<sub>i</sub>  | &nbsp; |
+| Number of latitudinal points  | N<sub>j</sub>  | &nbsp; |      
+| Northernmost latitude         | La<sub>1</sub> | &nbsp; |        
+| Easternmost longitude         | Lo<sub>1</sub> | &nbsp; |         
+| Southernmost latitude         | La<sub>2</sub> | &nbsp; |          
+| Westernmost longitude         | Lo<sub>2</sub> | &nbsp; |           
+| Longitudinal increment        | D<sub>i</sub>  | &nbsp; |            
+
+
+Grid Cell Plot (NCL)
+--------------------
+
+\image html GRID_Reduced_T574-T574r_global.png
+\image html GRID_Reduced_T574-T574r_conus.png
+\image html GRID_Reduced_T574-T574r_frontrange.png
+\image html GRID_Reduced_T574-lis_t126.png
+\image html GRID_Reduced_T574-lis_t126_conus.png
+\image html GRID_Reduced_T574-lis_t126_frontrange.png
+
+Reference
+---------
+
+1. [Master List of NCEP Storage Grids, GRIB Edition 1 (FM92)](http://www.nco.ncep.noaa.gov/pmb/docs/on388/tableb.html), grid is number 126.
Index: checkout/doc/mediator.md
===================================================================
--- checkout/doc/mediator.md	(nonexistent)
+++ checkout/doc/mediator.md	(revision 94669)
@@ -0,0 +1,402 @@
+Mediator Reference {#mediator}
+=======================
+
+\version This page is updated for post-DREV73964 revisions.
+
+This page describes the main NEMS mediator. There is also a 
+\ref sw_mediator "NEMS space weather mediator".
+
+In the NUOPC Layer architecture, the mediator (often called the
+coupler) handles the scientific and customized aspects of coupling for
+a modeling system.  For example, it may handle transfer of coupling
+fields, regridding, merging, treatments of coastlines or custom field
+transformations.
+
+For mediator diagrams and additional description, see the 
+[NEMS mediator overview presentation (Craig, Feb. 6 2015)](https://esgf.esrl.noaa.gov/site_media/projects/couplednems/pres_1502_NEMS_mediator.pdf)
+
+Overview
+--------
+
+In NEMS, the mediator is a separate gridded component with multiple
+phases.  Phases can be thought of as different subroutines within the
+mediator that can be called in some sequence to carry out the work of
+the mediator.  The mediator runs on its own set of PETs (persistent
+execution threads, similar to processors).  Often the PETs chosen for
+the mediator overlap with other components to optimize resource
+utilization.  The PETs and run sequence are specified by the compset.
+Mediator phases might have names like prep_atm, prep_ocn, restart,
+accumulate, and so forth.  The way the mediator phases are implemented
+is somewhat arbitrary and will probably evolve over time as sequencing
+requirements change.
+
+Mediator PE layout
+------------------
+
+The mediator PETs are set in NEMS in the nems.configure file.  Other
+mediator attributes are also defined there in a section that looks
+like this,
+
+    MED_model:                         nems
+    MED_petlist_bounds:             76 135
+    MED_attributes::
+      Verbosity = 0
+      DumpFields = true
+      DumpRHs = false
+      coldstart = false
+      restart_interval = 0
+    ::
+
+Components
+----------
+
+The main NEMS mediator is capable of technically coupling atmosphere,
+ocean, sea ice, land, hydrology, and wave components. All of these
+component types have demonstrated technically correct passage of fields
+through the mediator. Only the behavior of atmosphere, ocean, and sea
+ice components has been examined to assess correct physical coupling.
+
+Not all components have to be present to operate the NEMS mediator.
+
+Exchange Fields
+---------------
+
+The mediator and all components advertise fields during
+initialization.  NUOPC reconciles those field names and then coupling
+fields are realized and connected.  In order for a field to be
+connected between two components, it has to be advertised in the two
+components as well as in the mediator. Field names that match between
+components are automatically coupled in the system.  Any field that
+is exported from one component and imported to another component with
+the same standard name is coupled between those components.
+
+Separately, there is an ability for the mediator to customize the
+coupling interactions via merges or custom calculations where coupling
+fields are derived on the fly from each other.  This is not done
+automatically and requires implementation of the custom calculations in
+the mediator.  Typically, these are done in the prep phases.
+
+Coupling Periods
+----------------
+
+There is a slow and a fast coupling period in the NEMS mediator.  The
+slow coupling period is associated with the ocean model and allows the
+ocean model to be coupled at a lower frequency than other components.
+The fast coupling period is for the atmosphere and ice model.  They
+are coupled at the same frequency in the system.
+
+Accumulation and Averaging
+--------------------------
+
+The mediator accumulates all fields between coupling periods for all
+components.  For example, the atmosphere and ice coupling fields are
+accumulated and averaged between calls to the ocean model.  At the
+same time, the ocean fields coupled to the atmosphere and ice models
+are held static between the longer ocean coupling periods.
+
+Grids
+-----
+
+Model grids are passed to the mediator at initialization.  The
+mediator receives those grids and instantiates a decomposition of
+those grids on its PETs (persistent execution threads, similar to
+processors).  The mediator is then able to receive all model fields on
+the native model grids.
+
+The ocean and ice components will generally be run on the same grid
+for science reasons, but the mediator is implemented such that this is
+not a requirement.
+
+Interpolation (Regridding)
+--------------------------
+
+Regridding is performed by the mediator. The regridding weights are
+computed at initialization and depend on the grids and regridding
+method.  The regridding method is defined in the mediator on a
+field-by-field basis when the field is defined. In general, fluxes are
+mapped conservatively, and states are mapped bilinearly. The higher
+order finite element patch method was not used for any fields because
+of an observed reproducibility issue.
+
+In the current revision, fields transferred from the ocean to the sea
+ice component are copied rather than interpolated. This can be done
+because the ocean and sea ice components are on the same grid. When
+different grids are detected for these components, the interpolation
+method defaults to bilinear.
+
+Run Sequence
+------------
+
+The run sequence is evolving as new requirements are defined.  There
+are several mediator phases currently implemented in the mediator and
+a typical UGCS-Seasonal run sequence is set up as follows as shown in
+a typical nems.configure file,
+
+    runSeq::
+     @1800.0
+       MED MedPhase_prep_ocn
+       MED -> OCN :remapMethod=redist
+       OCN
+       @900.0
+         MED MedPhase_prep_ice
+         MED MedPhase_prep_atm
+         MED -> ATM :remapMethod=redist
+         MED -> ICE :remapMethod=redist
+         ATM
+         ICE
+         ATM -> MED :remapMethod=redist
+         ICE -> MED :remapMethod=redist
+         MED MedPhase_atm_ocn_flux
+         MED MedPhase_accum_fast
+       @
+       OCN -> MED :remapMethod=redist
+       MED MedPhase_write_restart
+     @
+    ::
+
+In the file above, the sequence of component run calls (ie. OCN, ICE,
+ATM), field coupling via connectors (ie. ATM to MED), and mediator
+phases (ie. \c "MedPhase_prep_ice", \c "MedPhase_atm_ocn_flux",
+MedPhase_write_restart) are indicated.  The coupling periods are also
+defined (ie. \@1800.0, \@900.0) for the ocean and atmosphere/ice models.
+
+The current implementation of the mediator phases does the following,
+
+\c MedPhase_prep_ocn - prepares the ocean coupling fields for the ocean
+model by averaging the atm/ice coupling fields, interpolating fields
+to the ocean grid, computing any merges or custom field calculations,
+and then filling the ESMF State sent to the ocean model.
+
+\c MedPhase_prep_atm - prepares the atmosphere coupling fields for the
+atmosphere model by interpolating fields to the atmosphere grid,
+computing any merges or custom field calculations, and then filling
+the ESMF State sent to the atmosphere model.
+
+\c MedPhase_prep_ice - prepares the sea ice coupling fields for the sea
+ice model by interpolating fields to the sea ice grid, computing any
+merges or custom field calculations, and then filling the ESMF State
+sent to the sea ice model.
+
+\c MedPhase_atm_ocn_flux - computes the atmosphere/ocean fluxes from
+atmosphere and ocean fields.  The computation is done on the ocean
+grid and data from the atmosphere model are interpolated to the ocean
+grid.  These fluxes can be used in the atmosphere and ocean model.
+
+\c MedPhase_accum_fast - accumulates the atmosphere and ice coupling
+fields for coupling to the ocean model.
+
+\c MedPhase_write_restart - writes mediator restart files.
+
+Reconciliation of Masks
+-----------------------
+
+The land mask implementation is described in more detail here.
+
+### Exchange Field Sign and Direction Conventions
+
+The NEMS mediator uses the convention that heat/water/momentum flux is
+positive downward. There is also a hierarchy of "down" with respect to
+models, which from top to bottom is:
+
+ * atm
+ * lnd
+ * rof
+ * ice
+ * ocn
+
+If a flux in the coupler is positive, that means it's transferring
+heat/water/momentum downward from a higher component to a lower
+component.
+
+Some examples:
+
+ * Precip will always be positive from the atm->lnd/ocn/ice
+
+ * Downward shortwave will always be positive from the atm->lnd/ocn/ice
+
+ * Evap will always be negative from the lnd/ocn/ice->atm, that means
+   water is transferred from the surface models to the atmosphere.
+
+ * Precip+runoff will always be positive from \c "atm->lnd->rof->ocn"
+
+ * Atm/ocn fluxes are computed in the mediator and are positive into
+   the ocean.  So, the same sign of fluxes is passed to both the atm
+   and ocean.  For the ocean, positive means heat/water/momentum into
+   the ocean and for the atm, positive means heat/water/momentum out
+   of the atm.
+
+ * The ice computes atm/ice stresses and ocn/ice stresses.  The
+   stresses it computes are stresses on the ice model.  So to meet the
+   convention, it has to pass out the atm/ice stress and -ocn/ice
+   stress because the sign convention says the flux in the coupler is
+   the stress on the ice from the atm and the stress on the ocn from
+   the ice.
+
+Models have to adhere to this convention for fluxes that are exported.  They also have to be aware of this convention for fluxes that are imported. 
+
+This sign convention has some problems.  For instance, if the atm/ice
+and ocn/ice fluxes were computed OUTSIDE the ice model and passed in
+as a merged field, the sign convention would break down.  The sign
+convention in NEMS can be changed in the future but a standard has to
+be defined.
+
+Custom Field Derivations
+------------------------
+
+There is a section of the mediator that allows for custom coupling
+interactions.  In general, coupling fields can be derived and set
+there.  This can be used to derive fields from other fields or to
+change signs or units.  Custom calculations are used to derive the
+downward solar to the ocean component from atmosphere shortwave and
+albedo fields.
+
+Flux Field Treatment
+--------------------
+
+The NEMS system couples heat, water, and momentum fluxes.  In general,
+fluxes are computed at the interface between two components and those
+fluxes are often computed in one of the two components or in the
+mediator.  Fluxes are normally interpolated conservatively, but the
+best regridding method for a particular application is a science
+issue.
+
+The mediator is able to compute fluxes as well as regrid and couple
+fluxes computed in components.  There is no specific constraint on how
+or where the fluxes should be computed.  Often the choice depends on
+the relative coupling frequency, the amount of information required to
+be coupled to compute the fluxes, the sophistication of the flux
+calculation, the relative grid resolution, and whether an exchange
+grid is used.
+
+In NEMS, fluxes are computed in a number of places currently.  The
+atmosphere model currently computes the atmosphere/ocean,
+atmosphere/ice, and atmosphere/land fluxes.  The sea ice model
+computes atmosphere/ice fluxes and ocean/ice fluxes.  The mediator is
+able to compute atmosphere/ocean fluxes.  Normally, it's important for
+the same flux to be used in the components associated with the flux
+interface and for the fluxes to be computed in one place, but this is
+not a requirement.  Again, this is a science issue.
+
+For some components, fluxes need to be merged.  For instance, on the
+atmosphere grid, the ice fraction might evolve in time and the land
+fraction might be static.  For conservation, the merging of fluxes is
+as important as the regridding and reuse of fluxes.  Again, this is a
+science issue.
+
+To compute fluxes,
+
+ * a specific flux method and implementation have to exist or be developed
+
+ * the required coupling fields to compute the flux have to be
+   interpolated and passed into the flux calculation
+
+ * the computed fluxes have to be interpolated and passed to the relevant components
+
+ * there needs to be some coordination between components and the
+   mediator about what fields are coupled and where computations are
+   carried out
+
+In the current implementation, the mediator interpolates the time
+evolving ice fraction from the sea ice model and that is sent to the
+atmosphere model as a coupling field.  The sea ice model computes both
+atmosphere/ice fluxes and ice/ocean fluxes.  The mediator merges the
+atmosphere/ocean fluxes computed in the mediator and the ice/ocean
+fluxes using the ice fraction and passes those fields to the ocean
+model. The mediator also merges the atmosphere/ice fluxes computed in
+the ice model and the atmosphere/ocean fluxes computed in the mediator
+and sends those fluxes to the atmosphere model.  The atmosphere model
+receives the merged atmosphere/ocean and atmosphere/ice fluxes and ice
+fraction and then somehow merges that with the atmosphere/ocean fluxes
+and atmosphere/land fluxes computed within the gsm.
+
+In particular, the fluxes that are computed at the interface are the
+latent and sensible heat flux, evaporation, momentum stress, and
+upward longwave flux.  The atmosphere computes the downward longwave,
+downward shortwave, and precipitation.  The albedos are also critical
+in the computation of the shortwave as it's the net shortwave that has
+to match across the flux interface.  To compute these fluxes in
+various components, states from the atmosphere, ocean, and sea ice
+models such as SST, ocean currents, surface temperature and humidity,
+density, surface winds and others are coupled between the relevant
+models.
+
+In the current implementation, the atmosphere/ocean flux in the
+mediator is computed on the ocean grid.  But again, the location of
+these computations and the appropriate grid are science issues.
+Fluxes are computed in the components on their native grids.
+Generally, the merges occur on the destination grid after
+interpolation.  All coupling is explicit in the current implementation
+but implicit coupling may be desired in the future.
+
+The atmosphere/ice and atmosphere/ocean fluxes are merged in the
+mediator as follows.  First, we interpolate the ice fraction and
+fluxes onto the atmosphere grid using interpolation weights generated
+at initialization,
+
+    F_ice2atm_a = WM_i2a * F_ice2atm_i
+    F_ocn2atm_a = WM_i2a * F_ocn2atm_i
+    ice_fraction_a = WM_i2a * ice_fraction_i
+
+Then, in the mediator, the atmosphere/ice and atmosphere/ocean fluxes
+are merged such that:
+
+    F_iceocn2atm_a = ice_fraction_a*F_ice2atm_a + (1-ice_fraction_a)*F_ocn2atm_a
+
+That flux is passed to the atmosphere model.  In the atmosphere model,
+the land flux, ice flux, and ocean flux are then merged again,
+
+    F_atm_a = ocn_fraction_a * F_iceocn2atm_a + (1 - ocn_fraction_a) * F_lnd2atm_a
+
+where ocn_fraction_a is the clipping destination fraction associated
+with the mapped ocean mask.
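+
+As a purely illustrative example of these two merges, suppose that at
+some atmosphere point ice_fraction_a = 0.4, ocn_fraction_a = 0.7,
+F_ice2atm_a = 10 W m-2, F_ocn2atm_a = 50 W m-2, and
+F_lnd2atm_a = 20 W m-2 (the numbers are made up).  Then:
+
+    F_iceocn2atm_a = 0.4*10 + (1 - 0.4)*50 = 34   W m-2
+    F_atm_a        = 0.7*34 + (1 - 0.7)*20 = 29.8 W m-2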
+
+While this approach is efficient and simple to implement, it suffers
+from a land-ocean masking problem when the land and ocean grids have
+different resolutions.  One approach to resolving the masking problem
+is to introduce an exchange grid, where ambiguous or unclaimed
+intersection cells between the land and ocean grids can have exclusive
+ownership.
+
+\todo (NOTE: But I thought we were explicitly specifying the land mask
+as the complement of the ocean mask to avoid any missing areas?)
+
+Field Merging
+-------------
+
+The mediator contains a generic merge method that allows multiple
+fields to be merged together into a destination field. The source
+field(s) can be weighted by fractions or other arrays.  The merging
+method is used to convert \c mean_latent_heat_flux to \c mean_evap_rate in
+the atmosphere-to-ocean coupling.  It is also used to apply fraction
+weighting when merging atm and/or ice fields for the ocean component for
+\c mean_prec_rate, \c mean_fprec_rate, \c mean_evap_rate, \c mean_sensi_heat_flx,
+\c mean_laten_heat_flx, \c mean_down_lw_flx, \c mean_zonal_moment_flx, and
+\c mean_merid_moment_flx.
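+
+Schematically, each merge forms a weighted sum of one or two source
+fields at every destination point.  The following Fortran sketch is a
+simplified stand-in for the mediator's generic merge routine; the name
+and interface are illustrative assumptions, not the actual code:
+
+    ! Sum weighted source fields into a destination field (sketch only).
+    subroutine field_merge_sketch(dst, src1, wgt1, src2, wgt2)
+      implicit none
+      real, intent(out)          :: dst(:)  ! destination field
+      real, intent(in)           :: src1(:) ! first source field
+      real, intent(in)           :: wgt1(:) ! weight for first source
+      real, intent(in), optional :: src2(:) ! optional second source
+      real, intent(in), optional :: wgt2(:) ! weight for second source
+      dst = wgt1*src1                       ! weighted first source
+      if (present(src2) .and. present(wgt2)) then
+        dst = dst + wgt2*src2               ! add weighted second source
+      end if
+    end subroutine field_merge_sketch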
+
+The following are the field merge calls in the mediator:
+
+    call fieldBundle_FieldMerge(is%wrap%FBforOcn,'mean_evap_rate' , &
+                                is%wrap%FBAtm_o, 'mean_latent_heat_flux' ,customwgt, rc=rc)
+
+    call fieldBundle_FieldMerge(is%wrap%FBforOcn,'mean_prec_rate' , &
+                                is%wrap%FBAtm_o, 'mean_prec_rate' ,atmwgt, rc=rc)
+
+    call fieldBundle_FieldMerge(is%wrap%FBforOcn,'mean_fprec_rate' , &
+                                is%wrap%FBAtm_o, 'mean_fprec_rate' ,atmwgt, rc=rc)
+
+    call fieldBundle_FieldMerge(is%wrap%FBforOcn,'mean_evap_rate' , &
+                                is%wrap%FBAtm_o, 'mean_evap_rate' ,atmwgt, rc=rc)
+
+    call fieldBundle_FieldMerge(is%wrap%FBforOcn,'mean_sensi_heat_flx' , &
+                                is%wrap%FBAtm_o, 'mean_sensi_heat_flx' ,atmwgt, rc=rc)
+
+    call fieldBundle_FieldMerge(is%wrap%FBforOcn,'mean_laten_heat_flx' , &
+                                is%wrap%FBAtm_o, 'mean_laten_heat_flx' ,atmwgt, rc=rc)
+
+    call fieldBundle_FieldMerge(is%wrap%FBforOcn,'mean_down_lw_flx' , &
+                                is%wrap%FBAtm_o, 'mean_down_lw_flx' ,atmwgt, rc=rc)
+
+    call fieldBundle_FieldMerge(is%wrap%FBforOcn,'mean_zonal_moment_flx' , &
+                                is%wrap%FBAtm_o, 'mean_zonal_moment_flx'  ,atmwgt, &
+                                is%wrap%FBIce_o, 'stress_on_air_ice_zonal',icewgt, rc=rc)
+
+    call fieldBundle_FieldMerge(is%wrap%FBforOcn,'mean_merid_moment_flx' , &
+                                is%wrap%FBAtm_o, 'mean_merid_moment_flx'  ,atmwgt, &
+                                is%wrap%FBIce_o, 'stress_on_air_ice_merid',icewgt, rc=rc) 
\ No newline at end of file
Index: checkout/doc/DREV80567.md
===================================================================
--- checkout/doc/DREV80567.md	(nonexistent)
+++ checkout/doc/DREV80567.md	(revision 94669)
@@ -0,0 +1,221 @@
+DREV80567 (DREV87736) UGCS-Seasonal 0.3 {#milestone_DREV80567}
+=======================================
+
+\date Last revised: 2/3/2017
+
+Repository URL
+--------------
+
+* https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Seasonal/trunk
+
+Important notes about this revision
+-----------------------------------
+
+\warning
+After the release of this DREV80567 revision, Github made
+changes to their repository software that resulted in incorrect
+mappings when Github revisions were used as SVN externals. This
+resulted in the DREV80567 UGCS-Seasonal application pulling in
+different and arbitrary revisions of its constituent components,
+causing the application to crash. A subsequent fix from Github
+restored the correct mappings.
+In order to avoid this issue in the future, explicit tags are now
+being used to pull in Github revisions from SVN. An identical version
+of the original DREV80567 was defined using tags as
+DREV87736. Although DREV80567 appears to be working correctly, we
+recommend that revision DREV87736 be used instead of DREV80567 as it
+is designed to avoid repository issues in the future.
+
+Note also that in order to run this milestone on Theia, you must have
+a ~/.cshrc file in your home directory that contains the line "limit
+stacksize unlimited". The ~/.cshrc file with this line must exist no
+matter what shell you are using!
+
+Description
+-----------
+
+UGCS-Seasonal 0.3 (DREV80567/DREV87736) is a three-way configuration
+of the 
+[Global Spectral Model (GSM)](http://www.emc.ncep.noaa.gov/index.php?branch=GFS),
+[Modular Ocean Model 5 (MOM5)](http://mom-ocean.org/web), and
+[Los Alamos Sea Ice Model (CICE)](http://oceans11.lanl.gov/trac/CICE/). GSM runs on a 
+\ref GRID_Reduced_T574 "T574 grid", and MOM5 and
+CICE run on a 
+\ref GRID_mom5_0p5deg_tripole "0.5 degree tripolar global grid with 0.25 degree tropics."
+A full set of fields is transferred among components using the NEMS
+mediator
+([see exchange field spreadsheet](https://docs.google.com/spreadsheets/d/1Tae7NoGbIfti38QxvmzPy7Z4tIWQgY4zSdq5Xcx9MDk/edit?usp=sharing)).
+
+\todo add and link to grid pages
+
+Relative to
+\ref milestone_DREV73964 "UGCS-Seasonal 0.2",
+this revision added the capability to perform basic restarts. This
+required modifications to the NEMS mediator and CICE so that restarts
+could be written at more flexible time intervals. This revision also
+included a command-line version of the NEMS AppBuilder. The unphysical
+wind stresses in the northern ocean present in UGCS-Seasonal 0.2 were
+resolved with a sign fix in the ocean/ice stress that the ocean
+sees. Ice initial conditions were updated for the runs performed, and
+now include snow on ice. This revision also includes performance
+optimizations. Both the initialize and run phases of the coupled
+system were profiled and optimized. The performance at the conclusion
+of the optimization process was approximately that of a similarly
+configured version of CFSv2. In addition, a check in the mediator was
+removed that had filled SST and sea ice temperature fields at "unset"
+(land) points with 271 K before doing a merge.
+
+This revision has been run for more than 30 days and exhibits behavior
+that is Earth-like.
+
+A limitation of this revision is that fluxes are known to be computed
+inconsistently. The atmosphere/ocean flux over open water is computed
+in GSM and merged with the atmosphere/land flux. The atmosphere/ocean
+flux computed in the mediator uses a different algorithm, based on
+CESM code. The ice/ocean flux and ice fraction are computed in CICE.
+
+A short-term fix was requested by EMC scientists and implemented, but
+it is not in this DREV80567 revision. It is available as revision
+84966 on a flux_update branch. In this fix, the atmosphere/ocean flux
+that the ocean model uses over open water is the GSM atmosphere/ocean
+flux (using the mask sent from GSM to exclude the atmosphere/land
+flux). For points that have some fractional ice, a merge of the
+mediator atmosphere/ocean flux plus the ice/ocean flux from CICE is
+used. The atmosphere/ocean flux that the atmosphere model uses is
+basically the same as what the ocean uses.  Over open water, GSM uses
+the atmosphere/ocean flux it computes internally. Where there is some
+fractional ice, the GSM uses the merged atmosphere/ocean/ice flux
+provided by the mediator. The issue with the fix is that although the
+components now see the same fluxes, there is a discontinuity at the
+points where open ocean meets fractional ice, since the
+atmosphere/ocean fluxes over open ocean and over fractional ice are
+still computed using different algorithms.
+
+Run Sequences
+-------------
+
+UGCS-Seasonal includes two run sequences, a cold start sequence and a
+time integration sequence.
+
+Cold start sequence: The cold start sequence initializes components
+using a minimal set of files ingested by GSM. The cold start sequence
+only needs to run for a half hour. However, it runs for an hour
+because there is a limitation on running less than an hour in EMC
+scripts.
+
+    runSeq::
+      @1800.0
+        @600.0
+          MED MedPhase_prep_atm
+          MED -> ATM :remapMethod=redist
+          ATM
+          ATM -> MED :remapMethod=redist
+          MED MedPhase_prep_ice
+          MED -> ICE :remapMethod=redist
+          ICE
+          ICE -> MED :remapMethod=redist
+          MED MedPhase_atm_ocn_flux
+          MED MedPhase_accum_fast
+        @
+        MED MedPhase_prep_ocn
+        MED -> OCN :remapMethod=redist
+        OCN
+        OCN -> MED :remapMethod=redist
+      @
+    ::
+
+Time integration sequence: The second run sequence, shown below, is
+for the time integration loop. It is initialized by restart files
+generated by the cold start sequence. As in UGCS-Seasonal 0.1, there
+is a fast and a slow loop, at 10 minutes and 30 minutes, respectively.
+
+    runSeq::
+      @1800.0
+        MED MedPhase_prep_ocn
+        MED -> OCN :remapMethod=redist
+        OCN
+        @600.0
+          MED MedPhase_prep_ice
+          MED MedPhase_prep_atm
+          MED -> ATM :remapMethod=redist
+          MED -> ICE :remapMethod=redist
+          ATM
+          ICE
+          ATM -> MED :remapMethod=redist
+          ICE -> MED :remapMethod=redist
+          MED MedPhase_atm_ocn_flux
+          MED MedPhase_accum_fast
+        @
+        OCN -> MED :remapMethod=redist
+        MED MedPhase_write_restart
+      @
+    ::
+
+Validation
+----------
+
+The following slides show resolution of the unphysical northern ocean
+wind stresses, before (left) and after (right) the sign change. The
+results are shown after a 5-day run. (From A. Craig.)
+
+\image html DREV80567-ocnspeed_120h.gif
+\image html DREV80567-ocnspeed_new_120h.gif
+
+These results are from a development system and do not represent a NOAA prediction or product. 
+
+Download, Build, Run, and Restart
+---------------------------------
+
+### Download and Build
+
+Instructions on how to download and build a NEMS application are
+discussed in the
+\ref documentation "NEMS User's Guide and Reference".
+Running UGCS-Seasonal with a cold start requires additional
+instructions, below.
+
+Please see the "Important notes about this revision" section at the
+top of this page.
+
+### Cold Start and Run
+
+Compsets that can be run with this revision are:
+
+ * `cfsr%20150401_1hr_nems%cold_gsm%slg%T574_cice%0.5_mom5%0.5`
+ * `cfsr%20150401_1day_nems_gsm%slg%T574_cice%0.5_mom5%0.5`
+
+To run compsets, start within the UGCS-Seasonal directory and execute
+the NEMS CompsetRun tool by typing:
+
+    ./NEMS/NEMSCompsetRun -compset <compset name>
+
+If you leave off the -compset argument, CompsetRun will read the
+compset list from a local file.
+
+To initialize a new case of the UGCS-Seasonal from a cold start, run
+the cold start compset,
+`cfsr%20150401_1hr_nems%cold_gsm%slg%T574_cice%0.5_mom5%0.5`, to
+generate initial mediator restart files.  That compset runs the
+atm/ice/ocean sequentially for 1 hour and generates initial mediator
+restart files containing initial values for coupling fields consistent
+with the current atmosphere, ocean, and sea ice conditions.  You then
+use those initial mediator files to start up a standard run with the
+same model initial conditions and initial model date as the cold start
+run.  To do this, run CompsetRun as specified above with the standard
+compset,
+`cfsr%20150401_1day_nems_gsm%slg%T574_cice%0.5_mom5%0.5`.
+
+After running the cold start compset, go into NEMS/NEMSCompsetRun and
+modify "setup_med_nems" to pre-stage the cold start mediator restart
+files instead of whatever files are set by default.  This is done in a
+section that looks like:
+
+    cp -f ${DATADIR}/MED_NEMS/${nemsgrid}${nemsgridinp}/* ${RUNDIR}
+    #  cp -f /scratch3/NCEPDEV/stmp1/Anthony.Craig/UGCS-Seasonal.r72808/20150401short_nemscold_gsm_cice_mom5/mediator*restart* ${RUNDIR}
+
+(You will need to adjust the directory, compset, and revision for this milestone.)
+
+Comment out the first line and uncomment the second line. In the
+second line, set the path to the cold start run directory where the
+cold start case just ran.  This will copy the mediator restart files
+from your cold start run directory into the new run directory.
+
+Once the cold start is done and the NEMSCompsetRun is modified, run a
+standard compset like
+`cfsr%20150401_1day_nems_gsm%slg%T574_cice%0.5_mom5%0.5` to advance the
+model from the initial conditions.  The system will start with the
+same atmosphere, ocean, and ice initial conditions as the cold start
+run plus the new mediator restart files, and the model will run
+concurrently.
\ No newline at end of file
Index: checkout/doc/DREV76675.md
===================================================================
--- checkout/doc/DREV76675.md	(nonexistent)
+++ checkout/doc/DREV76675.md	(revision 94669)
@@ -0,0 +1,170 @@
+DREV76675: WAM-IPE 0.3 Send Data Fields from WAM to IPE {#milestone_DREV76675}
+=======================================================
+
+\date 05/31/2016
+
+Repository URL
+--------------
+
+ * https://svnemc.ncep.noaa.gov/projects/ipe/WAM-IPE
+
+Description
+-----------
+
+\todo link to WAM and IPE grid pages
+
+This milestone is an internal release of a NEMS application with two
+active components. The active components are the
+[Ionosphere Plasmasphere Electrodynamics (IPE) model](https://esgf.esrl.noaa.gov/projects/wam_ipe/IPE)
+and the
+[Whole Atmosphere Model (WAM)](https://esgf.esrl.noaa.gov/projects/wam_ipe/WAM).
+All field exchanges in the system occur through the
+\ref sw_mediator "space weather mediator".
+This is a technical (non-scientific) milestone
+to ensure that field data is passed correctly from WAM through the
+mediator and then to IPE. In this revision WAM runs on a global 3D
+reduced Gaussian grid (\ref GRID_wam "WAM grid"). The horizontal resolution is
+T62. The vertical component of this grid is 150 levels in pressure and
+has to be converted to height in order to couple with IPE. Because the
+relationship between pressure and height varies during a run, the
+actual heights of the WAM grid levels also vary during a run. The
+maximum height of the WAM grid is approximately 800 km.  In this
+revision IPE runs on an 80 x 170 flux tube grid (\ref GRID_IPE "IPE grid")
+that extends up to approximately 360,000 km. Because of the
+difference in heights, the WAM grid only overlaps with the bottom of
+the IPE grid. The amount of the overlap depends on the current height
+of the WAM grid.
+
+NUOPC "caps", which are essentially wrappers for the coupling
+interface, are provided for all model components allowing these
+components to work in NEMS and other NUOPC-compliant systems. For
+example, the "IPE cap" allows the IPE model to work as a part of the
+NEMS system.
+
+In this release only a subset of possible fields are exchanged between
+WAM and IPE. The
+[coupling fields spreadsheet](https://docs.google.com/a/noaa.gov/spreadsheets/d/1eA9134T3NSvxPKLsZ-Ryth7_fsAU5FjmgAseApov9eY/pubhtml)
+indicates in detail the status of the different coupled fields.
+
+* The WAM output fields are extracted each time-step during the
+  dynamics calculation part of the model code. The extraction and
+  calculation necessary to generate the fields required by IPE are
+  done by the subroutine get_w_z(). For efficiency's sake this is
+  only done when WAM is running coupled to IPE, which is indicated by
+  setting the wam_ipe_coupling namelist variable to true. The fields
+  are stored in the
+  get_variables_for_WAM_IPE_coupling module. From there they are
+  extracted by the WAM cap and passed out to the mediator.
+
+* The IPE input fields advertised only represent a subset of the full
+  fields used by IPE. The reason for this is that the WAM and IPE
+  grids only overlap at the bottom of the IPE grid. Transferring the
+  entire IPE grid to the mediator would be inefficient, so only the
+  part that potentially overlaps with WAM is transferred and used for
+  regridding. In the IPE cap the fields received from the mediator are
+  copied into a variable (wamfield) which has the full index space of
+  the IPE fields. This transfer is complex because both
+  representations of the data are collapsed to 1D for efficiency's
+  sake (see the sketch after this list). Once in the wamfield variable
+  the data is extrapolated to fill the empty region. When acting as
+  part of a coupled system, IPE uses the data from wamfield as part of
+  its computations. In this technical milestone, we examine the data
+  in wamfield to verify that it was transferred correctly from WAM.
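+
+The copy from the received, collapsed field into the full wamfield
+index space amounts to an indexed scatter over the overlap region.
+The sketch below is illustrative only; the names recv1d and map are
+hypothetical, and the actual IPE cap logic is more involved:
+
+    ! Scatter a field received on the partial (overlap) grid into the
+    ! full IPE index space, collapsed to 1D (illustrative sketch).
+    subroutine copy_to_wamfield(recv1d, map, wamfield)
+      implicit none
+      real,    intent(in)    :: recv1d(:)   ! field on the partial grid
+      integer, intent(in)    :: map(:)      ! full-space index of each point
+      real,    intent(inout) :: wamfield(:) ! full IPE index space (1D)
+      integer :: n
+      do n = 1, size(recv1d)
+        wamfield(map(n)) = recv1d(n)        ! fill the overlap region
+      end do
+      ! the remaining region is filled afterwards by extrapolation
+    end subroutine copy_to_wamfield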
+
+Build & Run
+-----------
+
+Instructions on how to build and run specific code revisions
+(e.g. this milestone revision) and the supported compsets are provided
+on the WAM-IPE Build & Run page.
+
+Run Sequence
+------------
+
+\todo reference the WAM-IPE Build & Run page.
+
+The NEMS run-time configuration for the WAM-IPE system is provided below.  For details on the run sequence in general please refer to the 
+\ref architecture "Technical Architecture page",
+and
+\ref configuring "Run Configuration"
+page.
+
+    runSeq::
+     @21600.0
+       ATM -> MED :remapMethod=redist
+       MED
+       MED -> IPM :remapMethod=redist
+       ATM
+       IPM
+     @
+    ::
+
+Validation
+----------
+
+The validation procedure for this milestone is to compare the coupling
+fields in WAM to the fields after they've been transferred to
+IPE. This comparison is used to determine if the fields are being
+communicated correctly between the two models.  Because the fields are
+being regridded between the two models, the transfer isn't expected to
+be exact, and so a visual comparison was used to check if the fields
+are reasonably similar.
+
+The pairs of images in each of the following sections are from the
+visual check of the correctness of the transfer of the fields. Each
+section contains a pair of images for a different field. The top image
+is of the field at an average height of 94.11 km in WAM. The bottom
+image is of the same field at a height of 94 km in IPE after being
+transferred from WAM.  Each image is a plot of the points making up
+the field with the color indicating the field's values. This type of
+plot was used to show the difference in the grids as well as the
+values. Because the fields are at a slightly different height and
+because regridding isn't exact, there is the appearance of some
+difference in the field between the two plots. However, after
+examining the images, the conclusion was that the transfer happened
+correctly in each case.
+
+During this test both components run for a one hour simulation,
+coupling every three minutes.
+
+A test report is available for this validation run: 
+\subpage REPORT-20160531-WAM-IPE-initial-1way
+
+### Field Temp_Neutral
+
+\image html DREV76675-nt_wam.png
+\image html DREV76675-nt_ipe.png
+
+### Field Northward_Wind_Neutral
+
+\image html DREV76675-nwn_wam.png
+\image html DREV76675-nwn_ipe.png
+
+### Field Eastward_Wind_Neutral
+
+\image html DREV76675-ewn_nam.png
+\image html DREV76675-ewn_ipe.png
+
+### Field Upward_Wind_Neutral
+
+\image html DREV76675-uwn_wam.png
+\image html DREV76675-uwn_ipe.png
+
+### Field O_Density
+
+\image html DREV76675-o_wam.png
+\image html DREV76675-o_ipe.png
+ 
+### Field O2_Density
+
+\image html DREV76675-o2_wam.png
+\image html DREV76675-o2_ipe.png
+
+### Field N2_Density
+
+\image html DREV76675-n2_wam.png
+\image html DREV76675-n2_ipe.png
+
+Limitations and Technical Notes
+-------------------------------
+
+This milestone is purely a verification of the transfer of data. The
+results of this coupling haven't been scientifically validated and so
+shouldn't be expected to behave in a physically realistic manner.
\ No newline at end of file
Index: checkout/doc/DREV53978.md
===================================================================
--- checkout/doc/DREV53978.md	(nonexistent)
+++ checkout/doc/DREV53978.md	(revision 94669)
@@ -0,0 +1,135 @@
+DREV53978: WAM-IPE 0.1 Side by Side Component Run and Mediator Validation {#milestone_DREV53978}
+=========================================================================
+
+\date 04/2015
+
+Repository URL
+--------------
+
+* https://svnemc.ncep.noaa.gov/projects/ipe/WAM-IPE 
+
+Description
+-----------
+
+In this milestone the 
+[Whole Atmosphere Model (WAM)](https://esgf.esrl.noaa.gov/projects/wam_ipe/WAM)/
+[Global Spectral Model (GSM)](http://www.emc.ncep.noaa.gov/index.php?branch=GFS), the 
+[Ionosphere-Plasmasphere Electrodynamics (IPE)](https://esgf.esrl.noaa.gov/projects/wam_ipe/IPE) model,
+and the space weather mediator ran side by side without exchanging
+fields using a NUOPC driver. The milestone was intended as a proof of
+concept. There were two main goals. The first was to demonstrate that
+WAM/GSM, IPE, and the mediator can run within a NUOPC coupled
+system. The second was to demonstrate that a mediator could be built
+to regrid data between the WAM and IPE grids accurately enough to make
+the target fully coupled system feasible. This was a technical
+(non-scientific) milestone.
+
+\todo add WAM-IPE app docs to repo
+
+\todo add grids to repo
+
+In this milestone both model components ran for 2 days starting on
+12/1/2009. The WAM/GSM component ran on the reduced Gaussian 
+\ref GRID_gsm "GSM grid". (WAM is a different configuration of the GSM model with a
+different grid. In this milestone we used the GSM configuration.) The
+IPE component ran on an 80 x 170 flux tube grid (\ref GRID_IPE "IPE grid") that
+extended up to approximately 360,000 km.
+
+The space weather mediator ran independently of the two other
+components, but simulated a regridding between them. To do this it
+read in a grid and data from a WAM file. The WAM grid used for the
+regridding was a global 3D reduced Gaussian grid (\ref GRID_wam "WAM grid"). The
+horizontal resolution was T62 and in the vertical there were 150
+levels in pressure. Because the relationship between pressure and
+height varies during a run, the actual heights of the levels of the
+WAM grid vary during a run. The maximum height of the WAM grid was
+approximately 800 km. It also read in an IPE grid from a file. The IPE
+grid was an 80 x 170 flux tube grid (\ref GRID_IPE "IPE grid") that extended up to
+approximately 360,000 km. Because of the difference in heights, the
+WAM grid only overlaps with the bottom of the IPE grid. Once the grids
+are read in, the mediator then regrids the data from the WAM grid to
+the IPE grid. The regridding procedure used in the space weather
+mediator is more complex than a simple one-step regridding. As
+described above, the WAM grid's height varies with pressure, so the
+vertical relationship between the WAM and IPE grids varies each
+time-step. To avoid the expense of recomputing the regridding matrix
+every time-step, a quick 1D interpolation is used in the vertical to
+interpolate between the varying WAM grid and a fixed-height
+intermediate grid. Full 3D interpolation is then used to interpolate
+between the intermediate grid and the IPE grid. Because the
+intermediate grid doesn't vary during a run, the full 3D regridding
+matrix can be computed between it and the IPE grid once at the
+beginning of the run. Using this method allows the regridding to be
+done efficiently during each time-step of the run despite the varying
+WAM grid.
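+
+A simplified sketch of this two-stage procedure is shown below.  The
+array names and the plain linear vertical interpolation are
+illustrative assumptions; the actual mediator generates and applies
+its regridding weights with ESMF.
+
+    ! Stage 1: per-column linear interpolation from the time-varying
+    ! WAM heights to the fixed intermediate heights (illustrative).
+    subroutine vert_interp(f_wam, z_wam, z_mid, f_mid)
+      implicit none
+      real, intent(in)  :: f_wam(:,:) ! WAM field (column, level)
+      real, intent(in)  :: z_wam(:,:) ! WAM level heights, vary each step
+      real, intent(in)  :: z_mid(:)   ! fixed intermediate heights
+      real, intent(out) :: f_mid(:,:) ! field on the intermediate grid
+      integer :: i, k, kk
+      real :: w
+      do i = 1, size(f_wam,1)
+        do k = 1, size(z_mid)
+          kk = 1   ! find bracketing WAM levels (heights increase with kk)
+          do while (kk < size(z_wam,2)-1 .and. z_wam(i,kk+1) < z_mid(k))
+            kk = kk + 1
+          end do
+          w = (z_mid(k) - z_wam(i,kk)) / (z_wam(i,kk+1) - z_wam(i,kk))
+          w = max(0.0, min(1.0, w))   ! clamp outside the WAM column
+          f_mid(i,k) = (1.0-w)*f_wam(i,kk) + w*f_wam(i,kk+1)
+        end do
+      end do
+    end subroutine vert_interp
+
+    ! Stage 2: apply the sparse 3D weight matrix (intermediate grid to
+    ! IPE grid), computed once at initialization since neither of these
+    ! grids changes during the run (illustrative).
+    subroutine apply_weights(f_mid_1d, wgt, src, dst, f_ipe)
+      implicit none
+      real,    intent(in)  :: f_mid_1d(:) ! intermediate field, flattened
+      real,    intent(in)  :: wgt(:)      ! weight of each sparse entry
+      integer, intent(in)  :: src(:)      ! source index of each entry
+      integer, intent(in)  :: dst(:)      ! destination index of each entry
+      real,    intent(out) :: f_ipe(:)    ! field on the (partial) IPE grid
+      integer :: n
+      f_ipe = 0.0
+      do n = 1, size(wgt)
+        f_ipe(dst(n)) = f_ipe(dst(n)) + wgt(n)*f_mid_1d(src(n))
+      end do
+    end subroutine apply_weights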
+
+Build & Run
+-----------
+
+Instructions on how to build and run specific code revisions
+(e.g. this milestone revision) and the supported compsets are provided
+on the 
+[WAM-IPE Build & Run](https://esgf.esrl.noaa.gov/projects/wam_ipe/build_run) page.
+
+\todo migrate WAM-IPE Build & Run page to repo
+
+Validation
+----------
+
+The validation of this milestone consists of two parts: components
+running within the coupled system, and mediator accuracy. The sections
+below describe the specific tests and results for each of these parts
+for the test run.
+
+The test was run on the Zeus system at the Environmental Modeling
+Center.
+
+Components Running Within System
+--------------------------------
+
+Both the WAM/GSM model component and the IPE model component ran to
+completion as part of the coupled system. As this was a proof of
+concept milestone without scientific output this was deemed a
+sufficient result to consider this part of the test a success.
+
+Mediator Accuracy
+-----------------
+
+There were two parts to the mediator accuracy check. The first of
+these was the use of an analytic field to calculate the mean relative
+error in the regridding. The second was the regridding of actual model
+data, and then visual inspection of the resulting field to ensure that
+it looks reasonable.
+
+To verify the accuracy of the regridding in the space weather mediator,
+a slightly different version of the code was used. Instead of reading
+data in from a file, this version set the data field's values using a
+function whose output value varied with location. The equation used
+was:
+
+ * F = cos(lat)^2 * cos(2*lon) + 0.01*height + 1.0
+
+Using an analytic function allows the exact values, and therefore the
+regridding error, to be computed at the destination points. With this
+equation, the mean relative error of the regridding using the space
+weather mediator was computed
+to be 9.1E-05. This was deemed sufficiently accurate to transfer data
+for the WAM-IPE coupling.
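+
+As an illustration, the error metric can be computed as in the sketch
+below.  The function and array names are assumptions made for the
+sketch, and lat and lon are taken to be in radians:
+
+    ! Mean relative error of a regridded field against the analytic
+    ! function F = cos(lat)^2 * cos(2*lon) + 0.01*height + 1.0
+    pure function mean_rel_err(f_regrid, lat, lon, height) result(err)
+      implicit none
+      real, intent(in) :: f_regrid(:)  ! regridded destination values
+      real, intent(in) :: lat(:), lon(:), height(:) ! destination coords
+      real    :: err, exact
+      integer :: i
+      err = 0.0
+      do i = 1, size(f_regrid)
+        exact = cos(lat(i))**2 * cos(2.0*lon(i)) + 0.01*height(i) + 1.0
+        err = err + abs(f_regrid(i) - exact) / abs(exact)
+      end do
+      err = err / real(size(f_regrid))
+    end function mean_rel_err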
+
+In addition to analytically validating the transfer of a field, a
+visual verification was performed on the regridding of an actual data
+field (temperature) from WAM. The following two images are from that
+verification. The top image is of the field at a height of 104 km in
+WAM. The bottom image is of the same field regridded to the level at a
+height of 100 km in the IPE grid. Each image is a plot of the field
+with the color indicating the field's values. The IPE plot (bottom) is
+a plot of the points of the IPE grid with the color of each point
+indicating the value. This type of plot was used to show the IPE grid
+as well as the values. Because the fields are at a slightly different
+height and because regridding isn't exact, there is the appearance of
+some difference in the field between the two plots. However, after
+examining the images, the conclusion was that the transfer happened
+correctly.
+
+\image html DREV53978-wamTemp104.png
+\image html DREV53978-IPEtemp100km.png
Index: checkout/doc/introduction.md
===================================================================
--- checkout/doc/introduction.md	(nonexistent)
+++ checkout/doc/introduction.md	(revision 94669)
@@ -0,0 +1,166 @@
+Introduction to NEMS {#introduction}
+====================
+
+The NOAA Environmental Modeling System (NEMS) is the infrastructure
+underlying a **coupled modeling system** that supports predictions of
+Earth's environment at a range of time scales. Examples of other
+coupled modeling systems are the 
+[Community Earth System Model (CESM)](http://www.cesm.ucar.edu)
+and the
+[Met Office Unified Model](http://www.metoffice.gov.uk/research/modelling-systems/unified-model).
+
+**A model component** is a software representation of a physical
+domain or process, for example sea ice. It is often developed by a
+team of specialists in that domain. Model coupling is a software
+representation of feedbacks between physical processes. It involves
+modifying the exported fields of a component through grid, unit,
+temporal, and other transformations so that they can be used as the
+inputs for another component.
+
+NEMS includes infrastructure for coupling model components
+representing major Earth system domains and processes.  **External
+model components** have a primary repository that is not at the NCEP
+Environmental Modeling Center (EMC). In general, model components are
+coupled through the NEMS mediator (in other coupled modeling systems
+this is often called the "coupler").  NEMS also includes some
+specialized mediators; for example, for space weather. In some cases
+in NEMS, the model components are coupled "in-line", meaning that they
+are called directly from another model component instead of having
+fields sent through the mediator.
+
+NEMS can be assembled into a number of different **modeling
+applications** (often shortened to just applications). Modeling
+applications are associated with a purpose, like medium-range
+forecasting; a set of model components; and a set of parameters that
+represent a range of supported options, including grids and
+resolutions. Different NEMS modeling applications can have different
+types and numbers of model components. Also, the same physical domain
+may be represented by different model components in different modeling
+applications. For example, in some NEMS modeling applications the
+ocean component may be the HYbrid Coordinate Ocean Model (HYCOM) and
+in others it may be the Modular Ocean Model (MOM).
+
+[This spreadsheet lists anticipated NEMS modeling applications](https://docs.google.com/spreadsheets/d/1RS-fTBYnfSIWrJYfalD2lAI-bUOGM0frNPEMIO_ND28/edit#gid=0).
+For each modeling application, the spreadsheet includes the set of
+model components that is the target for final system delivery, and the
+set of components in initial and incremental deliveries. Note that
+this spreadsheet does not include all anticipated configurations!
+There are many test configurations that are not represented on the
+spreadsheet. Documentation about each of the NEMS applications and
+components is in general the responsibility of its original
+developers. 
+[Links to available documentation on NEMS applications and components have been compiled here.](https://docs.google.com/spreadsheets/d/1CLT66uzJrjrsY-um0jB5hU-Gfeh3_VCIJDA4-Ibmu5s/edit#gid=0)
+
+
+Compilation and Execution
+-------------------------
+
+Since there are multiple NEMS modeling applications, each with
+multiple model components, and multiple modes for each model component
+type (prognostic, prescribed data, etc.), there is a lot of complexity
+in the NEMS system. It is important to have a consistent way of
+working with the NEMS code base, and especially of coordinating
+changes to the NEMS code base.
+
+Modeling applications can be assembled using a tool called the
+[NEMSAppBuilder](building).  This tool enables each modeling
+application team to fully control its source code.
+
+Development and analysis of modeling applications in NEMS will require
+the use of other component modes besides prognostic
+components. Prescribed data components, stub components that don't
+send any fields, and other options are needed for technical testing
+and controlled scientific experimentation. The tool called Component
+Sets, or
+[Compsets](https://esgf.esrl.noaa.gov/projects/couplednems/compsets),
+enables testing of each model application in numerous configurations.
+Execution of compsets is done by the
+[NEMSCompsetRun](running).
+
+* \ref running
+* \ref building
+
+Infrastructure
+--------------
+
+[NEMS is built using the Earth System Modeling Framework (ESMF)](https://www.earthsystemcog.org/projects/esmf/)
+infrastructure software. ESMF provides utilities such as generation of
+interpolation weights, calendar and time management,
+and wrappers that create a standard component calling
+interface. This enables model components developed at different sites
+to be coupled more easily.
+
+[The National Unified Operational Prediction Capability (NUOPC) Layer](https://earthsystemcog.org/projects/nuopc/)
+adds additional rules about how ESMF models interact and increases
+their interoperability. The NUOPC Layer covers aspects from the level
+of build dependencies, to standardization of initialization phases,
+all the way to standard names of the exchanged fields. NEMS is an
+example of a modeling system built using the NUOPC Layer architecture.
+
+Architecture 
+------------
+
+The NEMS architecture is based on an ESMF component hierarchy with the
+application driver `MAIN_NEMS` at the top, calling into the
+`NEMS_COMP` component which in turn drives the `EARTH_COMP`
+component. The `EARTH_COMP` drives the `ATM` component (which calls
+into the `GSM`, `NMMB`, or `FIM` options). The architecture allows for
+multiple `EARTH_COMP` instances, supporting ensemble applications such
+as the Global Ensemble Forecast System (GEFS).
+
+Coupled NEMS includes atmosphere, ocean, ice, wave, land,
+aerosol/chemistry, and hydrologic models, with coupling interface and
+utilities based on the 
+[Earth System Modeling Framework (ESMF)](https://www.earthsystemcog.org/projects/esmf/).
+The NEMS applications also utilize interoperability conventions
+introduced by the 
+[National Unified Operational Prediction Capability (NUOPC)](https://www.earthsystemcog.org/projects/nuopc/).
+
+* \ref architecture
+* \ref structure
+
+Quick Build and Run
+-------------------
+
+
+### Download
+
+Use the following SVN command to download a specific revision of a NEMS application from its development trunk:
+
+    svn co -r <REV> \
+        https://svnemc.ncep.noaa.gov/projects/nems/apps/<Application>/trunk \
+        <Application>
+
+In this command, `<REV>` stands for a revision number, for example, `73964`. In some places on this site, the prefix `DREV` is used to indicate that the revision is on a development trunk. The prefix R indicates that a revision is on an application trunk. These prefixes should not be included in the command, just the revision number.
+
+Where `<Application>` appears, it can have one of the following values:
+* HYCOM-Ice
+* Regional
+* Regional-Nest
+* UGCS-Weather
+* UGCS-Subseasonal
+* UGCS-Seasonal
+* WAM
+* WAM-IPE
+
+The `svn` command will download all of the necessary pieces, including constituent components from different repositories.  It is possible to access the very latest working revision of the code by omitting the `-r <REV>` part from the `svn` command. This may be convenient during development; however, it is not recommended for validation work, where it is critical to keep track of the precise revision information.
+
+There are a few ways to find out more about specific revisions and the features that they contain. The links under the Milestone Revisions header on the left navigation bar describe revisions that are documented and tested for particular [application milestones](https://docs.google.com/spreadsheets/d/1RS-fTBYnfSIWrJYfalD2lAI-bUOGM0frNPEMIO_ND28/edit#gid=0). The development tasks completed for each revision are included in the [task prioritization spreadsheet](https://docs.google.com/spreadsheets/d/1C0k9AfH9DZHmJCW_bSdK2TzfFB9qLjyE8416nqqXjTM/edit#gid=0).
+
+### Build
+
+Change into the `<Application>` directory created by the `svn` command during download. Then execute the NEMSAppBuilder by typing:
+
+    ./NEMS/NEMSAppBuilder
+
+A terminal-based dialog script will guide you through the build process. The end result of the build process is a NEMS executable (`./NEMS/exe/NEMS.x`) that is configured to run the application.
+
+### Run
+
+Below are general instructions on how to run a NEMS application. There may be some special or additional directions for running a particular application configuration (for example, for the cold start in UGCS-Seasonal). Please see the links under specific Milestone Revisions for these instructions.
+
+From within the `<Application>` directory execute the NEMSCompsetRun tool by typing:
+
+    ./NEMS/NEMSCompsetRun
+
+This command will automatically run all of the compsets listed in the `<Application>.compsetRun` file. This file can be edited to change the compsets that are being run. New `CompsetRun` files can also be created and explicitly specified when calling the `NEMSCompsetRun` tool. Finally, new application specific compsets can be created and added to the repository.
Index: checkout/doc/milestones.dox.IN
===================================================================
--- checkout/doc/milestones.dox.IN	(nonexistent)
+++ checkout/doc/milestones.dox.IN	(revision 94669)
@@ -0,0 +1,16 @@
+/**@page milestones Milestone Revisions
+
+The pages in this section relate to development milestones for NEMS
+applications. The pages' titles include a revision number, application
+name, and milestone number. A revision prefix indicates whether it is
+a branch (DREV) or trunk (R) revision. New development occurs on a
+code repository branch. Stable branch versions are merged back to the
+repository trunk.
+
+Web pages include the purpose of the milestone, grids, run duration,
+validation, and other information.
+
+--MILESTONES GO HERE - do not modify this line--
+
+*/
+
Index: checkout/doc/GRID_IPE.md
===================================================================
--- checkout/doc/GRID_IPE.md	(nonexistent)
+++ checkout/doc/GRID_IPE.md	(revision 94669)
@@ -0,0 +1,40 @@
+Ionosphere-Plasmasphere Electrodynamics (IPE) Grid {#GRID_IPE}
+==================================================
+
+Description
+-----------
+
+Only a portion of the IPE grid (that overlaps with the 
+\ref GRID_wam "Whole Atmosphere Model (WAM)"
+grid up to ~700 km) needs to be represented for coupling to WAM.
+
+| Long Name                                              | Name          | Value |
+| :----------------------------------------------------- | :------------ | :---- |
+| Maximum number of gridpoints along a single flux tube* | `MaxFluxTube` | 1115  |
+| Number of flux tubes in magnetic latitude              | `NLP`         | 170   |
+| Number of flux tubes in magnetic longitude             | `NMP`         | 80    |
+ 
+
+\* Actual number of grid points along a single flux tube can vary from
+11 to 1115 depending on the length of the flux tube.  
+
+Data Decomposition
+------------------
+
+All the grid points along a flux tube are contained on the same
+processor (equivalent to a vertical column for GSM).  Domain
+decomposition is done in both latitude and longitude
+directions. Several field lines may be on the same processor.  The
+model usually runs on 80 processors for the coupling to GSM, although
+the standalone IPE can run with a flexible number of processors
+specified at run time.
+
+Reference
+---------
+
+The entire IPE 3D grid file on Zeus:
+
+ * /scratch1/portfolios/NCEPDEV/swpc/noscrub/Naomi.Maruyama/grid/apex/GIP_apex_coords_global_lowres_new20120705
+
+\todo Remove reference to IPE 3D grid file on non-existent Zeus
+machine, or move it to an accessible location.
\ No newline at end of file
Index: checkout/doc/standards.md
===================================================================
--- checkout/doc/standards.md	(nonexistent)
+++ checkout/doc/standards.md	(revision 94669)
@@ -0,0 +1,1449 @@
+Standards {#standards}
+=========
+
+This page contains itemized requirements for how data, code, and
+documentation are managed within the NEMS system.  This should be
+thought of as a set of rules; further information can be found
+throughout the documentation and milestone release pages.
+
+Operating Principles
+====================
+
+\todo Management review of this section is one of the items identified
+as an action in the CDDM workshop on Sept 1-2, 2016 ("Operating
+Principles").
+
+The following are a set of operating principles (OP later in the
+document) that apply across multiple categories of requirements.
+
+* Make decisions with the end goal of producing the best possible
+  forecasts, based on evidence rather than assertion.
+
+* Operate efficiently. Avoid redundancy, and favor automation over
+  manual processes.
+
+* Promote transparency. To the extent possible, make code, data,
+  documentation, requirements, policies, and plans openly available
+  and clear.
+
+* Encourage community ownership and participation. Establish processes
+  that enable partners to engage in decision-making.
+
+* Ensure that organizational structures support clear responsibilities
+  and accountability.
+
+Roles
+=====
+
+* **Components**
+
+   * **Component Code Managers** --- people internal to NCEP who
+      * Manage the technical aspects of component code
+      * Manage the connection of the code to the 
+        \ref building "NEMS Build System" and
+        \ref running "NEMS Compset System"
+
+   * **External component points of contact** --- people external to NCEP who:
+      * Are the point of contact for coordination and synchronization
+        with NCEP developers
+
+* **Applications**
+
+   * **Application Leads** --- responsible for scientific and technical
+     development and delivery of a particular application.
+      * Provide status reports to the NEMS development community as needed
+      * Provide Milestone Release Documents as needed. This task 
+        may be delegated.
+
+   * **Application Code Managers** --- responsible for technical
+     aspects of application development
+      * Maintain documentation, code and data for their application
+      * Ensure at least one
+        \ref running "compset"
+        works, so that NEMS framework updates can be tested against the application
+      * Ensure that the 
+        \ref building "NEMS build system (NEMSAppBuilder)"
+        is able to compile their application.
+      * Coordinate with other developers to update the application to handle
+        incoming changes to the NEMS framework that may change 
+        the application's output
+
+* **NEMS**
+
+  * **NEMS Lead** --- responsible for scientific and technical
+      development and delivery of the overall NEMS system.
+    * Provide status reports to the NEMS development community as needed
+    * Provide Milestone Release Documents as needed.  This task may be delegated
+
+  * **NEMS Code Manager** --- responsible for technical aspects of code development
+    * Tests NEMS framework changes against all applications upon request,
+      including just before a NEMS commit.
+    * Maintains nightly tests of a limited set of compsets for each app
+    * Maintains the NEMS source code, build, and test system.
+
+The current status at EMC is that there is a technical team with the
+following members:
+
+* Mark Iredell: manager
+* Sam Trahan: NEMS code manager
+* Valbona Kunkel: Documentation
+* Hang Lei: Physics interface
+
+Other members of EMC that are sometimes involved include:
+
+* Terry McGuinness - GFS workflow
+* Kate Howard - GFS workflow
+
+
+
+
+Requirements Format and Collection
+======================================
+
+\todo This section requires management review and
+concurrence. Currently, although EMC participated in formulating these
+guidelines, they are not following them. The convention should be
+modified or replaced with the format and procedures NCEP/EMC intends
+to follow for collecting and disseminating requirements.
+
+Documented requirements and expectations, collected from appropriate
+stakeholders, serve as a foundation for formulating strategies and
+making decisions.
+
+This document introduces a convention for requirements collection and
+presentation. Each entry includes:
+
+* **Id** --- Requirement short identifier and number, e.g. SM1 (Software Management 1)
+
+* **Type** --- Current classifications include goal (general guidance
+  or direction), expectation (exp; an assumption), requirement (req; a
+  necessity), and recommendation (rec; a desire or suggestion).
+
+* **Item** --- Description of the entry.
+
+* **Reason** --- Rationale or motivation for the entry.
+
+* **Source** --- Person or group that originated the entry.
+
+* **Status** --- Implementation status or timeline associated with the entry.
+
+A simple requirements collection process typically involves
+identifying a scope, coordinator or moderator, a set of stakeholders,
+and assembling the requirements through a joint discussion. A new set
+of requirements can be expected to evolve and become a basis for
+action through ongoing vetting, periodic updates, and being referenced
+by those developing plans and products.
+
+
+
+Software Management
+-------------------
+
+
+
+<table>
+<tr><th rowspan="2">SM1</th><th>Minimize Software Repositories Per Component</th></tr>
+<tr><td>
+Minimize the number of software repositories required per
+component. Best practices in software configuration management
+recommend using a shared common repository for development where
+feasible.  New development can be managed using branches (or forks or
+equivalent) with the understanding of a common authoritative source
+(master/trunk) and a procedure for integrating new development into
+the source repository.  This approach utilizes the strengths of
+configuration management tools, while minimizing the work and risk
+involved in maintaining duplicate repositories.  
+</td></tr>
+<tr><th>Type</th><td>goal
+</td></tr>
+<tr><th>Source</th><td>GMTB
+</td></tr>
+<tr><th>Status</th><td>No policy in place
+</td></tr>
+<tr><th>Reason</th><td>OP, avoid duplication
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">SM2</th><th>Source Code Available</th></tr>
+<tr><td >
+All source code for operational modeling applications and suites must
+be available on EMC computers.
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>NCO<br>
+</td></tr>
+<tr><th>Status</th><td>Implemented<br>
+</td></tr>
+<tr><th>Reason</th><td>Availability in case of problems
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">SM3</th><th>Accessible Repositories</th></tr>
+<tr><td >
+NEMS modeling applications and suites are expected to utilize multiple
+development repositories, including repositories managed externally
+and by EMC. It must be possible for developers (NOAA and non-NOAA) to
+access codes in the development repositories, workspaces, and
+trackers, with specific permission levels (read, read/write).
+</td></tr>
+<tr><th>Type</th><td>EXP
+</td></tr>
+<tr><th>Source</th><td>OAS
+</td></tr>
+<tr><th>Status</th><td>Not fully implemented; some key developers do not have access to workspaces and trackers.<br>
+</td></tr>
+<tr><th>Reason</th><td>Access is needed for development.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">SM3a</th><th>Current Operational Code is Available</th></tr>
+<tr><td >
+It is essential that the currently operational code can be checked
+out. An exact mirror repository should be maintained that always has
+the latest operational code.  
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>COLA/Kinter<br>
+</td></tr>
+<tr><th>Status</th><td>No policy in place<br>
+</td></tr>
+<tr><th>Reason</th><td>This is needed to streamline the transition from research to operations
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">SM4</th><th>EMC Repository Developer Responsibilities</th></tr>
+<tr><td >
+The following apply to developers working in an EMC repository:
+* Developer maintains own branch of trunk.
+* Commit work back to branch from working copy frequently.
+* Keep up to date with trunk.
+* Use test harness for regression and suite testing prior to commits.
+* Use ticket system as required.
+</td></tr>
+<tr><th>Type</th><td>EXP<br>
+</td></tr>
+<tr><th>Source</th><td>EMC/Tollman<br>
+</td></tr>
+<tr><th>Status</th><td>Unable to implement because of lack of access to computers and trackers.<br>
+</td></tr>
+<tr><th>Reason</th><td>Follow good software practices for development.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">SM5</th><th>External Component Development Responsibilities</th></tr>
+<tr><td >
+The following apply to developers working with external components:
+
+* If procedures used in the external, authoritative repository are
+  compatible with NEMS development, the procedures specific to that
+  component will be followed.
+
+* If the external, authoritative repository cannot support component
+  development for NEMS, a development repository for EMC use will be
+  established and the procedures established for EMC repositories
+  followed.
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>OAS<br>
+</td></tr>
+<tr><th>Status</th><td>Implemented.<br>
+</td></tr>
+<tr><th>Reason</th><td>Balance between low process overhead and control over processes.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">SM6</th><th>Components have Identified Leaders</th></tr>
+<tr><td >
+There is an identified EMC component lead for all model and suite
+components. There is an external component lead identified for
+external components.
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>NCO<br>
+</td></tr>
+<tr><th>Status</th><td>Implemented<br>
+</td></tr>
+<tr><th>Reason</th><td>OP, accountability.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">SM7</th><th>Identified Leaders for NEMS Documentation</th></tr>
+<tr><td >
+There are identified leads for the overall NEMS system development at EMC.
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>OAS<br>
+</td></tr>
+<tr><th>Status</th><td>Not implemented.<br>
+</td></tr>
+<tr><th>Reason</th><td>OP, accountability.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">SM8</th><th>Synchronization Path to External Component Repositories</th></tr>
+<tr><td >
+Code changes to external components taking place in their native
+repositories must have a path for synchronization with changes made to
+these components at EMC, and vice versa. It is up to the EMC component
+lead to synchronize changes between development and operational
+repositories, in coordination with the external component lead. If
+necessary, users can download a tar file of a code release and return
+changes via tar file.
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>EMC/Tollman<br>
+</td></tr>
+<tr><th>Status</th><td>No controls in place.<br>
+</td></tr>
+<tr><th>Reason</th><td>Need for synchronization of development to maintain coherence in community-based unified system.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">SM9</th><th>Synchronization Path of Component Code Between Applications</th></tr>
+<tr><td >
+Changes in components and infrastructure made for any given NEMS
+modeling application must have a process for synchronization with
+versions of these codes used by other NEMS applications.
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>OAS<br>
+</td></tr>
+<tr><th>Status</th><td>No policy in place.<br>
+</td></tr>
+<tr><th>Reason</th><td>Need for synchronization of development to maintain
+  coherence in community-based unified system.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">SM10</th><th>Standardized Testing and Implementation System</th></tr>
+<tr><td >
+There is standard regression, suite, operations testing for
+respectively software, science, and implementation.
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>OAS<br>
+</td></tr>
+<tr><th>Status</th><td>Key processes not implemented.<br>
+</td></tr>
+<tr><th>Reason</th><td>Critical part of software process.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">SM11</th><th>Repository Strategy Supporting Many Components and Applications</th></tr>
+<tr><td >
+The repository strategy must support testing and experimentation with many-component modeling applications and suites.
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>OAS<br>
+</td></tr>
+<tr><th>Status</th><td>Repository strategy is not fully defined.<br>
+</td></tr>
+<tr><th>Reason</th><td>Needed to manage development in multi-component, multi-application NEMS system.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">SM12</th><th>Component Versions Associated with Application Versions</th></tr>
+<tr><td >
+It must be possible to easily assemble a version of a particular
+modeling application, with versioned constituent components.
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>OAS<br>
+</td></tr>
+<tr><th>Status</th><td>Implemented<br>
+</td></tr>
+<tr><th>Reason</th><td>Needed to manage development in multi-component,
+   multi-application NEMS system.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">SM13</th><th>Availability of Stub, Data, and Active Model Components</th></tr>
+<tr><td >
+Model components must offer active, stub, and data versions for
+testing and experimentation.
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>OAS, EMC/Grumbine<br>
+</td></tr>
+<tr><th>Status</th><td>Data versions are not available.<br>
+</td></tr>
+<tr><th>Reason</th><td>Needed for testing and development.
+</td></tr>
+</table>
+
+<br><br>
+
+
+
+Coding standards
+----------------
+
+\todo The proposed standards here need to be reconciled with
+Environmental Equivalence 2. The section should be updated to reflect
+the standards that NCEP/EMC intends to follow.
+
+The following table specifies coding requirements and recommendations
+for a parameterization to be included in CCPP. The intent is to
+promote readability, robustness, and portability without being too
+onerous. The Kalnay rules and work by the NUOPC Physics
+Interoperability Team and EMC personnel had a major impact in creating
+this list. The GSM coding standards described at
+
+* https://svnemc.ncep.noaa.gov/trac/gsm/wiki/GSM%20code%20standards
+
+were taken into account and incorporated as applicable. Unless
+specified otherwise, the Fortran programming language is assumed.
+
+\todo Move the GSM coding standards to a public area
+
+
+
+<table>
+<tr><th rowspan="2">CS1</th><th>Fortran Implicit None</th></tr>
+<tr><td >
+All Fortran modules and subroutines will contain `implicit none`
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB, GSM
+</td></tr>
+<tr><th>Reason</th><td>
+* Assists in writing bug-free code.
+* Understanding implicit type rules is difficult and arcane.
+* Understanding where a variable comes from (local, input argument
+  list, module) is more difficult with implicit typing.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">CS2</th><th>Fortran \c Intent Attribute</th></tr>
+<tr><td >
+All arguments to subprograms will contain the `intent` attribute
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB, NUOPC PI Team, GSM<br>
+</td></tr>
+<tr><th>Reason</th><td>
+
+* Assists readers in understanding whether a variable is:
+  * read-only: intent(in)
+  * read/write: intent(inout)
+  * effectively uninitialized: intent(out)
+
+* A compiler error will result if code attempts to use a variable
+  differently than specified in its \c intent.
+
+* Declared variables without the \c intent attribute can be understood
+  to be local.
+</td></tr>
+</table>
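+
+For illustration, a made-up subroutine (not taken from any NEMS
+component) showing all three intents alongside a local variable, and
+also following CS1 (`implicit none`):
+
+    subroutine accumulate_flux(flux_in, dt, total, mean)
+      implicit none
+      real, intent(in)    :: flux_in(:) ! read-only: intent(in)
+      real, intent(in)    :: dt         ! read-only: intent(in)
+      real, intent(inout) :: total(:)   ! read/write: intent(inout)
+      real, intent(out)   :: mean       ! effectively uninitialized: intent(out)
+      real :: work(size(flux_in))       ! local variable: no intent attribute
+      work  = flux_in * dt              ! local computation
+      total = total + work              ! update the intent(inout) argument
+      mean  = sum(total) / real(size(total))
+    end subroutine accumulate_flux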
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">CS3</th><th>Fortran 2008 Standard Compliance</th></tr>
+<tr><td >
+No modules or subroutines will violate the Fortran 2008 standard
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB<br>
+</td></tr>
+<tr><th>Status</th><td>Undetermined<br>
+</td></tr>
+<tr><th>Reason</th><td>
+
+* Makes porting to a new compiler easier to near trivial.
+
+* Example: gfortran by default enforces the standard that free-form
+  source lines will not exceed 132 characters. Some compilers by
+  default allow line lengths to exceed this value. Attempts to port
+  codes with line lengths greater than 132 may encounter difficulty.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">CS4</th><th>Inline Documentation of Variables</th></tr>
+<tr><td >
+All local and argument list variables will have a comment explaining
+the meaning of the variable. An in-line comment on the declaration
+line is sufficient
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB, NUOPC PI Team, GSM<br>
+</td></tr>
+<tr><th>Status</th><td>Undetermined<br>
+</td></tr>
+<tr><th>Reason</th><td>Allows readers unfamiliar with the code to more quickly
+  understand how the code works.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">CS5</th><th>Documenting Fortran Subprograms and Modules</th></tr>
+<tr><td >
+All modules and subprograms will have a documentation block describing functionality
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB, NUOPC PI Team, GSM<br>
+</td></tr>
+<tr><th>Reason</th><td>Promotes understanding of algorithms and code structure by new users
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">CS6</th><th>No Fortran Common Blocks</th></tr>
+<tr><td >
+Common blocks are disallowed
+
+</td></tr>
+<tr><th>Type</th><td>Requirement
+
+</td></tr>
+<tr><th>Source</th><td>GMTB, NUOPC PI Team, GSM
+
+</td></tr>
+<tr><th>Reason</th><td>Deprecated Fortran feature. Modules provide all the
+  functionality of common blocks plus much more.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">CS7</th><th>Compatible with GNU gfortran</th></tr>
+<tr><td >
+A package must be compilable with the gfortran compiler (or gcc for
+packages coded in C). Running and validating the package may be done
+with whatever compiler(s) the developer prefers.
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB<br>
+</td></tr>
+<tr><th>Reason</th><td>gfortran (and gcc) is free and ubiquitous, and is
+  therefore an ideal choice for the canonical compiler.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">CS8</th><th>Free-Form Fortran</th></tr>
+<tr><td >
+All Fortran source will be free-form
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB<br>
+</td></tr>
+<tr><th>Reason</th><td>
+
+* Fixed-form source is hard to read and archaic.
+
+* A 72-column requirement only makes sense for punch cards.
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">CS9</th><th>Fortran-Callable</th></tr>
+<tr><td >
+All public subprograms will be Fortran-callable
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB, NUOPC PI Team, GSM<br>
+</td></tr>
+<tr><th>Reason</th><td>Fortran is the most commonly used language for geophysical models.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">CS10</th><th>Thread-Safe Parameterizations</th></tr>
+<tr><td >
+All parameterizations must be thread-safe (except for initialization
+and finalization methods)
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB, NUOPC PI Team, GSM<br>
+</td></tr>
+<tr><th>Reason</th><td>
+Many geophysical numerical models are threaded these days, and need to
+be able to invoke physical parameterizations simultaneously from
+multiple threads.
+
+Example of code which is NOT thread-safe: declare a saved variable
+`first` initialized to `.true.`, then test its value and set some
+static variables if it is `.true.`. This will likely produce wrong
+answers when run in threaded mode.
+
+Solution: provide an initialization routine which sets the static
+variables outside of threaded regions. A sketch of both patterns is
+given after this table.
+
+Wikipedia provides a brief overview of thread-safety: 
+
+ * https://en.wikipedia.org/wiki/Thread_safety
+</td></tr>
+</table>
+
+<br><br>
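+
+As referenced above, a minimal sketch (hypothetical names) of the
+unsafe "first-touch" pattern and its thread-safe alternative:
+
+    ! NOT thread-safe: two threads may both see first as .true. and
+    ! race while setting the saved (static) variable coeff.
+    subroutine run_unsafe(x)
+      implicit none
+      real, intent(inout) :: x
+      logical, save :: first = .true.
+      real,    save :: coeff
+      if (first) then
+        coeff = 2.0
+        first = .false.
+      end if
+      x = coeff*x
+    end subroutine run_unsafe
+
+    ! Thread-safe alternative: set static data once, from an init
+    ! routine called outside of any threaded region.
+    subroutine init_coeff(coeff)
+      implicit none
+      real, intent(out) :: coeff
+      coeff = 2.0
+    end subroutine init_coeff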
+
+<table>
+<tr><th rowspan="2">CS11</th><th>No parameterization will contain a `stop` or `abort` clause</th></tr>
+<tr><td >
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB, NUOPC PI Team, GSM<br>
+</td></tr>
+<tr><th>Reason</th><td>If an error condition arises, it is better to set a flag
+and let the caller decide how to handle the condition.<br>
+</td></tr>
+<tr><th>Status</th><td>Not yet implemented.
+</td></tr>
+</table>
+
+<br><br>
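+
+A sketch (hypothetical names) of the flag-and-return pattern CS11
+calls for in place of `stop` or `abort`:
+
+    subroutine scheme_run(t, errflg, errmsg)
+      implicit none
+      real,             intent(inout) :: t       ! temperature (K)
+      integer,          intent(out)   :: errflg  ! 0 means success
+      character(len=*), intent(out)   :: errmsg  ! human-readable error
+      errflg = 0
+      errmsg = ''
+      if (t < 0.0) then
+        errflg = 1
+        errmsg = 'scheme_run: negative absolute temperature'
+        return          ! the caller decides how to handle the error
+      end if
+      t = t + 1.0
+    end subroutine scheme_run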
+
+<table>
+<tr><th rowspan="2">CS12</th><th>Use of uninitialized variables is disallowed</th></tr>
+<tr><td >
+</td></tr>
+<tr><th>Type</th><td>Requirement
+</td></tr>
+<tr><th>Source</th><td>GMTB, GSM
+</td></tr>
+<tr><th>Reason</th><td>Readability.
+
+Not all compilers can be made to initialize static or stack variables
+to a known value (e.g. zero).
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">CS13</th><th>All array indices must fall within their declared bounds.</th></tr>
+<tr><td >
+</td></tr>
+<tr><th>Type</th><td>Requirement
+</td></tr>
+<tr><th>Source</th><td>GMTB, NUOPC PI Team, GSM
+</td></tr>
+<tr><th>Reason</th><td>Debuggers will fail when "tricks" are employed which
+  reference arrays outside of their declared bounds.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">CS14</th><th>Self-Reproducible Parameterizations</th></tr>
+<tr><td>
+Multiple runs of the same compiled parameterization given identical
+input must produce identical output.  In the case where randomness is
+part of the parameterization, a method must be provided to invoke the
+same random sequence for test reproducibility.
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB, NUOPC PI Team, GSM<br>
+</td></tr>
+<tr><th>Reason</th><td>Prevents inadvertent errors.
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">CS15</th><th>Do Not Set Fortran Default Precisions</th></tr>
+<tr><td >
+
+The use of compiler flags specifying default precision is
+disallowed. For example, if 64-bit precision is required, use the
+`kind=` attribute to specify the precision rather than a compiler flag
+such as \c -r8
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB<br>
+</td></tr>
+<tr><th>Reason</th><td>The behavior of such flags is compiler-specific,
+  e.g. if the user explicitly declares `real*4`, does the `-r8` compiler
+  flag promote that variable to 64 bits or leave it alone? The answer
+  varies from compiler to compiler.
+
+
+</td></tr>
+</table>
+
+<br><br>
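+
+For illustration (the kind parameter name is arbitrary), 64-bit
+precision can be requested in the source itself rather than via a
+compiler flag:
+
+    integer, parameter :: r8 = selected_real_kind(15, 307)  ! IEEE double
+    real(kind=r8)            :: tsfc    ! surface temperature (K), 64-bit
+    real(kind=r8), parameter :: pi = 3.14159265358979323846_r8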
+
+
+<table>
+<tr><th rowspan="2">CS16</th><th>List Public Entries in Fortran `module use` Statements</th></tr>
+<tr><td >
+
+With the exception of common libraries which use a well-defined naming
+standard for variables and subroutines, all `module use` statements
+must explicitly state which public entities will be referenced. The
+MPI library is an example of an acceptable exception: All MPI routines
+start with `MPI`, so a bare `use mpi` statement is acceptable.
+
+</td></tr>
+<tr><th>Type</th><td>Recommended<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB<br>
+</td></tr>
+<tr><th>Reason</th><td>Assists in understanding where various variables and/or
+functions or subroutines are defined.
+
+
+</td></tr>
+</table>
+
+<br><br>
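+
+A short sketch (hypothetical module and entity names) of the
+recommended form:
+
+    use physcons, only: con_g, con_rd  ! gravity, dry-air gas constant
+    use machine,  only: kind_phys      ! working precision kind
+    ! By contrast, a bare "use physcons" hides where con_g is defined.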
+
+
+<table>
+<tr><th rowspan="2">CS17</th><th>No Debugging Code</th></tr>
+<tr><td >
+
+All code intended for debugging purposes only should be removed prior
+to submission for inclusion
+
+</td></tr>
+<tr><th>Type</th><td>Recommended<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB, GSM<br>
+</td></tr>
+<tr><th>Reason</th><td>Readability
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">CS18</th><th>Deallocate Allocated Arrays</th></tr>
+<tr><td >
+
+All explicitly allocated arrays must be deallocated when no longer
+needed
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB, GSM<br>
+</td></tr>
+<tr><th>Reason</th><td>Readability. Minimize memory usage.
+
+
+</td></tr>
+</table>
+
+<br><br>
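+
+A trivial sketch of the allocate/deallocate pairing (hypothetical names):
+
+    real, allocatable :: work(:)
+    allocate(work(n))
+    ! ... use work ...
+    deallocate(work)   ! release the memory as soon as it is no longer needed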
+
+
+<table>
+<tr><th rowspan="2">CS19</th><th>Default Visibility is `private` </th></tr>
+<tr><td >
+
+The default visibility rule for module variables and procedures should
+be `private` (specified by a single `private` statement near the
+beginning of the module). The `public` attribute is applied to only
+those entities which are needed by other subprograms or modules.
+
+</td></tr>
+<tr><th>Type</th><td>Recommended<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB<br>
+</td></tr>
+<tr><th>Reason</th><td>Limiting variable and subprogram scope is good programming practice
+
+
+</td></tr>
+</table>
+
+<br><br>
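+
+A minimal sketch (hypothetical names) of the default-private pattern:
+
+    module scheme_mod
+      implicit none
+      private                        ! nothing is visible outside by default
+      public :: scheme_run           ! expose only what callers need
+      real :: internal_state = 0.0   ! module variable; stays private
+    contains
+      subroutine scheme_run(x)
+        real, intent(inout) :: x
+        x = x + internal_state
+      end subroutine scheme_run
+    end module scheme_mod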
+
+
+<table>
+<tr><th rowspan="2">CS20</th><th>Consistent Case in Fortran Code</th></tr>
+<tr><td >
+
+Consistent use of case is preferred for Fortran code (text strings excepted).
+
+</td></tr>
+<tr><th>Type</th><td>Recommended<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB<br>
+</td></tr>
+<tr><th>Reason</th><td>While Fortran is a case-insensitive language, a variable
+declared as `aBc` should be written that way everywhere, and not `aBc` in one
+place, `abc` in another, and `ABC` in a third.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">CS21</th><th>Parameterization Steps</th></tr>
+<tr><td >
+
+A parameterization should contain `init`, `run`, and `finalize`
+methods. The `run` method must be thread-safe.
+
+</td></tr>
+<tr><th>Type</th><td>Recommended<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB<br>
+</td></tr>
+<tr><th>Reason</th><td>Promotes separation of activities which must be done only once at startup or shutdown, from those which are done on multiple time steps.
+
+
+</td></tr>
+</table>
+
+<br><br>
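+
+A skeletal sketch (hypothetical names) of the three-phase layout:
+
+    module myscheme
+      implicit none
+      private
+      public :: myscheme_init, myscheme_run, myscheme_finalize
+      real, allocatable :: lookup(:)  ! static data, set once at startup
+    contains
+      subroutine myscheme_init(n)     ! called once, outside threaded regions
+        integer, intent(in) :: n
+        allocate(lookup(n))
+        lookup = 0.0
+      end subroutine myscheme_init
+      subroutine myscheme_run(x)      ! called every step; must be thread-safe
+        real, intent(inout) :: x
+        x = x + lookup(1)
+      end subroutine myscheme_run
+      subroutine myscheme_finalize()  ! called once at shutdown
+        deallocate(lookup)
+      end subroutine myscheme_finalize
+    end module myscheme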
+
+
+<table>
+<tr><th rowspan="2">CS22</th><th>Parameterizations Invoked in Chunks</th></tr>
+<tr><td >
+
+Parameterizations should be able to be invoked in "chunks", where the
+calculations are independent along the fastest-varying subscript.
+
+</td></tr>
+<tr><th>Type</th><td>Recommended<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB<br>
+</td></tr>
+<tr><th>Reason</th><td>
+
+Computational performance is the main reason for this preference. Many
+physical parameterizations in geophysical models contain a dependence
+in the vertical, which means this dimension is unavailable for
+vectorization. Vectorization can provide up to a 16X speedup on modern
+processors.
+
+Example: Outer loop over vertical index `k` can contain vertical
+dependence, but if there is also an inner loop over horizontal index
+`i` that can be vectorized, the code is likely to run much more
+efficiently.
+
+
+</td></tr>
+</table>
+
+<br><br>
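+
+A sketch of the loop ordering described above (hypothetical names,
+where `ni` is the chunk width and `nk` the number of levels):
+
+    do k = 2, nk      ! vertical dependence stays in the outer loop
+      do i = 1, ni    ! independent in i: the inner loop can vectorize
+        t(i,k) = t(i,k) + c*(t(i,k-1) - t(i,k))
+      end do
+    end do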
+
+
+<table>
+<tr><th rowspan="2">CS23</th><th>Don't `GOTO`</th></tr>
+<tr><td >
+
+The use of `GOTO` is strongly discouraged, except where no better
+option is available.
+
+</td></tr>
+<tr><th>Type</th><td>Recommended<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB<br>
+</td></tr>
+<tr><th>Reason</th><td>Modern languages provide better mechanisms to accomplish
+  the same goal in most cases.  `GOTO` promotes "spaghetti" code,
+  which can be unreadable.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">CS24</th><th>Nested Scope Indentation</th></tr>
+<tr><td >
+
+Code and declarations within subprograms, loops, and conditional tests
+should be indented. Indenting by 2 or 3 or 4 columns is reasonable
+
+</td></tr>
+<tr><th>Type</th><td>Recommended<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB<br>
+</td></tr>
+<tr><th>Reason</th><td>Readability. Particularly important for multiply nested
+  loops and/or `if` tests.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">CS25</th><th>Use Symbolic Fortran Comparison Operators</th></tr>
+<tr><td >
+
+Test operators `<`, `<=`, `>`, `>=`, `==`, `/=` are preferred
+vs. their deprecated counterparts `.lt.`, `.le.`, `.gt.`, `.ge.`,
+`.eq.`, `.ne.`
+
+</td></tr>
+<tr><th>Type</th><td>Recommended<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB, GSM<br>
+</td></tr>
+<tr><th>Reason</th><td>The modern constructs are easier to read, and more
+  understandable for those unfamiliar with legacy code.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">CS26</th><th>No Bare Constants</th></tr>
+<tr><td >
+
+The use of bare constants (e.g. 2.7) inside of computational regions
+is strongly discouraged. Instead, a named constant (e.g. `some_variable
+= 2.7`) should be declared at the top of the routine or module, along
+with an in-line comment stating its purpose
+
+</td></tr>
+<tr><th>Type</th><td>Recommended<br>
+</td></tr>
+<tr><th>Source</th><td>GMTB<br>
+</td></tr>
+<tr><th>Reason</th><td>
+
+Bare constants buried in code are one of the biggest contributors to
+lack of readability and understanding of how code works. "What the
+heck does `2.7` mean???" In addition, using a named constant makes it
+easier to specify precision, e.g. `real*8 :: some_var = 35.`
+</td></tr>
+</table>
+
+<br><br>
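+
+For instance (a sketch with hypothetical names), instead of writing
+`flux = 2.7 * wind_speed` deep inside a loop:
+
+    real, parameter :: drag_coeff = 2.7  ! empirical drag coefficient (dimensionless)
+    flux = drag_coeff * wind_speed       ! intent is clear; value set in one place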
+
+
+
+Documentation Management
+------------------------
+
+In the sections below, OP stands for Operating Principles.
+
+
+
+
+<table>
+<tr><th rowspan="2">D1</th><th>Well-Maintained Documentation</th></tr>
+<tr><td >
+
+Maintain comprehensive, accessible, and up-to-date documentation for
+users, developers, managers, and other stakeholders.
+
+</td></tr>
+<tr><th>Type</th><td>Goal<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>Documentation is necessary to operate NEMS.<br>
+</td></tr>
+<tr><th>Status</th><td>Completed initial survey.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">D2</th><th>Avoid Duplication of Documentation</th></tr>
+<tr><td >
+
+</td></tr>
+<tr><th>Type</th><td>Goal<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>OP, avoid duplication.<br>
+</td></tr>
+<tr><th>Status</th><td>No checks in place.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">D3</th><th>Documentation Delivered with Software</th></tr>
+<tr><td >
+
+Documentation should be generated at the time of development and is
+considered part of any software delivery.
+
+</td></tr>
+<tr><th>Type</th><td>Goal<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Status</th><td>No checks in place.<br>
+</td></tr>
+<tr><th>Reason</th><td>Encourages timely and accurate documentation.</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">D4</th><th>Example Code should be in Regression Tests</th></tr>
+<tr><td >
+
+Any code snippets or code examples in the documentation should be
+linked via documentation generation tools to actual code that is
+included in regular regression testing.
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>Minimize the maintenance burden.<br>
+</td></tr>
+<tr><th>Status</th><td>Some implementation, no checks in place.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">D5</th><th>Public Visibility of Documentation</th></tr>
+<tr><td >
+
+Documentation that does not present a security concern should be publicly visible. 
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>OP, open for viewing.<br>
+</td></tr>
+<tr><th>Status</th><td>No general agreement.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">D6</th><th>Documentation is Accessible</th></tr>
+<tr><td >
+
+Documentation should be accessible to collaborators for contributions.
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>OP, open for participation.<br>
+</td></tr>
+<tr><th>Status</th><td>No general agreement.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">D7</th><th>Components are Documented</th></tr>
+<tr><td >
+
+ All NEMS system components (e.g. model components, physics library
+ components, workflow components) should have general documentation,
+ user documentation, technical documentation, and, as applicable,
+ scientific references. We define general documentation to include at
+ least a high-level description of the software and its purpose. User
+ documentation includes how to checkout, build and run but not
+ necessarily modify code. Technical documentation is intended for
+ people who need to understand, develop, and change technical code.
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS-COLA<br>
+</td></tr>
+<tr><th>Reason</th><td>
+
+User documentation should include how to checkout, compile, install,
+configure, run, and analyze outputs of the system. This includes
+information on how to perform common tasks, such as changing model
+grids and resolutions. Installation documentation should include
+detailed itemization of supported compiler(s) (version(s)), MPI
+implementation(s) (version(s)), batch system(s) and operating
+system(s).  It should also include a step-by-step description for
+installation of dependent libraries (e.g. ESMF) and data sets.
+
+</td></tr>
+<tr><th>Status</th><td>
+
+Initial survey completed.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">D8</th><th>Documentation Versioned with Code</th></tr>
+<tr><td >
+
+Scientific, user, and technical documentation should be clearly
+associated with a version of code. This can be done in a maintainable
+manner via documentation generation tools.
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>Versioning is critical for understanding and accuracy.<br>
+</td></tr>
+<tr><th>Status</th><td>Implementation started.
+
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+Input Data Management
+---------------------
+
+In the list below, OP stands for Operating Principles.
+
+
+
+<table>
+<tr><th rowspan="2">ID1</th><th>Maintain a single source of input data</th></tr>
+<tr><td >
+
+</td></tr>
+<tr><th>Type</th><td>Goal<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>OP, avoid duplication.<br>
+</td></tr>
+<tr><th>Status</th><td>No policy in place.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">ID2</th><th>Minimize the chances of inadvertent modification of input data.</th></tr>
+<tr><td >
+
+</td></tr>
+<tr><th>Type</th><td>Goal<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>OP, formalize sharing.<br>
+</td></tr>
+<tr><th>Status</th><td>Not implemented.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">ID3</th><th>Easy to Identify and Obtain Input Data for a Configuration</th></tr>
+<tr><td >
+
+Make it as easy as possible to identify what input data is required
+for a configuration and to obtain that data and copy it to a new
+location.
+
+</td></tr>
+<tr><th>Type</th><td>Goal<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>OP, engage through clarity.<br>
+</td></tr>
+<tr><th>Status</th><td>Not implemented.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">ID4</th><th>Input Data Not in Personal Directories</th></tr>
+<tr><td >
+
+Input data should be held in project and shared directories or
+repositories and not in personal directories.
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>OP, formalize sharing.<br>
+</td></tr>
+<tr><th>Status</th><td>Not implemented.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">ID5</th><th>Verifiable Input Data </th></tr>
+<tr><td >
+
+A timestamp or checksum approach should be adopted for input data
+files to help avoid confusion when files are updated.
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>None<br>
+</td></tr>
+<tr><th>Status</th><td>Not implemented.
+</td></tr>
+</table>
+
+<br><br>
+
+<table>
+<tr><th rowspan="2">ID6</th><th>Standardized Metadata Provenance for Input Data</th></tr>
+<tr><td >
+
+Metadata provenance should be standardized for input data.
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>None<br>
+</td></tr>
+<tr><th>Status</th><td>Not implemented.
+</td></tr>
+</table>
+
+<br><br>
+
+
+
+
+
+
+
+
+Output Data Management
+----------------------
+
+\todo This section requires management review and
+concurrence. Preparation of a plan for the estimation, storage and
+sharing of outputs by the community is one of the actions identified
+in the CDDM workshop on Sept 1-2, 2016 ("Community Data Access Plan").
+
+\todo Link to the Community Data Access Plan document
+
+In the list below, OP stands for Operating Principles.
+
+
+
+<table>
+<tr><th rowspan="2">OD1</th><th>Model Outputs Available to Non-EMC Developers</th></tr>
+<tr><td >
+
+Model outputs must be available on a platform that is open to
+developers and analysts collaborating with EMC.
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>COLA<br>
+</td></tr>
+<tr><th>Reason</th><td>OP, avoid duplication.<br>
+</td></tr>
+<tr><th>Status</th><td>No policy in place.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">OD2</th><th>Use Standard Data Formats</th></tr>
+<tr><td >
+
+Model outputs should be suitable for analysis by the usual tools
+(e.g. NCL, GrADS, Python-based tools). That implies that they must use
+standard data formats (netCDF, GRIB) and possibly satisfy
+CF-compliance or even ESGF-compliance.
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>OAS/COLA<br>
+</td></tr>
+<tr><th>Reason</th><td>OP, formalize sharing.<br>
+</td></tr>
+<tr><th>Status</th><td>Not implemented.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">OD3</th><th>Model Outputs in Single Space or Transparently Connected Spaces</th></tr>
+<tr><td >
+
+Model outputs should be stored in a single space, or
+easily/transparently connected multiple spaces.
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>Ease of analysis.<br>
+</td></tr>
+<tr><th>Status</th><td>Not implemented.
+
+
+</td></tr>
+</table>
+
+<br><br>
+
+
+<table>
+<tr><th rowspan="2">OD4</th><th>Metadata with Model Outputs</th></tr>
+<tr><td >
+
+Model outputs must be stored with metadata sufficient to reproduce the
+runs that generated them.
+
+</td></tr>
+<tr><th>Type</th><td>Requirement<br>
+</td></tr>
+<tr><th>Source</th><td>Proposed by OAS<br>
+</td></tr>
+<tr><th>Reason</th><td>Ease of analysis.<br>
+</td></tr>
+<tr><th>Status</th><td>Not implemented.
+</td></tr>
+</table>
+
+<br><br>
Index: checkout/doc/building.md
===================================================================
--- checkout/doc/building.md	(nonexistent)
+++ checkout/doc/building.md	(revision 94669)
@@ -0,0 +1,372 @@
+Compiling: NEMSAppBuilder {#building}
+=========================
+
+NEMS is the technical backbone for multiple modeling
+applications. These modeling applications are multi-component coupled
+systems that must pull together, in a systematic way, components from
+different centers and agencies. The NEMS AppBuilder enables users to
+construct a specific, versioned modeling application from a versioned
+set of model components and configuration files.
+
+The AppBuilder approach also encourages coordination of changes made
+to different applications as they get checked back into the NEMS
+repository.  The NEMS AppBuilder introduces two levels of
+configuration control: the modeling application level and the NEMS
+level. This split provides the model developer with *full version
+control over all aspects of the modeling application code*, while
+hiding many technical details about the infrastructure at the NEMS
+level.
+
+Under modeling application control:
+* Version of NEMS.
+* List of external components (e.g. MOM5, CICE, ...) required by the application.
+* Version of each required external component.
+* Location of source and installation directory for NEMS and each external component.
+* Compiler and 3rd party library versions.
+
+Under NEMS control:
+* Knowledge base of how to build each of the supported external components.
+* Infrastructure to configure and build NEMS according to the
+  information specified by the application layer.
+* Coupling system based on ESMF/NUOPC.
+
+
+
+
+Running the NEMSAppBuilder
+--------------------------
+
+The NEMS AppBuilder is located within the 
+\ref structure
+as ./NEMS/NEMSAppBuilder. It is important to call the tool
+from within the same directory where the application specific
+AppBuilder description file is located. For information on how to
+create or modify such a build file, see 
+\ref building-new-targets "later sections".
+
+There are two ways to call the NEMSAppBuilder: interactively and
+non-interactively.
+
+### Interactive NEMSAppBuilder Compilation
+
+The NEMSAppBuilder has a user-friendly interface, based on low-level
+terminal-based Unix utilities for maximum ease of use and portability.
+To run an interactive build, simply start NEMSAppBuilder without arguments:
+    
+    [~/UGCS-Seasonal]$ ./NEMS/NEMSAppBuilder
+    
+![](appbuilder-step0.png)
+
+Next each of the external components is built. First CICE, then MOM5:
+
+![](appbuilder-step1.png)
+
+Next the NEMS build system is configured:
+
+![](appbuilder-step2.png)
+
+Finally, the NEMS executable is built:
+
+![](appbuilder-step3.png)
+
+The build finishes with a notification:
+
+![](appbuilder-step4.png)
+
+During the build process the NEMS AppBuilder monitors progress and
+reports any issues on a diagnostic screen. Each diagnostic screen
+offers the option to repeat the step that just failed or to bail out.
+Repeating a step can be useful when the reported issue can be
+corrected in a second terminal, allowing the build process to continue.
+
+If the build process terminates with a problem condition,
+`appBuilder.*log` files with build and debug information will be present
+in the root directory from which the AppBuilder was started.
+
+
+
+### Non-Interactive Compilation
+
+The syntax is:
+
+    ./NEMS/NEMSAppBuilder (options) project=(project)
+
+Here, `(project)` is the application build target, or "project."  It
+corresponds to a `(project).appBuilder` file.  The list of possible
+build targets can be found at the top level of the application
+checkout, which contains one `(project).appBuilder` file for each
+possible value of `(project)`.
+
+The  `(options)` should be one of the following:
+
+* `rebuild` - clean the source directory before recompiling.
+
+* `norebuild` - do not clean; reuse existing libraries and object
+    files whenever possible.
+
+Further options can be obtained by running
+
+    ./NEMS/NEMSAppBuilder --help
+
+
+
+
+Troubleshooting Failed Builds
+-----------------------------
+
+### Incomplete Checkout
+
+When there are network problems or high server load, your checkout
+from the Subversion and Git repositories may fail.  This will lead to
+any number of confusing errors while building.  With a Subversion
+checkout, you can resume the checkout process by going to the top
+level (above the NEMS directory) and running `svn update`.  Repeat
+that until no more files are updated, and no errors are reported.
+As of this writing, there is no equivalent command for Git.
+
+### Unclean Environment
+
+Setting up your environment incorrectly can lead to problems while
+building.  If you see build issues from a clean, new checkout, this
+may be the problem.  You should remove all `module` commands from your
+`~/.*rc` files and get a clean, new login shell.  Then retry the
+build.
+
+### Unclean Checkout
+
+Another common cause of failed builds is having unintended changes in
+your source code or build system.  To test for this, get a clean, new
+checkout from the repository and retry.
+
+### Unsupported Platform
+
+Some apps only support a few platforms.  For example, the NEMSLegacy
+app is only supported on WCOSS Phase 1 (Gyre/Tide) and NOAA Theia.
+Attempts to build on other platforms may or may not work.
+
+### Simultaneous Builds
+
+Attempting to build multiple times in the same NEMS checkout directory
+will cause unexpected failures.  For example, if you are running the
+regression test system twice at once, multiple builds will happen at
+the same time.  On Theia, this frequently shows up as a massive,
+many-terabyte file which cannot be created due to fileset quota limits.
+Other failure modes are possible.
+
+
+
+
+Creating New Build Targets {#building-new-targets}
+--------------------------
+
+As discussed above, the NEMSAppBuilder builds the executable based on
+build targets described in `*.appBuilder` files.  The list of build
+targets available for an app is found at the top level of the app in
+`*.appBuilder` files.  The app-level documentation should have
+information about the meaning and purpose of each build target.  If
+no build target is suitable for your purposes, you may need to create
+one.  Frequently, the best way is to modify an existing appBuilder file.
+This section describes the various parts of the build configuration
+to assist you in deciding what to change when making your own build.
+
+### Build Description File: `*.appBuilder`
+
+As an example, let us take the UGCS-Seasonal.appBuilder file:
+
+    # Climate Forecast System
+    #
+    ## GSM-MOM5-CICE NEMS Application Builder file
+    
+    COMPONENTS=( "GSM" "CICE" "MOM5" )
+    
+    # CICE
+    CICE_SRCDIR=$ROOTDIR/CICE
+    CICE_DIR=$ROOTDIR/CICE-INSTALL
+    
+    # MOM5
+    MOM5_SRCDIR=$ROOTDIR/MOM5
+    MOM5_DIR=$ROOTDIR/MOM5-INSTALL
+
+    case "$FULL_MACHINE_ID" in
+        yellowstone|gaea)
+            CHOSEN_MODULE=$FULL_MACHINE_ID/ESMF_NUOPC
+            ;;
+        wcoss*|theia)
+            CHOSEN_MODULE=$FULL_MACHINE_ID/ESMF_700_gsm
+            CONFOPT="gsm_intel_${MACHINE_ID:?}"
+            ;;
+    esac
+
+The script must set certain variables:
+
+ * `COMPONENTS = ( LIST )` --- the list of components that should be
+   enabled in the NEMS executable.
+
+ * For each component:
+
+    * `*_SRCDIR` --- the source code for that component
+
+    * `*_DIR` --- the installation directory for that component
+
+ * `CHOSEN_MODULE` --- a modulefile, relative to the app-level
+   `modulefiles` directory, that sets up the environment.
+
+The build can also override three configuration files by placing a new
+copy of the file in the application-level `conf` directory.  
+
+ * `CONFOPT` --- the `configure.nems` file with build-time and
+   link-time settings.
+
+ * `EXTERNALS_NEMS` --- an `externals.nems` file that specifies
+   locations of data and stub components.
+
+ * `ESMF_VERSION_DEFINE` --- path to an `ESMFVersionDefine.h` file
+
+Reasonable defaults will be provided if any of those are unspecified.
+
+
+
+
+### Platform Adaption
+
+Note that the `*.appBuild` file has different options for different machines:
+
+    case "$FULL_MACHINE_ID" in
+        yellowstone|gaea)
+            CHOSEN_MODULE=$FULL_MACHINE_ID/ESMF_NUOPC
+            ;;
+        wcoss*|theia)
+            CHOSEN_MODULE=$FULL_MACHINE_ID/ESMF_700_gsm
+            CONFOPT="gsm_intel_${MACHINE_ID:?}"
+            ;;
+    esac
+
+The `$FULL_MACHINE_ID` and `$MACHINE_ID` variables can be
+used to detect which machine you're running on.  This lets your
+`*.appBuild` file decide which options to use.
+
+| Location                 | `$FULL_MACHINE_ID` | `$MACHINE_ID`  | `$PEX`    |
+| ------------------------ | ------------------ | -------------- | --------- | 
+| WCOSS Phase 1 (IBM side) | `wcoss.phase1`     | `wcoss`        | `1`       |
+| WCOSS Phase 2 (IBM side) | `wcoss.phase2`     | `wcoss`        | `2`       |
+| WCOSS Cray               | `wcoss_cray`       | `wcoss_cray`   | `c`       |
+| Jet (all partitions)     | `jet`              | `jet`          | &nbsp;    |
+| Theia                    | `theia`            | `theia`        | &nbsp;    |
+| GAEA                     | `gaea`             | `gaea`         | &nbsp;    |
+| Yellowstone              | `yellowstone`      | `yellowstone`  | &nbsp;    |
+| (other)                  | `unknown`          | `unknown`      | &nbsp;    |
+
+The confusing mixture of `.` and `_` is for historical reasons
+(maintaining compatibility with older scripts).  For your convenience,
+two aliases are provided:
+
+* `FULL_MACHINE_ID_DOT` = wcoss.phase1, wcoss.phase2, wcoss.cray
+* `FULL_MACHINE_ID_UNDER` = wcoss_phase1, wcoss_phase2, wcoss_cray
+
+Similar aliases are provided for `$MACHINE_ID_DOT` and `$MACHINE_ID_UNDER`.
+
+
+
+
+### Build Environment Specification
+
+The NEMS build system requires two files to set up the build
+environment.  
+
+ * `modulefiles/$PLATFORM/$MODULE` - a `modulefile`, loaded with the unix
+   `module` command, that sets up the build environment
+
+ * `conf/configure.nems.*` - sets variables required by NEMS build scripts.
+
+The second file will be chosen automatically from `NEMS/src/conf` if
+none is provided.
+
+
+
+
+### The `modulefile`
+
+The NEMS `modulefiles` must follow the standard `modulefile` syntax.
+That means they must begin with this line:
+
+    #%Module######################################################################
+
+Other lines specify the modules to load.  Here is the full modulefile
+for one of the GFS apps on WCOSS:
+
+    #%Module######################################################################
+    # This script is responsible for loading modules that are compatible with
+    # the NUOPC Layer version used in NEMS.
+    
+    module load  EnvVars/1.0.0
+    module load ibmpe lsf NetCDF/4.2/serial ESMF/700
+    module load ics/15.0.3
+
+Note that this is not a shell script; it is a Tcl script.  You cannot
+have any `source` or `export` commands.  It is best to stick with
+these two commands if possible:
+
+ * `module load module-name`
+ * `module use /p/a/t/h`
+
+The `modulefiles` can contain other commands, the most common of which
+are:
+
+ * `prepend-path VARIABLE /p/a/t/h`
+ * `append-path VARIABLE /p/a/t/h`
+ * `remove-path VARIABLE /p/a/t/h`
+ * `setenv variable value`
+ * `unsetenv variable`
+ * `set variable value`
+ * `unset variable`
+
+There are multiple implementations of the `module` command, some of
+which have more powerful features.  On some platforms, you may have to
+use the more advanced features in order to properly set up the
+environment.  That is why NEMS uses a different modulefile for each
+platform.
+
+No matter what you do, you must follow this critical rule:
+
+\warning Never put a `module purge` command in a `modulefile`.
+
+Placing a `module purge` in a `modulefile` will cause infinite loops,
+corrupted environments, or segfaults of the `module` command on some
+platforms.  The NEMS scripts already purge the modules by running a
+"shell include" file before loading your `modulefile`.  This script
+can be found in:
+
+* `NEMS/src/conf/module-setup.sh.inc` (for bash, sh, and ksh)
+* `NEMS/src/conf/module-setup.csh.inc` (for csh and tcsh)
+
+
+
+
+### For More Information About `make` and `modulefiles`
+
+Good general resources for learning about `modulefiles` are:
+
+* NICS module website: http://www.nics.tennessee.edu/computing-resources/modules
+
+* XSEDE computing environments page: https://www.xsede.org/software-environments
+
+*  The `module` command:
+
+       module help # list allowed "module" commands
+       module avail # list available modules
+       module spider # also list modules in hidden families
+
+   Note that the `module spider` command is only available on
+   platforms that are able to hide modules that are unavailable
+   without prerequisites.  For example, hiding a `NetCDF` library that
+   was compiled with `gfortran` until the `gfortran` module is loaded.
+
+For `makefiles`, see:
+
+* GNU Make tutorial: http://opensourceforu.com/2012/06/gnu-make-in-detail-for-beginners/
+
+* GNU Make manual: https://www.gnu.org/s/make/manual/make.html
+
+* If you have a lot of time on your hands, a book: https://notendur.hi.is/jonasson/software/make-book/
+
+
Index: checkout/doc/DREV80061.md
===================================================================
--- checkout/doc/DREV80061.md	(nonexistent)
+++ checkout/doc/DREV80061.md	(revision 94669)
@@ -0,0 +1,162 @@
+DREV80061: WAM-IPE 0.4 Validate WAM and IPE Standalone {#milestone_DREV80061}
+======================================================
+
+\date 8/1/2016
+
+Repository URL
+--------------
+
+* https://svnemc.ncep.noaa.gov/projects/ipe/WAM-IPE
+
+Description
+-----------
+
+This milestone is a check to ensure that the components running as
+part of a system within NEMS produce the same results as the
+components running stand-alone. To accomplish this, the Whole
+Atmosphere Model (WAM), the Ionosphere-Plasmasphere Electrodynamics
+(IPE) model, and the 
+\ref sw_mediator "space weather mediator"
+are run side by side within the NEMS system without exchanging
+fields. The output of the IPE component of this system is compared to
+a stand-alone version of IPE from before it was incorporated into
+NEMS. The output of the WAM component of this system is compared to a
+version of WAM running stand-alone within NEMS.  This is a technical
+(non-scientific) milestone.
+
+\todo reference wam and ipe in the above paragraph
+
+In this milestone both model components run for 1 hour starting at
+1/15/2009. The WAM component runs on a global 3D reduced Gaussian grid
+(\ref GRID_wam "WAM grid"). The horizontal resolution is T62. The
+vertical component of this grid is 150 levels in pressure with a
+maximum height of approximately 800 km. The IPE component runs on an
+80 x 170 flux tube grid (\ref GRID_IPE "IPE grid") that extends up to
+approximately 360,000 km.
+
+NUOPC "caps", which are essentially wrappers for the coupling
+interface, are provided for the WAM and IPE components allowing them
+to work in NEMS and other NUOPC-compliant systems. For example, the
+"IPE cap" allows the IPE model to work as a part of the NEMS system.
+
+Build & Run
+-----------
+
+Instructions on how to build and run specific code revisions
+(e.g. this milestone revision) and the supported compsets are provided
+on the WAM-IPE Build & Run page.
+
+\todo reference WAM-IPE Build & Run page.
+
+Run Sequence
+------------
+
+The NEMS run-time configuration for the side by side WAM-IPE system is
+provided below.  For details on the run sequence in general please
+refer to the 
+\ref configuring
+and
+\ref architecture
+pages.
+
+    runSeq::
+     @180.0
+       ATM
+       IPM
+     @
+    ::
+
+The stand-alone version of IPE wasn't run within NEMS, so it doesn't
+have a run-time configuration. The NEMS run-time configuration for the
+stand-alone WAM run is provided below.
+
+    runSeq::
+     @180.0
+       ATM
+     @
+    ::
+
+Validation
+----------
+
+The validation procedure for this milestone is to compare output files
+from the IPE and WAM components running within a side by side WAM-IPE
+NEMS system to versions of the components running stand-alone. The
+following subsections describe the results for both components.
+
+A test report is available for the validation run: 
+\subpage REPORT-20160801-WAM-IPE-standalone
+
+### IPE
+
+To verify that IPE is running correctly within NEMS we compare the
+output files from a stand-alone version of IPE from before it was
+incorporated into NEMS to the current version running in a side by
+side WAM-IPE NEMS system. This comparison is done using the UNIX cmp
+command. This command compares files byte by byte to ensure that they
+are identical. The files being compared are plasma information files
+(plasma00 - plasma16), and electrodynamics information files (fort.2000
+- fort.2010). See the results of the comparison below. Given that the
+files produced by the two versions are identical, the conclusion was
+that IPE running within the WAM-IPE system is behaving the same as the
+stand-alone version.
+
+| File Name |     File Source 1 |  File Source 2 | Number of Bytes Different |  Status |
+| :-------: | :---------------: | :------------: | :-----------------------: | :-----: |
+| plasma00  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma01  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma02  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma03  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma04  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma05  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma06  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma07  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma08  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma09  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma10  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma11  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma12  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma13  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma14  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma15  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| plasma16  |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| fort.2000 |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |      OK |
+| fort.2001 |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| fort.2002 |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| fort.2003 |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| fort.2004 |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| fort.2005 |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| fort.2006 |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| fort.2007 |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| fort.2008 |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| fort.2009 |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+| fort.2010 |   Stand-alone IPE |     IPE within side by side WAM-IPE |        0 |     OK |
+
+### WAM
+
+To verify that WAM is running correctly within NEMS we compare the
+output files from WAM running stand-alone to WAM running in a side by
+side WAM-IPE NEMS system. This comparison is done using the UNIX cmp
+command. This command compares files byte by byte to ensure that they
+are identical. The files being compared are flux files (flxf00 &
+flxf01), surface files (sfcf00 & sfcf01), and sigma files (sigf00 &
+sigf01). See the results of the comparison below. Given that the files
+produced by the two versions are identical, the conclusion was that
+WAM running within the WAM-IPE system is behaving the same as the
+stand-alone version.
+
+| File Name |  File Source 1  |     File Source 2  |  Number of Bytes Different |       Status |
+| :-------: | :-------------: | :----------------: | :------------------------: | :----------: |
+| flxf00 |      Stand-alone WAM |     WAM within side by side WAM-IPE |                 0 |       OK |
+| flxf01 |      Stand-alone WAM |     WAM within side by side WAM-IPE |                 0 |       OK |
+| sfcf00 |      Stand-alone WAM |     WAM within side by side WAM-IPE |                 0 |       OK |
+| sfcf01 |      Stand-alone WAM |     WAM within side by side WAM-IPE |                 0 |       OK |
+| sigf00 |      Stand-alone WAM |     WAM within side by side WAM-IPE |                 0 |       OK |
+| sigf01 |      Stand-alone WAM |     WAM within side by side WAM-IPE |                 0 |       OK |
+
+### Limitations and Technical Notes
+
+This milestone is purely a verification that the individual components
+are running correctly within the WAM-IPE system. Because no fields are
+being exchanged, this system shouldn't be expected to behave as a fully
+coupled system.
\ No newline at end of file
Index: checkout/doc/DREV61075.md
===================================================================
--- checkout/doc/DREV61075.md	(nonexistent)
+++ checkout/doc/DREV61075.md	(revision 94669)
@@ -0,0 +1,118 @@
+DREV61075: Reg-Hydro 0.1 {#milestone_DREV61075}
+========================
+
+\date 9/1/2015
+
+Repository URL
+--------------
+
+* https://svnemc.ncep.noaa.gov/projects/nems/apps/Regional/trunk
+
+Description
+-----------
+
+DREV61075, Regional 0.1, is an internal delivery of a modeling
+application in which GSM-CICE-MOM5 run side-by-side with WRF-Hydro as
+a hydraulics/routing component and LIS/Noah.3.3 as a land surface
+model.  This is a technical milestone required to arrive at a fully
+coupled system.  The global components (GSM-CICE-MOM5) are currently
+used in the system as stand ins for the regional atmosphere-ice-ocean
+components. (Full NUOPC interfaces are not yet available, e.g., for
+NMMB).  Side-by-side indicates that the WRF-Hydro and LIS components
+do not communicate with the other components, so no coupling fields
+are exchanged.  In order to run these components side-by-side they
+require forcing data input files relevant to the calendar and
+timesteps for the run.
+ 
+NUOPC "caps" are provided for both WRF-Hydro and LIS allowing these
+components to work in NUOPC-compliant systems.
+ 
+Code Repositories
+-----------------
+
+WRF-Hydro source code is available in the 
+[external_components directory of the NEMS SVN repository](https://esgf.esrl.noaa.gov/projects/couplednems/external_components).  
+It is maintained there as a clone of the official WRF-Hydro repository
+at NCAR (privately hosted on GitHub).  Specifically, the version used
+for the v0.1 Regional app is:
+ 
+ * https://svnemc.ncep.noaa.gov/projects/nems/external_comps/WRFHYDRO/trunk/NDHM  (revision 61047)
+ 
+LIS source code is also available in the external_components directory
+of the NEMS SVN repository.  It is maintained as a clone of the
+official LIS repository hosted at NASA-NCCS (private access).
+ 
+ * https://svnemc.ncep.noaa.gov/projects/nems/external_comps/LIS/branches/nuopc (revision 61000)
+
+Validation
+----------
+
+Previous revisions of WRF-Hydro ran coupled to an atmosphere with an
+embedded land or standalone with its own embedded land.  To isolate
+the hydraulics component of WRF-Hydro, a new function was added that
+is capable of reading land model output from file and is used in place
+of an active land component as input to the channel routing
+component. This feature was used in a native (outside NUOPC/NEMS) run
+of WRF-Hydro to generate a baseline output for a 24 hour run where
+land forcing data is provided hourly on a 268x260 domain.  The native
+run produces hourly streamflow values at station locations with
+lat/lon coordinates.  The outside NUOPC/NEMS standalone hydraulic
+component is compiled with the compile_offline_NoahMP.csh script.  In
+order to turn off the embedded land the hrldas configuration file
+forcing type value must be set to 8.  Validation is carried out
+through ncdiff comparison of the standalone run and regional run
+output of qlink1, the streamflow output, over a 4 hour time window on
+December 01, 2009.  The land surface forcing data used was originally
+from a 2013 run, but the files were renamed to appear as a 2009 run in
+order to share a common time domain for the side-by-side run.
+
+After building LIS using the compile script a LIS executable capable
+of running a standalone (outside NUOPC/NEMS) surface model is placed
+in the root directory.  In order to run the LIS executable in
+standalone mode the lis configuration file running mode value is set
+to 'retrospective'.  Validation is carried out through ncdiff
+comparisons of the standalone run and output of SoilMoist_tavg over a
+4 hour time window on December 01, 2009.
+
+Four hours is chosen as a quick test that will run in under 30 minutes
+on Theia.
+
+Run Sequence
+------------
+
+The NEMS run-time configuration for the default Regional configuration
+is provided below.  Note that LND and HYD are run on disjoint
+processor sets, so they will run concurrently in each of the time
+loops and no data will be exchanged.  For details on the run sequence
+in general please refer to the 
+\ref configuring
+and
+\ref architecture
+pages.
+
+    runSeq::
+      @7200.0
+        OCN -> MED :remapMethod=redist
+        MED MedPhase_slow
+        MED -> OCN :remapMethod=redist
+        OCN
+        @3600.0
+          MED MedPhase_fast_before
+          MED -> ATM :remapMethod=redist
+          MED -> ICE :remapMethod=redist
+          ATM
+          LND
+          HYD
+          ICE
+          ATM -> MED :remapMethod=redist
+          ICE -> MED :remapMethod=redist
+          MED MedPhase_fast_after
+        @
+      @
+    ::
+ 
+Build & Run
+-----------
+
+Instructions on how to build and run specific code revisions
+(e.g. this milestone revision) are provided on the Regional
+application page.
\ No newline at end of file
Index: checkout/doc/nemsmain.md
===================================================================
--- checkout/doc/nemsmain.md	(nonexistent)
+++ checkout/doc/nemsmain.md	(revision 94669)
@@ -0,0 +1,36 @@
+NEMS Documentation
+==================
+
+This website describes the coupling infrastructure and
+techniques used in the
+[NOAA Environmental Modeling System (NEMS)](http://www.emc.ncep.noaa.gov/index.php?branch=NEMS).
+NEMS coupling infrastructure is based on the
+[Earth System Modeling Framework (ESMF)](http://www.earthsystemmodeling.org/)
+and
+[National Unified Operational Prediction Capability (NUOPC)](https://www.earthsystemcog.org/projects/nuopc/)
+Layer code and conventions. The software is under active development,
+and is incorporating atmosphere, ocean, ice, and wave models. The
+coupled system will be used for weather and longer range predictions.
+
+Everything in this doxygen-generated webpage is created from files
+within the NEMS repository.  That documentation is versioned with the
+code -- every time the code is updated, we update the documentation to
+match.  Further information outside this website can be found in links
+within the above paragraph, as well as the 
+[COG site](https://esgf.esrl.noaa.gov/projects/couplednems/).
+
+\todo Refer to VLAB NEMS page once one exists.
+
+* @subpage documentation
+
+* @subpage HowToOldToNew
+
+* @subpage milestones
+
+* @subpage grids
+
+* @subpage reports
+
+* @subpage links
+
+* @subpage nemsguide
Index: checkout/doc/GRID_LIS_T126.md
===================================================================
--- checkout/doc/GRID_LIS_T126.md	(nonexistent)
+++ checkout/doc/GRID_LIS_T126.md	(revision 94669)
@@ -0,0 +1,34 @@
+Land Information System (LIS) T126 Grid {#GRID_LIS_T126}
+=======================================
+
+Description
+-----------
+
+The global Gaussian T126 grid^1^ wraps around the entire Earth,
+starting and ending at the Prime Meridian.  It covers all latitude
+points from south (-90) to north (90).
+
+| Long Name                                   | Name    | Value       |
+| :------------------------------------------ | :------ | :---------- |
+| Number of longitudinal points               | N<sub>i</sub>    | 384         |
+| Number of latitudinal points                | N<sub>j</sub>    | 190         |
+| Northernmost latitude                       | La<sub>1</sub>   | 90 N        |
+| Easternmost longitude                       | Lo<sub>1</sub>   | 0.000 E     |
+| Southernmost latitude                       | La<sub>2</sub>   | 90 S        |
+| Westernmost longitude                       | Lo<sub>2</sub>   | 0.000 E     |
+| Longitudinal increment                      | D<sub>i</sub>    | .93750&deg; |
+| Number of latitude circles, pole to equator | N       |  95         |
+
+Grid Cell Plot (NCL)
+--------------------
+
+\image html GRID_LIS_T126-lis_t126.png
+
+\image html GRID_LIS_T126-lis_t126_conus.png
+
+\image html GRID_LIS_T126-lis_t126_frontrange.png
+
+Reference
+---------
+
+1. [Master List of NCEP Storage Grids, GRIB Edition 1 (FM92)](http://www.nco.ncep.noaa.gov/pmb/docs/on388/tableb.html), grid is number 126.
Index: checkout/doc/DREV84552.md
===================================================================
--- checkout/doc/DREV84552.md	(nonexistent)
+++ checkout/doc/DREV84552.md	(revision 94669)
@@ -0,0 +1,146 @@
+DREV84552: Reg-Nest 0.2  {#milestone_DREV84552}
+=======================
+
+\date Last revised: 11/18/2016
+
+Description
+-----------
+
+Regional-Nest 0.2 (DREV 84552) is a two-way configuration of the
+Nonhydrostatic Mesoscale Model on the B Grid (NMMB) and a regional
+configuration of the HYbrid Coordinate Ocean Model (HYCOM). The main
+features of this milestone release include 1) two way coupling between
+HYCOM and NMMB with moving nest support; 2) performance enhancement.
+
+This revision has been run for 2 days using initial and boundary
+conditions based on Hurricane Patricia, and exhibits behavior that is
+Earth-like. The initial condition starts at 2015-10-20 12:00
+hours. This is the starting time for HYCOM initialization and
+integration. This regional HYCOM has a 1/12th degree resolution with
+1284x516 data points spanning a geographical region (-179.76, 2.48) to
+(-77.12, 39.98). HYCOM works on a regular lat-lon grid over this
+geographic region. The regional NMMB grid has a single parent domain
+roughly at 1/4 degree resolution with 451x451x42 data points spanning
+a geographic region (-40.5, -40.5) with central location (-94.3,
+18.4). Resolution of the moving outer nest is 231x201x42. Resolution
+of the moving inner nest is 381x345x42.  The NMMB grids are Lambertian
+curvilinear; they can also be thought of as rotated lat-lon grids.
+
+Field Exchange
+--------------
+
+Currently all fields are transferred using bilinear interpolation. The
+following flux fields are transferred between NMMB and HYCOM:
+
+| NMMB->HYCOM | HYCOM->NMMB |
+| ----------- | ----------- |
+| latent heat flux | sea surface temperature |
+| sensible heat flux | &nbsp; |
+| net longwave | &nbsp; |
+| net shortwave | &nbsp; |
+| zonal momentum flux | &nbsp; |
+| meridional momentum flux | &nbsp; |
+| precipitation rate | &nbsp; |
+ 
+Processor Layout and Run Sequences
+----------------------------------
+
+The coupled system runs NMMB and HYCOM concurrently. The processor
+layout and run sequence are detailed below. HYCOM integrates using
+the Patricia initial condition. HYCOM ignores mediator input on its first
+time step. HYCOM then uses mediator input in subsequent time steps,
+after NMMB has integrated and provided valid input in precipitation,
+radiative fluxes, heat fluxes, and momentum fluxes at coupling
+intervals.
+
+| Component | Processor Layout |
+| --------- | ---------------- |
+| NMMB      | 0-249            |
+| HYCOM     | 250-393          |
+| MEDIATOR  | 394-453          |
+
+    runSeq::
+      @1800.0
+        MED MedPhase_slow
+        MED -> OCN :remapMethod=redist
+        @600.0
+          MED MedPhase_fast_before
+          MED -> ATM :remapMethod=redist
+          ATM
+          ATM -> MED :remapMethod=redist
+          MED MedPhase_fast_after
+        @
+        OCN
+        OCN -> MED :remapMethod=redist
+      @
+    ::
+ 
+Validation
+----------
+
+### Parent Domain
+
+Plots of SST ingested in NMMB from HYCOM are shown here at 01 hr, 48 hr
+
+\image html DREV84552-sst_d01_01hrs_b.png  SST received by NMMB parent nest after one hour model simulation time. By now HYCOM has run 2 time steps and sends updated SST to NMMB.
+
+\image html DREV84552-sst_d01_48hrs_b.png SST received by NMMB parent nest at 48 hr of model simulation.
+
+ 
+### Moving Outer Nest
+
+\image html DREV84552-sst_d02_01hrs.png SST received by NMMB outer nest after one hour model simulation time.
+
+
+\image html DREV84552-sst_d02_48hrs.png SST received by NMMB outer nest at 48 hr of model simulation.
+
+ 
+### Moving Inner Nest
+
+\image html DREV84552-sst_d03_01hrs.png SST received by NMMB inner nest after one hour model simulation time.
+
+\image html DREV84552-sst_d03_48hrs.png SST received by NMMB inner nest at 48 hr of model simulation.
+
+Limitations of this milestone release
+-------------------------------------
+
+The coastline SST results look jagged in the outer and inner
+nests. There are two possible explanations to this that need to be
+investigated: 1) the outer/inner nest SSTs are interpolated from
+coarser resolution parent domain. 2) masking along the coastline from
+parent nest is much coarser than the outer/inner nest. 
+
+Download and Build
+------------------
+
+This revision can be downloaded with the following command:
+
+ * svn co -r 84552 https://svnemc.ncep.noaa.gov/projects/nems/apps/Regional-Nest
+
+Instructions on how to download and build a NEMS application are discussed in the
+\ref documentation "NEMS User's Guide and Reference".
+
+\todo link to relevant sections of nems guide
+
+The coupled system can be built with the following command after download is complete:
+
+    ./NEMS/NEMSAppBuilder
+
+Running the Patricia moving nest compset
+----------------------------------------
+
+Compsets that can be run with this revision are: cfsr%2015102012_48hr%nest_nmm_hycom%1_12th
+
+To run a compset, start within the application directory and execute
+the NEMS CompsetRun tool by typing:
+
+    ./NEMS/NEMSCompsetRun -compset NEMS/compsets/cfsr%2015102012_48hr%nest_nmm_hycom%1_12th
+
+Currently, the data files are only set up on Theia.  The data files
+for HYCOM can be found at:
+
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/HYCOM/REGIONAL_HEP20/
+
+Data files for NMMB can be found at:
+
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/RT-Baselines/NMMB_patricia_nests
+
+Last Update: Feb. 24, 2017, 9:09 a.m. by Fei Liu
\ No newline at end of file
Index: checkout/doc/GRID_HYCOM_POP_glbx1v6.md
===================================================================
--- checkout/doc/GRID_HYCOM_POP_glbx1v6.md	(nonexistent)
+++ checkout/doc/GRID_HYCOM_POP_glbx1v6.md	(revision 94669)
@@ -0,0 +1,58 @@
+HYbrid Coordinate Ocean Model (HYCOM) Grid POP glbx1v6 {#GRID_HYCOM_POP_glbx1v6}
+======================================================
+
+Description
+-----------
+
+HYCOM runs on a 1.125 degree global lat-lon POP grid (glbx1v6). The
+`glbx1v6` grid is a regular spherical grid in both
+hemispheres. Longitude runs from -116.95999&deg; to 319.59000&deg; with
+1.125 degree spacing; and latitude runs from -79.22000&deg; to
+89.71000&deg;.
+ 
+| Long Name                     | Name   | Value      |
+| :---------------------------- | :----- | :--------- |
+| Number of longitudinal points | N<sub>i</sub>   | 320        |
+| Number of latitudinal points  | N<sub>j</sub>   | 384        |
+| Minimum longitude             | &nbsp; | -116.95999 |
+| Maximum longitude             | &nbsp; | 319.59000  |
+| Minimum latitude              | &nbsp; | -79.22000  |
+| Maximum latitude              | &nbsp; | 89.31000   |
+
+
+Longitude Plot
+--------------
+
+\image html GRID_HYCOM_POP_glbx1v6-hycom_GLBx_lon.png
+
+Latitude Plot
+-------------
+
+\image html GRID_HYCOM_POP_glbx1v6-hycom_GLBx_lat.png
+
+Mask Plot
+---------
+
+\image html GRID_HYCOM_POP_glbx1v6-hycom_GLBx_msk.png
+
+Data Decomposition
+------------------
+
+The grid and data decomposition is done in the following manner (see
+the sketch after this list):
+
+1. The latitudes are regularly decomposed into jqr=5 bands, which are
+each 76 or 77 latitudes wide.
+
+2. Each latitude band is decomposed into blocks along the
+longitude. The actual size of each block, and the number of blocks,
+is flexible within some limits in order to allow for load
+balancing. The limits are set by iqr=8, the maximum number of blocks
+in each band, and idm=77, the maximum number of longitudes per block.
+
+3. Every PET (persistent execution thread, i.e. MPI rank) is
+associated with exactly one lat-lon block. Here every block contains
+ocean and must be associated with a PET. There are 40 blocks and 40
+PETs.
+
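+The arithmetic behind this layout can be sketched as follows.  This
+is an illustrative Python sketch, not HYCOM code; the function name
+and the uniform block widths are assumptions, since the actual block
+sizes may be adjusted for load balancing.
+
+    def glbx1v6_decomposition(ni=320, nj=384, jqr=5, iqr=8, idm=77):
+        # Latitude bands whose sizes differ by at most one:
+        # 384 = 4*77 + 76, matching the 76- and 77-latitude bands above.
+        base, extra = divmod(nj, jqr)
+        band_sizes = [base + (b < extra) for b in range(jqr)]
+        # Each band needs at least ceil(ni/idm) blocks and may use up to
+        # iqr; here all iqr=8 are used, so 5 bands x 8 blocks gives the
+        # 40 ocean-containing blocks that match the 40 PETs.
+        assert -(-ni // idm) <= iqr
+        base_w, extra_w = divmod(ni, iqr)
+        block_widths = [base_w + (k < extra_w) for k in range(iqr)]
+        return band_sizes, block_widths   # ([77,77,77,77,76], [40]*8)
+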
+ 
+\image html GRID_HYCOM_POP_glbx1v6-depth_POP1v6_01.040.png
Index: checkout/doc/HowTo_OldToNewStruct_compbranch.md
===================================================================
--- checkout/doc/HowTo_OldToNewStruct_compbranch.md	(nonexistent)
+++ checkout/doc/HowTo_OldToNewStruct_compbranch.md	(revision 94669)
@@ -0,0 +1,86 @@
+Step 3. Updating Component Branches  {#HowToOldToNewCompBranch}
+==============================
+
+ 
+Step 3.1: 
+---------
+
+The next step is to manually merge changes from the old gsm branch to
+the new branch created from the trunk. To see the changes made in the
+old GSM branch, we can look at the trac page:
+
+ * https://svnemc.ncep.noaa.gov/trac/gsm/changeset?reponame=&new=84116%40branches%2Fmeixner%2Ftwowaywavcoup&old=80525%40trunk
+ 
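+One way to view the same changes from the command line (equivalent to
+the trac page above) is an `svn diff` between the two URL@revision
+pairs:
+
+    svn diff https://svnemc.ncep.noaa.gov/projects/gsm/trunk@80525 \
+             https://svnemc.ncep.noaa.gov/projects/gsm/branches/meixner/twowaywavcoup@84116
+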
+Skipping the changes that are simply extra spaces, these changes were
+put into the r93335 GSM branch created in Step 6:
+
+ * https://svnemc.ncep.noaa.gov/projects/gsm/branches/NEMSUpdate/UGCSWeather
+ 
+I can confirm that these are in fact the changes I want again using
+the trac page:
+
+ * https://svnemc.ncep.noaa.gov/trac/gsm/changeset?reponame=&new=93335%40branches%2FNEMSUpdate%2FUGCSWeather&old=89613%40trunk
+ 
+Step 3.2:
+---------
+
+When GSM moved to having a cap like the other components, some files
+moved from being stored in the NEMS repository to being stored in GSM,
+such as:
+
+    <nems trunk/branch>/src/atmos/share      --->    gsm/trunk/share
+    <nems trunk/branch>/src/atmos/post       --->    gsm/trunk/post
+ 
+To see what changes I had made that would need to be merged from the
+old version of NEMS to the new branch of GSM, I looked at the trac
+page
+
+ * https://svnemc.ncep.noaa.gov/trac/nems/changeset?reponame=&new=84203%40branches%2FUGCS-Seasonal%2FtwoWayWW3_from_r80562&old=80562%40trunk
+ 
+For this example, the only updates were to `share/module_CPLFIELDS.F90`.
+These updates are now in revision 93343 of the following GSM
+branch:
+
+ * https://svnemc.ncep.noaa.gov/projects/gsm/branches/NEMSUpdate/UGCSWeather
+ 
+Now all of the GSM updates should be in the new GSM branch.  
+ 
+Step 3.3: 
+---------
+
+Now we need to merge the appropriate changes from the old branch of
+NEMS into the new branch of NEMS.  Again, we can look at the trac page
+for the changes that were previously made:
+
+ * https://svnemc.ncep.noaa.gov/trac/nems/changeset?reponame=&new=84203%40branches%2FUGCS-Seasonal%2FtwoWayWW3_from_r80562&old=80562%40trunk
+ 
+Only modified files that remain in the new nems branch (and are not
+files which have been moved to the App level) need to be updated.  In
+this case, these files are `NEMSAppBuilder` and
+`src/module_EARTH_GRID_COMP.F90`.  Note that although there is still a
+file named `NEMSCompsetRun`, this file is not the same as the old
+`NEMSCompsetRun` and therefore should not be updated.
+ 
+These changes were checked into r93381 of the following branch of NEMS:
+
+ * https://svnemc.ncep.noaa.gov/projects/nems/branches/NEMSUpdate/UGCSWeather
+
+
+Step 3.4: 
+---------
+
+At this point the changes that remain in your NEMS branch are either in
+now-obsolete files or in files that moved to the app level. Some files
+to update at the app level include:
+
+    nems/test/nems.configure.*.IN -->  app/parm/nems.configure.*.IN
+    nems/test/gsm_config          -->  app/compset/gsm_config
+
+Note that there may be more files to update that are not listed here.
+
+
+Step 3.5: 
+---------
+
+If you are using revision numbers in your svn:externals, make sure to
+update them to point to the head of each external (by not specifying a
+revision number), as in the example below.
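+
+For example, a pinned external such as this one (using one of the
+branch URLs from above; yours will differ):
+
+    NEMS  -r 93381  https://svnemc.ncep.noaa.gov/projects/nems/branches/NEMSUpdate/UGCSWeather
+
+would become:
+
+    NEMS  https://svnemc.ncep.noaa.gov/projects/nems/branches/NEMSUpdate/UGCSWeather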
+
Index: checkout/doc/milestones.dox
===================================================================
--- checkout/doc/milestones.dox	(nonexistent)
+++ checkout/doc/milestones.dox	(revision 94669)
@@ -0,0 +1,32 @@
+/**@page milestones Milestone Revisions
+
+The pages in this section relate to development milestones for NEMS
+applications. The pages' titles include a revision number, application
+name, and milestone number. A revision prefix indicates whether it is
+a branch (DREV) or trunk (R) revision. New development occurs on a
+code repository branch. Stable branch versions are merged back to the
+repository trunk.
+
+Web pages include the purpose of the milestone, grids, run duration,
+validation, and other information.
+
+  + @subpage milestone_DREV93202
+  + @subpage milestone_DREV90957
+  + @subpage milestone_DREV89738
+  + @subpage milestone_DREV88884
+  + @subpage milestone_DREV87779
+  + @subpage milestone_DREV84552
+  + @subpage milestone_DREV84205
+  + @subpage milestone_DREV80567
+  + @subpage milestone_DREV80061
+  + @subpage milestone_DREV79954
+  + @subpage milestone_DREV76675
+  + @subpage milestone_DREV73964
+  + @subpage milestone_DREV73436
+  + @subpage milestone_DREV70089
+  + @subpage milestone_DREV61075
+  + @subpage milestone_DREV58214
+  + @subpage milestone_DREV53978
+
+*/
+
Index: checkout/doc/REPORT-20160531-WAM-IPE-initial-1way.md
===================================================================
--- checkout/doc/REPORT-20160531-WAM-IPE-initial-1way.md	(nonexistent)
+++ checkout/doc/REPORT-20160531-WAM-IPE-initial-1way.md	(revision 94669)
@@ -0,0 +1,227 @@
+Initial One-Way WAM to IPE Connection Test Report {#REPORT-20160531-WAM-IPE-initial-1way}
+=================================================
+
+\date 05/31/2016
+
+Versioning
+----------
+
+**User**: Robert.Oehmke
+
+**Project**: nems
+
+**Platform**: Theia Cray CS400
+
+**ESMF Version**: ESMF_7_0_0
+
+### Repositories
+
+Application:
+
+     https://svnemc.ncep.noaa.gov/projects/ipe/WAM-IPE
+        -r 76675 (May 2016)
+
+NEMS:
+
+    https://svnemc.ncep.noaa.gov/projects/nems/branches/WAM-IPE/milestone3
+        -r 76674 (May 2016)
+
+WAM/GSM:
+
+    https://svnemc.ncep.noaa.gov/projects/gsm/branches/WAM-IPE/milestone3
+        -r 76469 (May 2016)
+
+IPE:
+
+    https://github.com/IPEBestModelInTheWorld/ipe/trunk
+        -r 362 (May 2016)
+
+IPE_CAP:
+
+    https://svnemc.ncep.noaa.gov/projects/ipe/branches/nuopc_cap
+        -r 75858? (May 2016)
+
+### Model Versions
+  
+ * Ionosphere Plasmasphere Electrodynamics (IPE) model R362 (May 2016)
+
+ * Whole Atmosphere Model (WAM) R76469 (May 2016)
+
+
+Execution
+---------
+
+### Environment    
+
+    COMPONENTS=( GSM, IPE, DATAWAM, DATAIPE)
+    IPE_SRCDIR=$ROOTDIR/IPE
+    IPE_BINDIR=$ROOTDIR/IPE-INSTALL
+    DATAWAM_SRCDIR=$ROOTDIR/DATAWAM
+    DATAWAM_BINDIR=$ROOTDIR/DATAWAM-INSTALL
+    DATAIPE_SRCDIR=$ROOTDIR/DATAIPE
+    DATAIPE_BINDIR=$ROOTDIR/DATAIPE-INSTALL
+    source /etc/profile
+    module load intel impi netcdf
+        Intel: 14.0.2
+        Intel MPI: 4.1.3.048
+        NetCDF: 4.3.0
+    module use /scratch4/NCEPDEV/nems/save/Gerhard.Theurich/Modulefiles
+    module load esmf/7.0.0
+
+### NEMS Build Command
+     
+    NEMS/NEMSAppBuilder
+
+### NEMS Compsets
+
+#### `adhoc_1hr_spacewx_gsm%wam%T62_ipe%80x170`
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+###############################################################################
+#
+#  WAM-IPE coupled run
+#
+###############################################################################
+
+export TEST_DESCR="WAM-IPE 1h coupled run"
+
+# - gsm configuration ---
+export_gsm
+export CDATE=2009011500
+export WLCLK=30
+export NHRS=1
+export FHOUT=1
+export TASKS=40
+export PE1=32
+export THRD=1
+export QUILT=.false.
+export FDFI=0
+export CP2=.false.
+export IDEA=.true.
+export IDVC=3
+export THERMODYN_ID=3
+export SFCPRESS_ID=2
+export SPECTRALLOOP=2
+
+# - nems.configure ---
+export_nems
+export nems_configure=med_atm_ipm
+export atm_model=gsm
+export atm_petlist_bounds="0 15"
+export ipm_model=ipe
+export ipm_petlist_bounds="16 31"
+export med_model=spaceweather
+export med_petlist_bounds="32 39"
+export coupling_interval_fast_sec=180.0
+export coupling_interval_sec=180.0
+
+export F107_KP_SIZE=56
+export F107_KP_INTERVAL=10800
+export WAM_IPE_COUPLING=.true.
+export HEIGHT_DEPENDENT_G=.true.
+export F107_KP_SKIP_SIZE=24
+
+# - component specific setup calls ---
+setup_wam_T62_2009011500
+setup_ipe
+setup_spaceweather_gsm%wam%T62_ipe%80x170
+
+# -
+RUN_SCRIPT=rt_gfs.sh
+
+# - validation
+export CNTL_DIR=adhoc_1hr_spacewx_gsm%wam%T62_ipe%80x170_V0002
+export LIST_FILES="IPE.inp SMSnamelist \
+                  sigf00 sigf01 sfcf00 sfcf01 flxf00 flxf01 \
+                  plasma00 plasma01 plasma02 plasma03 plasma04 \
+                  plasma05 plasma06 plasma07 plasma08 plasma09 \
+                  plasma10 plasma11 plasma12 plasma13 plasma14 \
+                  plasma15 plasma16 \
+                  wam3dgridnew2.nc ipe3dgrid2.nc wam2dmesh.nc"
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+### NEMS Configuration
+
+#### `nems.configure.med_atm_ipm.IN`
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#############################################
+####  NEMS Run-Time Configuration File  #####
+#############################################
+
+# EARTH #
+EARTH_component_list: MED ATM IPM
+EARTH_attributes::
+ Verbosity = max
+::
+
+# MED #
+MED_model:                      _med_model_
+MED_petlist_bounds:             _med_petlist_bounds_
+MED_attributes::
+ Verbosity = max
+ DumpFields = false
+ DumpRHs = false
+::
+
+# ATM #
+ATM_model:                      _atm_model_
+ATM_petlist_bounds:             _atm_petlist_bounds_
+ATM_attributes::
+ Verbosity = max
+::
+
+ 
+
+# IPM #
+IPM_model:                      _ipm_model_
+IPM_petlist_bounds:             _ipm_petlist_bounds_
+IPM_attributes::
+ Verbosity = max
+::
+
+# Run Sequence #
+runSeq::
+ @_coupling_interval_sec_
+   ATM -> MED :remapMethod=redist
+   MED
+   MED -> IPM :remapMethod=redist
+   ATM
+   IPM
+ @
+::
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
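+
+Here the `_coupling_interval_sec_` placeholder is filled from the
+compset's `coupling_interval_sec` (180.0 s above), so WAM and IPE
+exchange fields through the space weather mediator every 180 seconds.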
+
+### Runtime Data Files
+
+IPE:
+
+    /scratch3/NCEPDEV/swpc/noscrub/Naomi.Maruyama/ipe/grid/apex/1/GIP_apex_coords_global_lowres_new20120705
+
+WAM: 
+
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/WAM/T62_2009011500/*anl* 
+
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/WAM/T62_2009011500/wam_input_f107_kp.txt
+
+### Run Directory
+    
+    /scratch3/NCEPDEV/swpc/scrub/Robert.Oehmke/rt_82581/adhoc_1hr_spacewx_gsm%wam%T62_ipe%80x170
+
+Validation
+----------
+	
+Validated by visually comparing the fields as produced by WAM to the
+fields in IPE after being transferred through the space weather
+mediator.  The transfer includes regridding, so the fields are not
+expected to be an exact copy.
+
+| Field                    | Source Grid          | Destination Grid   | Status            |
+| :----------------------- | :------------------- | :----------------- | :---------------- |
+| `temp_neutral`           | WAM Reduced Gaussian | IPE Flux Tube Grid | Visually Verified |
+| `northward_wind_neutral` | WAM Reduced Gaussian | IPE Flux Tube Grid | Visually Verified |
+| `eastward_wind_neutral`  | WAM Reduced Gaussian | IPE Flux Tube Grid | Visually Verified |
+| `upward_wind_neutral`    | WAM Reduced Gaussian | IPE Flux Tube Grid | Visually Verified |
+| `o_density`              | WAM Reduced Gaussian | IPE Flux Tube Grid | Visually Verified |
+| `o2_density`             | WAM Reduced Gaussian | IPE Flux Tube Grid | Visually Verified |
+| `n2_density`             | WAM Reduced Gaussian | IPE Flux Tube Grid | Visually Verified |
\ No newline at end of file
Index: checkout/doc/GRID_mom5_1deg_tripole.md
===================================================================
--- checkout/doc/GRID_mom5_1deg_tripole.md	(nonexistent)
+++ checkout/doc/GRID_mom5_1deg_tripole.md	(revision 94669)
@@ -0,0 +1,69 @@
+Modular Ocean Model 5 (MOM5) Tripole Grid (1 deg) {#GRID_mom5_1deg_tripole}
+=================================================
+
+Description
+-----------
+
+This page describes a MOM5 grid that is tripolar at 1 degree
+resolution. The Murray (1996) tripolar grid is a regular spherical
+grid south of 65N and bipolar north of 65N. Longitude runs from -279.5
+to 79.5 degrees with exact 1 degree spacing, and latitude runs from
+-81.5 (edge of Antarctic land) to 89.5 degrees with uniform 1 degree
+spacing outside the tropical region, where Gaussian spacing is used so
+that the equatorial region has higher resolution.
+ 
+| Long Name                     | Name   | Value  |
+| :---------------------------- | :----- | :----- |
+| Number of longitudinal points |   N<sub>i</sub>   |  360   |
+| Number of latitudinal points  |   N<sub>j</sub>   |  200   |
+| Minimum longitude             | &nbsp; | -279.5 |
+| Maximum longitude             | &nbsp; | 79.5   |
+| Minimum latitude              | &nbsp; | -81.5  |
+| Maximum latitude              | &nbsp; | 89.5   |
+
+Longitude Plot
+--------------
+
+\image html  GRID_mom5_1deg_tripole-ocnlon.gif
+
+Latitude Plot
+-------------
+
+\image html  GRID_mom5_1deg_tripole-ocnlat.gif
+
+Mask Plot
+---------
+
+\image html GRID_mom5_1deg_tripole-ocnmask.gif
+
+Data Decomposition
+------------------
+
+The grid and data decomposition is done regularly on a preferred 6x4
+processor layout: the 360 longitudinal values are regularly decomposed
+into 6 ranges, and the 200 latitudinal values are regularly decomposed
+into 4 ranges (see the sketch after the example below).
+
+* \subpage GRID_mom5_lonlat
+
+For example, on the first processor, longitude range is [-279.5,
+-220.5], latitude range is [-81.5, -32.5]; on the second processor,
+longitude range is [-219.5, -160.5], latitude range is [-81.5, -32.5];
+and finally on the last processor, longitude range is [20.5, 79.5],
+latitude range is [40.5, 89.5].  
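+
+A minimal Python sketch of this regular decomposition (illustrative
+only; the rank-to-block ordering is an assumption):
+
+    def mom5_block(rank, ni=360, nj=200, px=6, py=4):
+        # Blocks are 60 longitudes x 50 latitudes; ranks are assumed to
+        # sweep longitude fastest, starting at the grid's southwest corner.
+        i, j = rank % px, rank // px
+        di, dj = ni // px, nj // py
+        # Return zero-based (start, end) index ranges along each axis.
+        return (i * di, (i + 1) * di - 1), (j * dj, (j + 1) * dj - 1)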
+
+References
+----------
+
+1. Murray, R. J., 1996: Explicit generation of orthogonal grids for
+  ocean models. Journal of Computational Physics, 126, 251-273.
+
+2. http://nomads.gfdl.noaa.gov/CM2.X/documentation/ocean_grid_doc.html
+
+3. Section 4.2 http://data1.gfdl.noaa.gov/~arl/pubrel/r/mom4p1/src/mom4p1/doc/guide4p0.pdf
+
+4. GridSpec file describing the grid on Zeus:  /home/Fei.Liu/noscrub/global_box1/INPUT/grid_spec.nc
+
+\todo The referenced GridSpec file on Zeus is gone because the NOAA
+Zeus cluster was retired.  Either remove reference \#4 or find the
+file
\ No newline at end of file
Index: checkout/doc/links.md
===================================================================
--- checkout/doc/links.md	(nonexistent)
+++ checkout/doc/links.md	(revision 94669)
@@ -0,0 +1,52 @@
+Relevant Links {#links}
+==============
+
+* Other models:
+
+  * [Community Earth System Model (CESM)] (http://www2.cesm.ucar.edu/)
+
+    * [Systematically label different run configurations] (http://www.cesm.ucar.edu/models/cesm1.2/cesm/doc/modelnl/compsets.html).
+
+  * [Met Office Unified Model] (http://www.metoffice.gov.uk/research/modelling-systems/unified-model)
+
+* External Frameworks:
+
+  * [National Unified Operational Prediction Capability (NUOPC)] (https://earthsystemcog.org/projects/nuopc/)
+
+    * [NUOPC rules for model components](https://earthsystemcog.org/projects/nuopc/compliance_testing)
+
+  * [Earth System Modeling Framework (ESMF)] (http://www.earthsystemmodeling.org/)
+
+    * [ESMF Workspace] (https://www.earthsystemcog.org/projects/esmf/), the
+
+  * [Earth System Grid Federation (ESGF)] (http://esgf.llnl.gov/)
+
+  * [Earth System Prediction Suite] (https://www.earthsystemcog.org/projects/esps/)
+
+    * [Ocean components table] (https://www.earthsystemcog.org/projects/esps/ocean_models)
+
+* NEMS
+
+  * [VLAB NEMS Modeling Applications Page] (https://vlab.ncep.noaa.gov/group/guest/welcome?p_p_id=101&p_p_lifecycle=0&p_p_state=maximized&p_p_mode=view&_101_struts_action=%2Fasset_publisher%2Fview_content&_101_returnToFullPageURL=%2Fgroup%2Fguest%2Fwelcome&_101_assetEntryId=2316208&_101_type=content&_101_groupId=95585&_101_urlTitle=nems-applications&_101_redirect=https%3A%2F%2Fvlab.ncep.noaa.gov%2Fgroup%2Fguest%2Fwelcome%3Fp_p_id%3D3%26p_p_lifecycle%3D0%26p_p_state%3Dmaximized%26p_p_mode%3Dview%26_3_groupId%3D0%26_3_keywords%3Dnems%2Bapplication%26_3_struts_action%3D%252Fsearch%252Fsearch%26_3_redirect%3D%252Fgroup%252Fguest%252Fwelcome&inheritRedirect=true)
+
+  * [WAM-IPE space weather coupling] (http://cog-esgf.esrl.noaa.gov/projects/wam_ipe/)
+
+  * [Git code versioning system] (https://git-scm.com/)
+
+  * [Subversion (SVN)] (https://subversion.apache.org/)
+
+  * [VLab Wiki guidance] (https://vlab.ncep.noaa.gov/redmine/projects/vlab/wiki/Help_for_Project_OwnersManagers#Adding-Members-to-a-Project)
+
+  * [NEMS documentation survey spreadsheet] (https://docs.google.com/spreadsheets/d/1CLT66uzJrjrsY-um0jB5hU-Gfeh3_VCIJDA4-Ibmu5s/edit#gid=0).
+
+* Other Links:
+
+  * [Next-Generation Global Prediction System (NGGPS)] (http://www.nws.noaa.gov/ost/nggps/)
+
+  * [NWP Information Technology Environment (NITE)] (http://www.dtcenter.org/eval/NITE/)
+
+    * [preliminary NITE design] (http://www.dtcenter.org/eval/NITE/NITE-report-AOP2014.pdf)
+
+-
+
+\todo Update link to WAM-IPE webpage when if it is moved to repo
Index: checkout/doc/running.md
===================================================================
--- checkout/doc/running.md	(nonexistent)
+++ checkout/doc/running.md	(revision 94669)
@@ -0,0 +1,886 @@
+Running: NEMSCompsetRun {#running}
+=======================
+
+The old regression test system has been replaced by a new system.  It
+has a different design than the old one.  It has a superset of the
+capabilities of the old system, but the different design leads to
+both advantages and disadvantages.
+
+Presently, that implementation is available via the NEMS/tests/rtgen
+script and two scripts it generates (rtrun, rtreport).  For backward
+compatibility, there is a wrapper "rt.sh" script so that users who are
+only running the regression tests (not modifying them) do not have to
+learn a new system.
+
+Design and Capabilities
+------------------------
+
+This system works on a different principle than the older one.  The
+old system ran shell scripts specific to each model or test which
+copied files from outside the NEMS test area and ran external programs
+to generate some inputs.
+
+The new system has a directory of prepared inputs, has no external
+dependencies, and simply runs the NEMS executable without any
+test-specific scripts.  In other words, scripts like the
+`exglobal_fcst_nems.sh` are no longer used.  This makes porting and
+workflow changes simpler, but has the disadvantage of not testing
+model workflow scripts.  That disadvantage is intentional; the purpose
+of the NEMS regression tests is to test the NEMS, not model workflow
+scripts.
+
+Running the System
+------------------
+
+This section explains how to run the system in its simplest form.
+Later sections discuss
+\ref run_sub "running subsets of the compsets",
+\ref dep_res "dependency resolution", and
+\ref list_avail "available compsets".
+We provide two methods: a
+simple way using the rt.sh wrapper, and a more complex way that
+provides complete control and flexibility.
+
+### Simple Method: rt.sh
+
+For backward compatibility, there is an rt.sh script that acts
+similarly to the old rt.sh.  Some aspects are different to give extra
+flexibility.
+
+To execute in an sh-family shell (sh, bash, ksh, etc.)
+
+    cd NEMS/tests
+    ./rt.sh (options) > rt.log 2>&1 &
+
+To execute in a csh-family shell (csh, tcsh):
+
+    cd NEMS/tests
+    ./rt.sh (options) >& rt.log &
+
+This will run rt.sh in the background and send all output to the
+`rt.log` file.  To see the success or failure information, look in the
+`rt.log` file.
+
+The `(options)` specify what is to be run.  Common needs are:
+
+* `-f` = run the full test suite
+* `-s` = only run the "standard" tests
+* `-t setname` = run the specified set of tests.  See
+`compsets/all.input` for the full list.  Common names are `standard`,
+`gfs`, and `nmm`
+* `-b setname` = regenerate the baseline.
+* `-n /path/to/baseline` = specifies the location of the baseline
+when running the suite in verification or baseline generation modes.
+* `-r PLATFORM:/path/to/rtgen.###` - used by the full test method.
+See below.
+* `-p project` = set the project or account to use for CPU hours.
+If unspecified, one will be picked automatically based on
+CPU availability.
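+
+For example, to run the standard tests against an existing baseline
+while charging a specific project (the path and project name below
+are illustrative):
+
+    ./rt.sh -s -p cmp -n /path/to/baseline > rt.log 2>&1 &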
+
+### Full Test Method
+
+The process of running is:
+
+    ./NEMS/tests/rtgen   # generates rtrun and rtreport commands
+    /path/to/USERNAME/rtgen.(ID)/rtrun (options)
+    /path/to/USERNAME/rtgen.(ID)/rtreport
+
+To use this for a commit to the trunk, one must copy the results to
+the NEMS/tests directory.  This could be done manually, or one could
+run rt.sh and tell it to skip the rtgen step.  To do this, use the
+`rt.sh -r` option:
+
+    ./rt.sh -r (PLATFORM):/path/to/USERNAME/rtgen.(ID)
+
+where `(PLATFORM)` is "theia" or "wcoss.phase1"
+
+The rest of this section explains the purpose and function of rtgen,
+rtrun and rtreport.
+
+### Step 1: Generate Test Scripts (rtgen)
+
+The first step is to run `rtgen`.  This will generate a set of scripts
+to run the requested tests.  If you do not request any tests, it will
+run all tests.
+
+    ./NEMS/tests/rtgen
+
+That command will give you instructions and will log the more
+important parts of its execution:
+
+    11/17 18:42:38Z rtgen-INFO:  Will run all known tests.
+    11/17 18:42:50Z rtgen-INFO:  Auto-chosen project for job submission is 'cmp'
+    11/17 18:42:51Z rtgen-INFO:  Auto-chosen ptmp is '/path/to/USERNAME'
+    11/17 18:42:51Z rtgen-INFO:  Generating workflow with id 23768.
+    11/17 18:42:55Z rtgen-INFO:  Requested test has been generated.
+    You need to run the test now.   You have three options:
+
+    OPTION 1: Put this in your cron:
+    */3 * * * * /path/to/USERNAME/rtgen.23768/rtrun --step --zero-exit \\
+    > /path/to/USERNAME/rtgen.23768/rtrun-cron.log 2>&1
+
+    OPTION 2: Run this program:
+    /path/to/USERNAME/rtgen.23768/rtrun --loop
+
+    OPTION 3: Verbose mode: run this program:
+    /path/to/USERNAME/rtgen.23768/rtrun -v --loop
+    Adding -n to that command will disable colors.
+
+### Step 2: Run the Test (rtrun)
+
+The rtrun command runs the tests until all have succeeded or failed.
+You have three options for how to run this.  The easiest execution
+option is number 3, which runs on the command line and reports the
+queue status every few minutes.  The path to rtrun will vary, but the
+command will look something like this:
+
+    /path/to/USERNAME/rtgen.23768/rtrun -v --loop
+
+If the colors annoy you, add the `-n` switch, and if you don't want
+the queue state, remove the `-v` switch.
+
+The components of that path are:
+
+* `/path/to` - a scrub area, such as /scratch4/NCEPDEV/stmp4 or /ptmpp1
+* `USERNAME` - your username, such as `emc.nemspara` or `Samuel.Trahan`
+
+The `rtrun` command will generate output like this:
+
+    11/17 00:19:21Z rtrun INFO: check dependencies and submit jobs...
+    11/17 00:19:22Z rtrun INFO: check status...
+    11/17 00:19:22Z rtrun INFO: workflow is still running and no jobs have failed.
+    11/17 00:19:22Z rtrun INFO: sleep 2
+    11/17 00:19:24Z rtrun INFO: get queue information
+
+    Job ID  Reserv   Queue   Procs ST Queue Time  Stdout Location
+    -------- ------ --------- ----- -- ----------- ------------------------------------
+    573626        dev          64 R  11/17 00:14 /.../tmp/log/test_gfs_gocart_nemsio.log
+    From bjobs -l  -u Samuel.Trahan (age 0 sec.)
+    11/17 00:19:24Z rtrun INFO: sleep 100
+
+It will keep looping until all jobs have succeeded or failed.  If all
+goes well, the tests will all pass and you will see this message:
+
+    11/17 00:21:04Z rtrun INFO: check dependencies and submit jobs...
+    11/17 00:21:05Z rtrun INFO: check status...
+    11/17 00:21:05Z rtrun INFO: workflow is complete and all jobs succeeded.
+
+### Step 3: Report Results (rtreport)
+
+At that point, you can run rtreport to get a report of the tests.
+Actually, you can run rtreport at any time.  If the tests are not yet
+complete, it will tell you which ones are complete.  It will report
+all it knows about failed tests too.  There are two output formats:
+
+To run:
+
+    /path/to/USERNAME/rtgen.23768/rtreport [mode]
+
+Where the optional `mode` is one of:
+
+* `status` - short output that only lists failed tests and counts
+the number of failed, complete, and unfinished tests.
+
+* `txt` - full text output of all information (the default).
+
+The output of `txt` mode (the default) looks something like this:
+
+    BUILD nmm.x: SUCCEEDED
+    BUILD nmm.debug.x: SUCCEEDED
+    BUILD gsm.x: SUCCEEDED
+    BUILD gsm_gocart.x: SUCCEEDED
+    TEST #1: PASS
+    Test nmm_cntrl starting.
+    Wed Nov 16 22:51:23 UTC 2016
+    .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_bin_0000h_00m_00.00s: bit-for-bit identical
+    .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_bin_0024h_00m_00.00s: bit-for-bit identical
+    .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_bin_0048h_00m_00.00s: bit-for-bit identical
+    .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_nio_0000h_00m_00.00s: bit-for-bit identical
+    .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_nio_0024h_00m_00.00s: bit-for-bit identical
+    .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_nio_0048h_00m_00.00s: bit-for-bit identical
+    .../REGRESSION_TEST/NMMB_glob/nmmb_rst_01_bin_0024h_00m_00.00s: bit-for-bit identical
+    .../REGRESSION_TEST/NMMB_glob/nmmb_rst_01_nio_0024h_00m_00.00s: bit-for-bit identical
+    TEST PASSED
+    TEST #2: PASS
+    Test nmm_nemsio starting.
+    ... information about more tests ...
+
+
+### Rerunning Failed Tests
+
+If a test fails, you can request that it be rerun via the `rtrewind`
+command.  The command is located in the same directory as `rtrun`
+and can be called in two different ways:
+
+    /path/to/USERNAME/rtgen.23768/rtrewind -a
+
+    /path/to/USERNAME/rtgen.23768/rtrewind job1 [job2 [...]]
+
+The first method requests a rerun of ALL tests and builds while the
+second requests only certain ones be rerun.
+
+The jobs (`job1`, `job2`, ...) are the names from the test suite such
+as `gsm.x` or `nmm_cntrl`.  You can optionally include `test_` or
+`build_` before the name, as it is printed by the `rtreport` command.
+
+\anchor run_sub
+### Running Subsets of the Test Suite
+
+The test suite, as of this writing, has 48 tests and 5 build options.
+Frequently, you only want to run a few of them.  The `rtgen` script
+has a simple set-arithmetic language for specifying what to run.  The
+subsetting is done on the command line.  For example, to run all
+standard nmm tests, you need to take the intersection of those two
+sets of tests:
+
+    ./NEMS/tests/rtgen 'inter(nmm,standard)'
+
+The `rtgen` will generate a workflow to run just those tests.  
+
+Other subsetting operations:
+
+    union(nmm,wam)   # run all nmm and wam tests
+    minus(gfs,wam)   # run all gsm (gfs) tests that are not wam tests
+    {gfs_slg,nmm_cntrl}  # run the gfs_slg and nmm_cntrl tests
+
+You can combine multiple operations:
+
+    minus(inter(union(gfs,nmm),standard),{gfs_slg,nmm_cntrl})
+
+That will ask rtgen to run all gsm (gfs) and nmm tests that are
+standard tests, except for `gfs_slg` and `nmm_cntrl`.
+
+Despite that exclusion, rtgen will still run the gfs_slg test.  Why?
+Dependency resolution.
+
+\anchor dep_res
+### Dependency Resolution 
+
+Some tests have dependencies, and `rtgen` will resolve those
+dependencies automatically, similar to how `make` works.  For example,
+the `gfs_slg_rsthst` requires the `gfs_slg` to run first.  Output from
+`gfs_slg` is used as input to `gfs_slg_rsthst`.  If you ask `rtgen` to
+run `gfs_slg_rsthst` without running `gfs_slg`, it will see the
+dependency and add `gfs_slg` to your list of tests.  The builds are
+handled the same way.  The `gfs_slg` has a dependency on the build
+`gsm.x`, and so `rtgen` will always add the `gsm.x` build if you
+select the `gfs_slg` test.
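+
+For example (using the set syntax shown above), this request:
+
+    ./NEMS/tests/rtgen '{gfs_slg_rsthst}'
+
+will generate the `gsm.x` build, the `gfs_slg` test, and the
+`gfs_slg_rsthst` test, because of the dependency chain just
+described.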
+
+\anchor list_avail
+### List of Available Compsets and Sets
+
+The configuration for `rtgen` is stored in the compsets/all.input file
+in the app level repository.  This is where you specify the available
+tests and sets of tests.
+
+The top few lines of that file look like this
+
+    load 'gsm.input'
+    load 'nmm.input'
+    run nmm_cntrl              @ nmm, standard, baseline, nmmglob
+    run nmm_nemsio             @ nmm,                     nmmglob
+    run nmm_rest               @ nmm,                     nmmglob
+    ... many more "run" statements ...
+
+The first two lines import the details of the test from other files.
+The lines beginning with `run` specify a test to run and the sets it
+belongs to.  The test must be one declared in the other file,
+as discussed later in this document.
+
+The list of sets after the @ sign are the ones recognized by the
+\ref run_sub "subsetting functionality of rtgen".
+
+Note that you can enable tests on only certain platforms by including
+a comparison operator in the list of subsets:
+
+    run gfs_slg_2thread        @ gfs, standard, baseline, slg, plat==wcoss.phase1
+
+This line ensures the `gfs_slg_2thread` test is only available on WCOSS Phase 1.
+
+
+
+Work Area Contents
+------------------
+
+Running the `rtgen` creates a directory in a scrub area which will
+contain the generated scripting system, input and output files, logs,
+and resource usage information.  This section documents those files
+and directories.
+
+Recall that running `rtgen` creates a directory with a name like this:
+
+    /path/to/USERNAME/rtgen.23768
+
+That directory contains the following:
+
+* rtrun script
+
+* rtreport script
+
+* jobs directory
+
+* scripts directory
+
+* ush directory
+
+* src directory
+
+* install.sh
+
+* uninstall.sh
+
+* exec directory
+
+* include directory
+
+* rocoto directory
+
+* com directory
+
+* tmp directory
+
+* tmp/log directory
+
+### Jobs, Scripts and Ush
+
+These are the three tier NCEP workflow directories and have the usual
+meanings:
+
+* jobs - sets up the environment and passes control to the "scripts" level
+
+* scripts - high-level logic for each test
+
+* ush - low-level utility functions
+
+For each test, there is one "jobs" directory file and one "scripts"
+directory file.  The "scripts" and "jobs" directories are populated
+from the [test blocks](new-tests), which are discussed in great
+detail in the \ref desc_lang "Compset Description Language" section.
+
+### Src, Exec, and Include
+
+The `src` directory does not contain source code.  Instead, it
+contains two scripts that describe how to build or uninstall the
+`NEMS.x`:
+
+* install.sh - knows how to build the NEMS.x based on the instructions
+in the [build blocks](new-build) as explained in the
+[Test Description Language](desc_lang) section in great detail.
+
+* uninstall.sh - deletes the copies of `NEMS.x` and `modules.nems`
+created by install.sh.
+
+The `install.sh` creates executables and modulefiles which are copied
+into the `exec` and `include` directories.
+
+* exec - one executable for each NEMS build
+
+* include - one file for each NEMS build, containing a sequence of
+"module load" commands.  These commands will be run before
+executing the NEMS.x
+
+### Rocoto Directory
+
+The `rtgen` makes one file in the `rocoto` directory.  The `rtrun`
+will create a second file.
+
+* workflow.xml - the definition of the workflow generated by `rtgen`.
+This includes dependencies and resource requirements.  There is one
+shell command for each test or build.
+
+* workflow.db - created by `rtrun`, this contains the Rocoto internal
+state information.
+
+### Tmp and Logs
+
+The `tmp` directory contains all logs and all execution directories
+for each test.
+
+* tmp/log/rocoto.log - log file from Rocoto.  Contains information about
+batch system events, such as job failures or job submissions.
+
+* tmp/log/*.log - all other files contain logs about a test or build
+
+* tmp/* - all other directories are work areas for tests.  They
+contain inputs and outputs from the NEMS.x
+
+### Scripts rtrun and rtreport
+
+These are discussed in earlier sections.  The scripts are generated
+automatically by `rtgen`.  The `rtrun` runs Rocoto and the `rtreport`
+scans the reports, combining them into one text file.
+
+### COM directory
+
+This directory contains one subdirectory for each test with all
+verified files as described in a test's \ref criteria block.
+It also contains the "report.txt" file with the report of the test
+success or failure.
+
+\anchor desc_lang
+Compset Description Language
+----------------------------
+
+This chapter discusses the language used by the `rtgen` tool to
+describe regression tests and compsets.  The language consists of
+"modules" which are simply a collection of variables and functions. A
+module has a type: build, test, hash, etc.  A set of `run` commands
+list which runnable modules should be executed.
+
+### Variable Definitions and Modules
+
+The simplest type of module is a hash, which looks like this:
+
+    nems_vars={
+    atm_model='none'
+    atm_petlist_bounds="-1 -1"
+    ocn_model='none'
+    ocn_petlist_bounds="-1 -1"
+    ice_model='none'
+    ice_petlist_bounds="-1 -1"
+    med_model='nems'
+    med_petlist_bounds="-1 -1"
+    med_atm_coupling_interval_sec='-1'
+    med_ocn_coupling_interval_sec='-1'
+    }
+
+In this example, we have declared a hash called `nems_vars` which
+contains several variables, such as `atm_model` and
+`atm_petlist_bounds`.  Later on, another module declaration can "use"
+this module, to import its variables:
+
+    nmm_vars_global={
+    use plat%nmm_dflt
+    use nems_vars
+    use common_vars
+    use nmm_vars
+    use nmm_aliases
+    use nmm_uncoupled
+    GBRG="glob"
+    CNTL_NAME='NMMB_glob'
+    }
+
+Values can include variable substitution, which uses a syntax similar
+to shell, but with different escape characters:
+
+    common_vars={
+    THRD=1
+    WLCLK=15
+    GEFS_ENSEMBLE=0
+    GEN_ENSEMBLE=0
+    WRITE_DOPOST='.false.'
+    POST_GRIBVERSION='grib1'
+    CONF="@[plat%PARMnems]"
+    }
+
+Here, the `CONF` variable in the `common_vars` module has the value of
+the `PARMnems` variable in the `plat` module.
+
+### Strings
+
+There are three ways of specifying a string:
+
+* Double quotes: "... text here with @[VARIABLE] expansion ..."
+* Single quotes: '... text here with no variable expansion ...'
+* Block string:
+
+    [[[multi-line string
+    with @[VARIABLE] expansion ]]]
+
+If you need to insert a literal @ into the string, you have three
+options.  In these examples, we'll use the multi-line string format:
+
+* [[[  @['this text is not expanded']   ]]]
+* [[[  @["this text is not expanded"]  ]]]
+* [[[ Simple literal @[@] ]]]
+
+###  Embedded Scripts
+
+Most of the scripts required to run the tests are automatically
+generated, but there are occasional instances when you need to specify
+specific code.  This is done via `embed` blocks:
+
+    embed bash nems_regtest_prep(RUNDIR,modules,CNTL) [[[
+    mkdir -p "$RUNDIR" "$CNTL"
+    cd @[RUNDIR]
+    source "$modules"
+    export MPI_TYPE_DEPTH=20
+    export ESMF_RUNTIME_COMPLIANCECHECK=OFF:depth=4
+    ]]]
+
+In this example, we have embedded a bash script called
+`nems_regtest_prep`.  
+
+#### Embedded Script Variables: `$` vs. `@`
+
+In the example script, there are two methods of doing variable substitution:
+
+* `@[RUNDIR]`
+* `"$RUNDIR"`
+
+They have slightly different meanings.  In the case of `@[RUNDIR]`,
+the value of the `RUNDIR` variable is substituted directly in the
+generated script.  If the variable contained any shell metacharacters,
+those would be copied verbatim.  In the case of `$RUNDIR`, the bash
+variable is used instead.  That variable's value is set before the
+code in `nems_regtest_prep` is run.
+
+Either approach is valid.  It is up to the user to decide which one to use.
+
+### Platform Detection
+
+The test suite needs to reconfigure certain aspects based on platform;
+WCOSS vs. Theia vs. GAEA, etc.  This is done with `platform` blocks.
+These are simply modules with a `detect` function.  After all
+platforms are defined, an `autodetect` block selects between them.
+
+Here is an example of a platform.  This is the one for Phase 1 of WCOSS.
+
+    platform wcoss.phase1 {
+    use wcoss.common
+    CPU_ACCOUNT='NAM-T2O'
+    pex='1'
+    cores_per_node=32
+    MPI='LSF'
+    SHORT_TEST_QUEUE='&SHORTQ;'
+    LONG_TEST_QUEUE='&LONGQ;'
+    BUILD_QUEUE='&BUILDQ;'
+
+    embed bash detect [[[
+    # This function is used at PARSE TIME to detect whether we are
+    # on WCOSS Phase 1.  It must be very fast and low resource
+    # usage since the parser runs it.
+    if [[ -d /usrx && -d /global && -e /etc/redhat-release && \\
+    -e /etc/prod ]] ; then
+    # We are on WCOSS Phase 1 or 2.
+    if ( ! cat /proc/cpuinfo |grep 'processor.*32' ) ; then
+    # Fewer than 32 fake (hyperthreading) cpus, so Phase 1.
+    exit 0
+    fi
+    fi
+    exit 1
+    ]]]
+    ... more wcoss stuff ...
+    }
+
+Note the `embed bash` block called `detect`.  This is the bash
+function that is run to detect whether the script is running on WCOSS
+Phase 1.
+
+Once all platforms are defined, there is an autodetect block:
+
+    autodetect plat (/ wcoss.phase1, theia /)
+
+This will define the `plat` variable, which is a duplicate of either
+`wcoss.phase1` or `theia`.
+
+###  Build Definition
+
+The `build` blocks define a method of building an executable.  They
+must define three variables and a function:
+
+* `NEMS.x` = path to the NEMS executable created by this build
+
+* `modules.nems` = list of "module load" commands to execute before
+running the executable
+
+* `target` = file to check to ensure the build succeeded; should be
+the same as the `NEMS.x` variable
+
+* `build` = an `embed bash` function that builds the program.
+
+Here is an example.  This builds the GOCART-capable standalone GSM in
+the NEMSLegacy branch:
+
+    build gsm_gocart.x {
+    use plat
+    NEMS.x="@[plat%EXECrt]/NEMS_gocart.x"
+    modules.nems="@[plat%INCrt]/NEMS_gocart.x.modules"
+    target="@[NEMS.x]"
+    build=NEMSAppBuilder(NEMS.x="@[NEMS.x]",modules.nems="@[modules.nems]",
+    OPTS="app=GSM-GOCART")
+    }
+
+The NEMSAppBuilder function is declared elsewhere.  It is used by most
+of the `build` definitions to avoid duplication.  That function looks
+like this:
+
+    embed bash NEMSAppBuilder(NEMS.x,modules.nems,OPTS)
+    [[[
+    mkdir -p "@[plat%EXECrt]" "@[plat%INCrt]"
+    rm -f "@[NEMS.x]" "@[modules.nems]"
+    cd @[plat%HOMEnems]
+
+    # NOTE: Replace "rebuild" with "norebuild" to disable "gmake clean"
+    ./NEMS/NEMSAppBuilder rebuild $OPTS
+
+    cd @[plat%SRCnems]
+    cp -fp ../exe/NEMS.x "@[NEMS.x]"
+    cp -fp conf/modules.nems "@[modules.nems]"
+    ]]]
+
+Notice that the three variables we're passing from gsm_gocart.x%build
+are in the definition line of NEMSAppBuilder:
+
+    embed bash NEMSAppBuilder(NEMS.x,modules.nems,OPTS)
+    ...
+    build gsm_gocart.x {
+    ...
+    build=NEMSAppBuilder(NEMS.x="@[NEMS.x]",modules.nems="@[modules.nems]",
+    OPTS="app=GSM-GOCART")
+
+### Tests
+
+A test is a module that defines the following:
+
+* dependencies - any other tests or builds that have to run first
+
+* `prep` - a preparation step to run before anything else.  This is
+generally `mkdir`, `module` or `cd` commands.
+
+* `input` - a `filter` block that provides a list of input files or
+directories and instructions on how to copy or filter them.  This is
+described below.
+
+* `execute` - a `spawn` block that describes how to run the `NEMS.x`.
+This is also used to generate job cards to request the needed
+resources.
+
+* `output` - criteria for validating the test output.  These are
+usually `criteria` blocks, described below.
+
+This is the `test` block for the global NMM control.  The text that
+follows describes the meaning of each part:
+
+    # nmm_cntrl test
+    test nmm_cntrl: nmm.x {
+    use nmm_vars_global
+
+    # Convenience variables:
+    RUNDIR_ROOT="@[plat%TMPrt]"
+    RUNDIR="@[RUNDIR_ROOT]/@[TEST_NAME]"
+    TEST_DESCR="Compare NMMB-global results with previous trunk version"
+    CNTL="@[plat%BASELINE]/@[CNTL_NAME]"      # Control baseline area
+    TEST_IN="@[plat%INPUTS]/@[CNTL_NAME]"   # Test-specific input data
+    COM="@[plat%COMrt]/@[TEST_NAME]"
+
+    criteria output {
+    #    NEMS.x output file --------- comparison - control file or dir
+    "nmmb_hst_01_bin_0000h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_hst_01_bin_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_hst_01_bin_0048h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_hst_01_nio_0000h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_hst_01_nio_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_hst_01_nio_0048h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_rst_01_bin_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_rst_01_nio_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+    }
+
+    # The prep is run at the top of any job.  It should do such things
+    # like making directories and loading modules.
+    prep=nems_regtest_prep(
+    RUNDIR="@[RUNDIR]",modules="@[nmm.x%modules.nems]",
+    CNTL="@[CNTL]")
+
+    # The execute step runs the program:
+    spawn execute {
+    { "@[nmm.x%NEMS.x]", ranks="@[TASKS]", threads="@[OpenMPThreads]" }
+    }
+
+    filters input {
+    # work file         operation   input file
+    "input_domain_01"        .copy. "@[TEST_IN]/test_input_nmmb_global"
+    "input_domain_01_nemsio" .copy. "@[TEST_IN]/test_input_nmmb_global.nemsio"
+    "GWD_bin_01"             .copy. "@[TEST_IN]/GWD_bin_01"
+
+    "nems.configure"      .atparse. "@[CONF]/nems.configure.@[nems_configure].IN"
+    "atmos.configure"     .atparse. "@[CONF]/atmos.configure_nmm"
+
+    "configure_file_01"   .atparse. "@[CONF]/nmm_conf/nmm_@[GBRG]_conf.IN"
+    "model_configure"        .copy. "configure_file_01"
+
+    "*"                   .copydir. "@[plat%NMM_DATA]"
+
+    "VEGPARM.TBL"            .copy. "IGBP_VEGPARM.TBL"
+    "LANDUSE.TBL"            .copy. "IGBP_LANDUSE.TBL"
+    "ETAMPNEW_DATA"          .copy. "ETAMPNEW_DATA.expanded_rain"
+    "fort.28"                .link. "global_o3prdlos.f77"
+    "fort.48"                .link. "global_o3clim.txt"
+
+    "solver_state.txt"       .copy. "@[plat%PARMnems]/solver_state.txt"
+    "nests.txt"              .copy. "@[plat%PARMnems]/nests.txt"
+    }
+    }
+
+#### Test Dependencies
+
+The first line (after the comment) is this:
+
+    test nmm_cntrl: nmm.x {
+
+The `: nmm.x` indicates that the `nmm.x` build has to run before the
+`nmm_cntrl` can start.  The test suite will include that dependency in
+its Rocoto or ecFlow automation system.
+
+#### Test Prep
+
+The prep step is a simple script that prepares the environment.  In
+this case, it just runs the nems_regtest_prep, which we discussed
+earlier:
+
+    # The prep is run at the top of any job.  It should do such things
+    # like making directories and loading modules.
+    prep=nems_regtest_prep(
+    RUNDIR="@[RUNDIR]",modules="@[nmm.x%modules.nems]",
+    CNTL="@[CNTL]")
+
+Note that it refers to `@[RUNDIR]` and `@[CNTL]`.  Those variables are
+defined earlier in the same test:
+
+    # Convenience variables:
+    RUNDIR_ROOT="@[plat%TMPrt]"
+    RUNDIR="@[RUNDIR_ROOT]/@[TEST_NAME]"
+    TEST_DESCR="Compare NMMB-global results with previous trunk version"
+    CNTL="@[plat%BASELINE]/@[CNTL_NAME]"      # Control baseline area
+    TEST_IN="@[plat%INPUTS]/@[CNTL_NAME]"   # Test-specific input data
+    COM="@[plat%COMrt]/@[TEST_NAME]"
+
+#### Test Input Filter
+
+This block specifies the input files and how to prepare them.  It
+declares an `input` variable inside the `nmm_cntrl` test, which is of
+type `filters`:
+
+    filters input {
+    # work file         operation   input file
+    "input_domain_01"        .copy. "@[TEST_IN]/test_input_nmmb_global"
+    "input_domain_01_nemsio" .copy. "@[TEST_IN]/test_input_nmmb_global.nemsio"
+    "GWD_bin_01"             .copy. "@[TEST_IN]/GWD_bin_01"
+
+    "nems.configure"      .atparse. "@[CONF]/nems.configure.@[nems_configure].IN"
+    "atmos.configure"     .atparse. "@[CONF]/atmos.configure_nmm"
+
+    "configure_file_01"   .atparse. "@[CONF]/nmm_conf/nmm_@[GBRG]_conf.IN"
+    "model_configure"        .copy. "configure_file_01"
+
+    "*"                   .copydir. "@[plat%NMM_DATA]"
+
+    "VEGPARM.TBL"            .copy. "IGBP_VEGPARM.TBL"
+    "LANDUSE.TBL"            .copy. "IGBP_LANDUSE.TBL"
+    "ETAMPNEW_DATA"          .copy. "ETAMPNEW_DATA.expanded_rain"
+    "fort.28"                .link. "global_o3prdlos.f77"
+    "fort.48"                .link. "global_o3clim.txt"
+
+    "solver_state.txt"       .copy. "@[plat%PARMnems]/solver_state.txt"
+    "nests.txt"              .copy. "@[plat%PARMnems]/nests.txt"
+    }
+
+Notice that there are four different operations in the middle column:
+
+| Local file          | Operation   | Remote file or directory        |  
+| ------------------- | ----------- | ------------------------------- |
+| `"GWD_bin_01"`      | `.copy.`    | `"@[TEST_IN]/GWD_bin_01"`       |
+| `"*"`               | `.copydir.` | `"@[plat%NMM_DATA]"`            |
+| `"fort.28"`         | `.link.`    | `"global_o3prdlos.f77"`         |
+| `"atmos.configure"` | `.atparse.` | `"@[CONF]/atmos.configure_nmm"` |
+
+* `.copy.` - copies the remote file (third column) to the local file
+(first column).  
+
+    cp -p "$third_column" "$first_column"
+
+* `.link.` - makes a symbolic link to the remote file (third column)
+from the local file (first column)
+
+    ln -s "$third_column" "$first_column"
+
+* `.copydir.` - copies from the remote file or directory (third
+column) all files that match the glob (first column) into the local
+directory.
+
+    cp -rp "$third_column"/$first_column .
+
+* `.atparse.` - runs the remote file (third column) through a filter
+to create the local file (first column).  The filter replaces text
+like `@[varname]` with the value of the corresponding variable; for
+example, with `GBRG="glob"`, `nmm_@[GBRG]_conf.IN` expands to
+`nmm_glob_conf.IN`.
+
+In the `.atparse.` variable replacement, only variables from the
+test's module are replaced.  Hence, if you want many variables
+accessible to `.atparse.`d files, you need to either declare or
+`use` them.  The `nmm_cntrl` test does that at the top of its
+declaration:
+
+    test nmm_cntrl: nmm.x {
+    use nmm_vars_global
+
+    # Convenience variables:
+    RUNDIR_ROOT="@[plat%TMPrt]"
+    RUNDIR="@[RUNDIR_ROOT]/@[TEST_NAME]"
+    TEST_DESCR="Compare NMMB-global results with previous trunk version"
+    CNTL="@[plat%BASELINE]/@[CNTL_NAME]"      # Control baseline area
+    TEST_IN="@[plat%INPUTS]/@[CNTL_NAME]"   # Test-specific input data
+    COM="@[plat%COMrt]/@[TEST_NAME]"
+
+Everything in the `nmm_vars_global` module will be available, plus
+all six of the declared "convenience variables."
+
+Note that variables with a period (".") or percent ("%") in their
+name are not yet available.  That will be fixed in a later release.
+
+#### Test Execution
+
+The next step is to actually run the `NEMS.x`:
+
+    # The execute step runs the program:
+    spawn execute {
+    { "@[nmm.x%NEMS.x]", ranks="@[TASKS]", threads="@[OpenMPThreads]" }
+    }
+
+The columns inside the `execute` block have these meanings:
+
+* `"@[nmm.x%NEMS.x]"` - the program to run
+
+* `ranks="@[TASKS]"` - number of mpi ranks
+
+* `threads="@[OpenMPThreads]"` - optional; number of threads per rank.
+Default is 1.
+
+* ppn=8 - not used here; optional.  Specifies the number of MPI ranks
+per node.  The GSM needs this due to memory limits.  Default is
+calculated automatically by the system, and will be the largest
+number of MPI ranks possible.
+
+\anchor criteria
+#### Test Verification or Baseline Generation
+
+The last step is to either verify the results or generate the
+baseline.  Both cases are handled by the output criteria block:
+
+    criteria output {
+    #    NEMS.x output file --------- comparison - control file or dir
+    "nmmb_hst_01_bin_0000h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_hst_01_bin_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_hst_01_bin_0048h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_hst_01_nio_0000h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_hst_01_nio_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_hst_01_nio_0048h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_rst_01_bin_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+    "nmmb_rst_01_nio_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+    }
+
+The columns have this meaning:
+
+* `"nmmb_hst_01_bin_0000h_00m_00.00s"` - local directory file
+
+* `.bitcmp.` - verification method.  Only `.bitcmp.` is supported for now.
+
+* `"@[CNTL]"` - remote directory file or remote directory that
+contains the baseline.  If it is a remote directory, the file is
+assumed to have the same name.
+
+In verification mode, the comparisons are performed after running NEMS.x.
+
+In baseline generation mode, the local file (first column) is copied
+to the remote location (third column).
Index: checkout/doc/HowTo_OldToNewStruct_main.md
===================================================================
--- checkout/doc/HowTo_OldToNewStruct_main.md	(nonexistent)
+++ checkout/doc/HowTo_OldToNewStruct_main.md	(revision 94669)
@@ -0,0 +1,64 @@
+NEMS: Old to New Structure How To  {#HowToOldToNew}
+==================================
+ 
+_Purpose:_ This document walks through the steps of going from the
+old NEMS App folder and svn structure, using the original
+NEMSAppBuilder and NEMSCompsetRun, to the
+\ref structure "new NEMS App folder and svn structure",
+the updated NEMSAppBuilder, and the regression test run system.  As an
+example, we will take the WW3TestBed, an app in the old structure
+consisting of a two-component system of WW3 and GSM (before GSM had a
+cap), and create an app with the new structure and build and run
+systems, including upgrading GSM to its trunk.
+
+_Assumptions:_ 
+This tutorial assumes you have an app working with the original/old
+NEMSAppBuilder and NEMSCompsetRun.  A separate tutorial for adding a
+new component into the new system will be provided.
+ 
+_Starting Point:_ 
+We are starting out with the WW3TestBed App. To get a better idea of the starting point: 
+ 
+    $ svn co -r 83803 https://svnemc.ncep.noaa.gov/projects/nems/apps/WW3TestBed/trunk WW3TestBedr83803
+    $ cd WW3TestBedr83803
+    $ svn propget svn:externals 
+    # for snapshot revisions strictly version all constituent components
+    #NEMS                 -r 72098 https://svnemc.ncep.noaa.gov/projects/nems/branches/NUOPC/development
+    #NEMS/src/atmos/GSM   -r 71778 https://svnemc.ncep.noaa.gov/projects/gsm/branches/NUOPC/gsm67963branch
+    #NEMS/src/atmos/nmm   -r 71873 https://svnemc.ncep.noaa.gov/projects/nems/external_comps/NMM
+    #WW3                  -r 72006 https://svnemc.ncep.noaa.gov/projects/ww3/branches/esmf2/model
+    # for development switch to head revisions
+    #NEMS                 https://svnemc.ncep.noaa.gov/projects/nems/branches/NUOPC/development
+    #NEMS/src/atmos/GSM   https://svnemc.ncep.noaa.gov/projects/gsm/branches/NUOPC/gsm67963branch
+    #NEMS/src/atmos/nmm   https://svnemc.ncep.noaa.gov/projects/nems/external_comps/NMM
+    #WW3                  https://svnemc.ncep.noaa.gov/projects/ww3/branches/esmf2/model
+    # Test bed for example of running NEMS in WW3
+    NEMS                -r 83802 https://svnemc.ncep.noaa.gov/projects/nems/branches/UGCS-Seasonal/twoWayWW3_from_r80562
+    NEMS/src/atmos/gsm  -r 80525 https://svnemc.ncep.noaa.gov/projects/gsm/trunk
+    NEMS/src/atmos/nmm  -r 80429 https://svnemc.ncep.noaa.gov/projects/nmmb/trunk
+    NEMS/src/chem       -r 80344 https://svnemc.ncep.noaa.gov/projects/nceplibs/chem/trunk
+    WW3                 -r 82617 https://svnemc.ncep.noaa.gov/projects/ww3/branches/esmf2/model
+    $ ls 
+    NEMS  WW3  ww3TestBed.appBuilder  ww3TestBed.compsetRun
+
+Note that here we have 5 externals (NEMS, GSM, NMM, CHEM, and WW3), and all compsets are in the NEMS/compsets directory.
+ 
+To build and run: 
+
+    ./NEMS/NEMSAppBuilder
+    ./NEMS/NEMSCompsetRun -compset NEMS/compsets/cfsr%20150401_1day_leapfrog_gsm%slg%T126_ww3%t188
+
+This runs a two-way coupled GSM<->WW3 system on a T126 grid, similar
+to what is documented in \ref milestone_DREV84205.
+
+Stages of this tutorial:
+
+* \subpage HowToOldToNewFolder
+
+* \subpage HowToOldToNewAppBuilder
+
+* \subpage HowToOldToNewCompBranch
+
+* \subpage HowToOldToNewCompsets
+
+* \subpage HowToOldToNewBuildRun 
Index: checkout/doc/REPORT-20170204-WAM-IPE-1way.md
===================================================================
--- checkout/doc/REPORT-20170204-WAM-IPE-1way.md	(nonexistent)
+++ checkout/doc/REPORT-20170204-WAM-IPE-1way.md	(revision 94669)
@@ -0,0 +1,216 @@
+One-Way WAM to IPE Coupling Test Report {#REPORT-20170204-WAM-IPE-1way}
+=======================================
+
+\date 2/4/2017
+
+Versioning
+----------
+
+**User**: Robert.Oehmke
+
+**Project**: nems
+
+**Platform**: Theia Cray CS400
+
+**ESMF version**: ESMF_7_0_0
+
+### Repositories
+
+Application:
+
+    https://svnemc.ncep.noaa.gov/projects/ipe/WAM-IPE
+    -r 87779 (February 2017)
+
+NEMS:
+
+    https://svnemc.ncep.noaa.gov/projects/nems/branches/WAM-IPE/milestone3
+    -r 76674 (May 2016)
+
+WAM/GSM:
+
+    https://svnemc.ncep.noaa.gov/projects/gsm/branches/WAM-IPE/milestone3
+    -r 76469 (May 2016)
+
+IPE:
+
+    https://github.com/IonospherePlasmasphereElectrodynamics/ipe
+    -r 404 (January 2017)
+
+IPE_CAP:
+
+    https://svnemc.ncep.noaa.gov/projects/ipe/branches/nuopc_cap
+    -r 87260 (January 2017)
+
+### Model Versions
+      
+ * Ionosphere Plasmasphere Electrodynamics (IPE) model R362 (May 2016)
+
+ * Whole Atmosphere Model (WAM) R76469 (May 2016)
+
+Execution
+---------
+
+### Environment
+
+    COMPONENTS=( GSM, IPE, DATAWAM, DATAIPE)
+    IPE_SRCDIR=$ROOTDIR/IPE
+    IPE_BINDIR=$ROOTDIR/IPE-INSTALL
+    DATAWAM_SRCDIR=$ROOTDIR/DATAWAM
+    DATAWAM_BINDIR=$ROOTDIR/DATAWAM-INSTALL
+    DATAIPE_SRCDIR=$ROOTDIR/DATAIPE
+    DATAIPE_BINDIR=$ROOTDIR/DATAIPE-INSTALL
+    source /etc/profile
+    module load intel impi netcdf
+        Intel: 14.0.2
+        Intel MPI: 4.1.3.048
+        NetCDF: 4.3.0
+    module use /scratch4/NCEPDEV/nems/save/Gerhard.Theurich/Modulefiles
+    module load esmf/7.0.0
+
+### NEMS Compsets
+
+#### `swpc%20130316_nodensities_6day_spacewx_gsm%wam%T62_ipe%80x170`
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+###############################################################################
+#
+#  WAM-IPE coupled run
+#
+###############################################################################
+
+export TEST_DESCR="WAM-IPE 1h coupled run"
+
+# - gsm configuration ---
+export_gsm
+export CDATE=2013031600
+export WLCLK=480
+export NDAYS=6
+export FHOUT=1
+export TASKS=104
+export PE1=32
+export THRD=1
+export QUILT=.false.
+export FDFI=0
+export CP2=.false.
+export IDEA=.true.
+export IDVC=3
+export THERMODYN_ID=3
+export SFCPRESS_ID=2
+export SPECTRALLOOP=2
+
+# - IPE configuration ---
+export IPECASE=20130316_nodensities_6day_spacewx_80x170
+
+# - nems.configure ---
+export_nems
+export nems_configure=med_atm_ipm
+export atm_model=gsm
+export atm_petlist_bounds="0 15"
+export ipm_model=ipe
+export ipm_petlist_bounds="16 23"
+export med_model=spaceweather
+export med_petlist_bounds="24 103"
+export coupling_interval_fast_sec=180.0
+export coupling_interval_sec=180.0
+
+export F107_KP_SIZE=56
+export F107_KP_INTERVAL=10800
+export WAM_IPE_COUPLING=.true.
+export HEIGHT_DEPENDENT_G=.true.
+export F107_KP_SKIP_SIZE=24
+
+# - component specific setup calls ---
+setup_wam_T62_2013031600
+setup_ipe
+setup_spaceweather_gsm%wam%T62_ipe%80x170
+
+# -
+RUN_SCRIPT=rt_gfs.sh
+
+# - validation
+export CNTL_DIR=swpc%20130316_nodensities_6day_spacewx_gsm%wam%T62_ipe%80x170_V0002
+export LIST_FILES="IPE.inp SMSnamelist \
+                  sigf00 sigf01 sfcf00 sfcf01 flxf00 flxf01 \
+                  plasma00 plasma01 plasma02 plasma03 plasma04 \
+                  plasma05 plasma06 plasma07 plasma08 plasma09 \
+                  plasma10 plasma11 plasma12 plasma13 plasma14 \
+                  plasma15 plasma16 \
+                  wam3dgridnew2.nc ipe3dgrid2.nc wam2dmesh.nc"
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
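+Note the PET layout implied by the settings above: ATM on PETs 0-15
+(16 tasks), IPM on PETs 16-23 (8 tasks), and the mediator on PETs
+24-103 (80 tasks), which together account for all `TASKS=104` MPI
+tasks.
+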
+### NEMS Configuration
+
+#### `nems.configure.med_atm_ipm.IN`
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#############################################
+####  NEMS Run-Time Configuration File  #####
+#############################################
+
+# EARTH #
+EARTH_component_list: MED ATM IPM
+EARTH_attributes::
+ Verbosity = max
+::
+
+# MED #
+MED_model:                      _med_model_
+MED_petlist_bounds:             _med_petlist_bounds_
+MED_attributes::
+ Verbosity = max
+ DumpFields = false
+ DumpRHs = false
+::
+
+# ATM #
+ATM_model:                      _atm_model_
+ATM_petlist_bounds:             _atm_petlist_bounds_
+ATM_attributes::
+ Verbosity = max
+::
+
+
+# IPM #
+IPM_model:                      _ipm_model_
+IPM_petlist_bounds:             _ipm_petlist_bounds_
+IPM_attributes::
+ Verbosity = max
+::
+
+# Run Sequence #
+runSeq::
+ @_coupling_interval_sec_
+   ATM -> MED :remapMethod=redist
+   MED
+   MED -> IPM :remapMethod=redist
+   ATM
+   IPM
+ @
+::
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
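+Reading the run sequence above: each pass through the `@` loop
+advances the coupled system by one `coupling_interval_sec` (180 s in
+the compset above).  Within a pass, ATM fields are redistributed to
+the mediator, the mediator runs, its output is redistributed to IPM,
+and then the ATM and IPM components each take their own step.
+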
+### Runtime Data Files
+
+
+**IPE**:
+
+    /scratch3/NCEPDEV/swpc/noscrub/Naomi.Maruyama/ipe/grid/apex/1/GIP_apex_coords_global_lowres_new20120705
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/IPE/cases/20130316_nodensities_6day_spacewx_80x170/*
+
+**WAM**: 
+
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/WAM/T62_2013031600/*anl* 
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/WAM/T62_2013031600/wam_input_f107_kp.txt
+
+### Run Directory
+
+    /scratch3/NCEPDEV/swpc/scrub/Robert.Oehmke/rt_74190/swpc%20130316_nodensities_6day_spacewx_gsm%wam%T62_ipe%80x170
+
+Validation 
+----------
+
+To validate this run, the total electron content from IPE was plotted
+four times a day over the entire six-day run.  These plots were then
+examined by the scientist working on IPE to ensure that they looked
+correct.
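+
+As a rough illustration only (the actual plotting script is not part
+of this commit), a set of TEC plots at 6-hour intervals could be
+produced along the following lines.  The file name `tec_output.nc` and
+the variable names `tec`, `lat`, and `lon` are hypothetical, not the
+real IPE output format:
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# Hypothetical sketch: plot total electron content 4x/day for 6 days.
+import netCDF4
+import matplotlib.pyplot as plt
+
+ds = netCDF4.Dataset("tec_output.nc")      # assumed output file name
+for step in range(4 * 6):                  # 4 plots/day over 6 days
+    tec = ds.variables["tec"][step, :, :]  # assumed variable layout
+    plt.figure()
+    plt.pcolormesh(ds.variables["lon"][:], ds.variables["lat"][:], tec)
+    plt.colorbar(label="TEC")
+    plt.title("IPE total electron content, plot %d" % step)
+    plt.savefig("tec_%02d.png" % step)
+    plt.close()
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~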
Index: checkout/doc/make-images-fit.css
===================================================================
--- checkout/doc/make-images-fit.css	(nonexistent)
+++ checkout/doc/make-images-fit.css	(revision 94669)
@@ -0,0 +1,3 @@
+img {
+	max-width: 100%
+}
Index: checkout/doc/GRID_wam.md
===================================================================
--- checkout/doc/GRID_wam.md	(nonexistent)
+++ checkout/doc/GRID_wam.md	(revision 94669)
@@ -0,0 +1,54 @@
+Whole Atmosphere Model (WAM) Grid {#GRID_wam}
+=================================
+
+Description
+-----------
+
+The WAM grid is the same as the
+\ref GRID_gsm "Global Spectral Model (GSM)"
+grid except that it is extended to more vertical levels.  It is a
+global 3D reduced Gaussian grid, and the target resolution for
+coupling to the
+\ref GRID_IPE "Ionosphere Plasmasphere Electrodynamics (IPE)"
+grid is T62.  The vertical coordinate is in pressure and has to be
+converted to height in order to couple with IPE.
+
+<table>
+<tr>
+    <th>Long Name</th><th>Name</th><th>Value</th>
+</tr><tr>
+  <td>Number of longitudinal points (in the reduced Gaussian
+    distribution, with a minimum of 30 at the highest and lowest
+    latitudes and a maximum of 192 at the equator)</td>
+  <td>N<sub>i</sub></td><td>192 max</td>
+</tr><tr>
+  <td>Number of latitudinal points</td>
+  <td>N<sub>j</sub></td>
+  <td>94</td>
+</tr><tr>
+  <td>Number of vertical levels (from 0.35 km to 591.44 km; may vary dynamically)</td>
+  <td>Levels</td>
+  <td>150</td>
+</tr>
+</table>
+ 
+Data Decomposition
+------------------
+
+The data decomposition is based on a "shuffled" row-only distribution
+for better load balance.  The algorithm works as follows:
+
+ * Sort the rows in descending order of the number of points per
+   row, so that the rows with the most points come first.
+   (For example, for the WAM grid, the sorted row indices are: 47 46
+   45 44 43 42 41 40 39 38 37 36 35 48 49 61 60 59 58 57 56 55 54 53
+   52 51 50 34 29 64 65 66 67 62 63 28 33 32 31 30 27 69 68 26 70 71
+   25 24 72 23 20 19 21 73 22 75 74 76 77 18 79 17 78 16 80 81 14 15
+   13 82 83 12 11 84 85 10 9 8 87 86 88 7 6 89 90 5 91 4 93 92 1 3 2
+   94)
+
+ * Card-deal the rows in the above sorted order to the processors,
+   one at a time.  For instance, if four processors are used,
+   processor #1 will get rows 47, 43, 39, 35, 60, ... and processor #2
+   will get rows 46, 42, 38, 48, 59, etc., as in the sketch below.
+
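+As a minimal sketch (not the operational implementation), the
+card-deal decomposition can be written as follows in Python, where
+`points_per_row` stands in for the reduced-grid row lengths that the
+model itself provides:
+
+    def shuffled_row_decomposition(points_per_row, nprocs):
+        """Deal rows to processors, longest rows first, for load balance."""
+        # Sort 1-based row indices by descending number of points per row.
+        order = sorted(range(1, len(points_per_row) + 1),
+                       key=lambda r: points_per_row[r - 1],
+                       reverse=True)
+        # Card-deal the sorted rows: processor p gets every nprocs-th one.
+        return [order[p::nprocs] for p in range(nprocs)]
+
+With `nprocs=4`, processor #1's rows are element 0 of the result,
+processor #2's are element 1, and so on, reproducing the example above
+up to how ties between equal-length rows are broken.
+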
Index: checkout/doc/Doxyfile.IN
===================================================================
--- checkout/doc/Doxyfile.IN	(nonexistent)
+++ checkout/doc/Doxyfile.IN	(revision 94669)
@@ -0,0 +1,2445 @@
+# Doxyfile 1.8.13
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING      = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME           = "--PROJECT_NAME--"
+
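+# Note: in this Doxyfile.IN template, tokens of the form --LIKE_THIS--
+# (e.g. --PROJECT_NAME--, --PROJECT_NUMBER--, --CWD--, --MAIN_PAGE--)
+# appear to be placeholders that the documentation build substitutes
+# before running doxygen; they are not literal values.
+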
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER         = "--PROJECT_NUMBER--"
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give the viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF          = "Under construction!  Content may be missing, incorrect, or garbled."
+
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
+
+PROJECT_LOGO           =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       =
+
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS         = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES    = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE        = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF       = "The $name class" \
+                         "The $name widget" \
+                         "The $name file" \
+                         is \
+                         provides \
+                         specifies \
+                         contains \
+                         represents \
+                         a \
+                         an \
+                         the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
+# before file names in the file list and in the header files. If set to NO, the
+# shortest path that makes the file name unique will be used.
+# The default value is: YES.
+
+FULL_PATH_NAMES        = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH        =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH    =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful if your file system doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF      = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF           = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS           = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES  = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE               = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES                =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST              =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C  = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN   = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL   = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the latter case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING      =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT       = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT       = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT    = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT        = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT            = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT   = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# If one adds a struct or class to a group and this option is enabled, then also
+# any nested class or struct is added to the same group. By default this option
+# is disabled and one has to add nested compounds explicitly via \ingroup.
+# The default value is: NO.
+
+GROUP_NESTED_COMPOUNDS = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING            = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS  = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT   = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE      = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL            = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC         = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. If set to YES, local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES   = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO, these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES, upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES       = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES, the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC  = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES   = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS       = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES       = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING  = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS       =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES        = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES             = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES        = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER    =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE            =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES         =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS               = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR      = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC       = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE           =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
+# Note: If this tag is empty the current directory is searched.
+
+INPUT                  = 
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING         = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# read by doxygen.
+#
+# If left blank the following patterns are tested: *.c, *.cc, *.cxx, *.cpp,
+# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
+# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
+# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08,
+# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf.
+
+FILE_PATTERNS          = *.c \
+                         *.cc \
+                         *.cxx \
+                         *.cpp \
+                         *.c++ \
+                         *.java \
+                         *.ii \
+                         *.ixx \
+                         *.ipp \
+                         *.i++ \
+                         *.inl \
+                         *.idl \
+                         *.ddl \
+                         *.odl \
+                         *.h \
+                         *.hh \
+                         *.hxx \
+                         *.hpp \
+                         *.h++ \
+                         *.cs \
+                         *.d \
+                         *.php \
+                         *.php4 \
+                         *.php5 \
+                         *.phtml \
+                         *.inc \
+                         *.m \
+                         *.markdown \
+                         *.md \
+                         *.mm \
+                         *.dox \
+                         *.py \
+                         *.pyw \
+                         *.f90 \
+                         *.f95 \
+                         *.f03 \
+                         *.f08 \
+                         *.f \
+                         *.for \
+                         *.tcl \
+                         *.vhd \
+                         *.vhdl \
+                         *.ucf \
+                         *.qsf
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE              = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE                =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS       =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS        =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH           =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS       = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH             = --CWD--/image
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+INPUT_FILTER           =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+FILTER_PATTERNS        =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES    = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE = --MAIN_PAGE--
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION    = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS        = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS              = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS       = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX     = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX          =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT            = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# For valid HTML output, the header file must include any scripts and style
+# sheets that doxygen needs, which depend on the configuration options used
+# (e.g. the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER            =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER            =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET        =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
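+# Note: make-images-fit.css below is a project-local file, not one shipped with
+# doxygen. Judging only by its name, a plausible minimal sketch of its contents
+# would be something like:
+#   img { max-width: 100%; height: auto; }
+# (an assumption about the file's purpose, not its verified contents).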
+
+HTML_EXTRA_STYLESHEET  = make-images-fit.css
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES       =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# is purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE    = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT    = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, the value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA  = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP         = NO
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS  = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries to 1 will produce a fully collapsed tree by default. 0 is a special
+# value representing an infinite number of entries and will result in a fully
+# expanded tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET        = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME        = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID       = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID    = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME  = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index and a table of contents, and allow searching
+# for words in the documentation. The HTML Help Workshop also contains a viewer
+# for compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP      = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE               =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION           =
+
+# The GENERATE_CHI flag controls whether a separate .chi index file is
+# generated (YES) or included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI           = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING     =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND             = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP           = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE               =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE          = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER     = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME   =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS  =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. For more information please see Qt Help
+# Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS  =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION           =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated that, together with the HTML files, form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files need
+# to be copied into the plugins directory of Eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying, Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP   = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID         = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX          = YES
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW      = YES
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH         = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW    = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE       = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes take effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT    = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client-side JavaScript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output.
+# When enabled you may also need to install MathJax separately and configure
+# the path to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX            = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT         = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS     =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE       =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow; then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE           = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH    = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH        = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
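+# An illustrative example, assuming doxysearch.cgi has been installed in a web
+# server's cgi-bin (the host name is hypothetical):
+# SEARCHENGINE_URL = http://example.com/cgi-bin/doxysearch.cgi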
+
+SEARCHENGINE_URL       =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE        = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID     =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
+# each project to a relative location where the documentation can be found. The
+# format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS  =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX         = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE             = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. The package can be specified just
+# by its name or with the exact syntax to be used with the LaTeX
+# \usepackage command. To get the times font, for instance, you can specify:
+# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
+# To use the option intlimits with the amsmath package you can specify:
+# EXTRA_PACKAGES=[intlimits]{amsmath}
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES         =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string, for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER           =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER           =
+
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES      =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE        = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES     = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE      = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE        = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements;
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE    =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE    =
+
+# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
+# with syntax highlighting in the RTF output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_SOURCE_CODE        = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION          = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR             =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT             = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK       = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT         = docbook
+
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) in the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sf.net) file that captures the
+# structure of the code including all documentation. Note that this feature is
+# still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO, the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH           =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS  =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
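+# A hypothetical example (the macro name is illustrative): to document code
+# guarded by #ifdef USE_MPI as if it were compiled with -DUSE_MPI, one could
+# set: PREDEFINED = USE_MPI=1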
+
+PREDEFINED             =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED      =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES               =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE       =
+
+# If the ALLEXTERNALS tag is set to YES, all external classes will be listed in
+# the class index. If set to NO, only the inherited external classes will be
+# listed.
+# The default value is: NO.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS        = YES
+
+# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES         = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS         = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH            =
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH               =
+
+# If set to YES the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO.
+# The default value is: NO.
+
+HAVE_DOT               = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS        = 0
+
+# When you want a different-looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME           = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE           = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH           =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class reference variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH    = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS           = YES
+
+# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK               = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS   = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS     = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH          = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command. Disabling a call graph can be
+# accomplished by means of the command \hidecallgraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH             = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command. Disabling a caller graph can be
+# accomplished by means of the command \hidecallergraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH           = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH        = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. For an explanation of the image formats see the section
+# output formats in the documentation of the dot tool (Graphviz (see:
+# http://www.graphviz.org/)).
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
+# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
+# png:gdiplus:gdiplus.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT       = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG        = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH               =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS           =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS           =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS           =
+
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+
+PLANTUML_JAR_PATH      =
+
+# When using plantuml, the specified paths are searched for files specified by
+# the !include statement in a plantuml block.
+
+PLANTUML_INCLUDE_PATH  =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES    = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS      = NO
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP            = YES
Index: checkout/doc/GRID_gsm.md
===================================================================
--- checkout/doc/GRID_gsm.md	(nonexistent)
+++ checkout/doc/GRID_gsm.md	(revision 94669)
@@ -0,0 +1,50 @@
+Global Spectral Model (GSM) Grid {#GRID_gsm}
+================================
+
+Description
+-----------
+
+The global Gaussian T126 grid was chosen for the initial
+atmosphere-ocean coupled system.  A reduced Gaussian grid with
+shuffled latitudes is used internally in the GSM.  As of DREV43400,
+the full grid is used in the coupler, although this may change in the
+future.  The reference for the table below is [1].
+
+The latitudes in a Gaussian grid are nearly but not quite equally
+spaced. The sines of the Gaussian grid latitudes are the zeroes of the
+Legendre polynomial of order 190 and are used for Gaussian quadrature.
+Note that the starting longitude is 0.0E and the ending longitude is
+359.0625E at the equator (384 points at the 0.9375&deg; increment,
+since 360&deg;/384 = 0.9375&deg;).  In the reduced grid, the ending
+longitudes change as you go toward the poles.
+
+| Long Name                                   | Name            | Value               |
+| :------------------------------------------ | :-------------- | :------------------ |
+| Number of longitudinal points               | N<sub>i</sub>   | 384                 |
+| Number of latitudinal points                | N<sub>j</sub>   | 190                 |
+| Northernmost latitude                       | La<sub>1</sub>  | 89.277N             |
+| Easternmost longitude                       | Lo<sub>1</sub>  | 0.000E              |
+| Southernmost latitude                       | La<sub>2</sub>  | 89.277S             |
+| Westernmost longitude                       | Lo<sub>2</sub>  | 359.0625E = 0.9375W |
+| Longitudinal increment                      | D<sub>i</sub>   | 0.9375&deg;         |
+| Number of latitude circles, pole to equator | N               | 95                  |
+ 
+Data Decomposition
+------------------
+
+The data decomposition is based on a "shuffled" row-only distribution
+for better load balance.  The algorithm works as follows:
+
+ * Sort the rows in descending order of the number of points per
+   row, so that the rows with the most points come first.
+   (See the example for the 
+   \ref GRID_wam "WAM grid".)
+
+ * Card-deal the rows, in the above sorted order, to each processor,
+   one at a time.  For instance, if four processors are used,
+   processor \#1 will get rows 47, 43, 39, 35, 60, ... and processor \#2
+   will get rows 46, 42, 38, 48, 59, etc.  (A sketch of this
+   distribution appears below.)
+
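+A minimal shell sketch of this card-deal distribution (the row list and
+processor count are illustrative only, not taken from an actual run):
+
+    nprocs=4
+    rows_sorted="47 46 45 44 43 42 41 40"   # assume already sorted by descending points per row
+    i=0
+    for row in $rows_sorted; do
+        echo "row $row -> processor $(( i % nprocs + 1 ))"
+        i=$(( i + 1 ))
+    done
+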
+Reference
+---------
+
+1. [Master List of NCEP Storage Grids, GRIB Edition 1 (FM92)](http://www.nco.ncep.noaa.gov/pmb/docs/on388/tableb.html), grid number 126.
Index: checkout/doc/REPORT-20160801-WAM-IPE-standalone.md
===================================================================
--- checkout/doc/REPORT-20160801-WAM-IPE-standalone.md	(nonexistent)
+++ checkout/doc/REPORT-20160801-WAM-IPE-standalone.md	(revision 94669)
@@ -0,0 +1,339 @@
+Comparison with Stand-alone Components Test Report {#REPORT-20160801-WAM-IPE-standalone}
+================================================== 
+
+\date 08/01/2016
+
+**User**: Robert.Oehmke
+
+**Project**: nems
+
+**Platform**: Theia Cray CS400
+
+**ESMF Version**: ESMF_7_0_0
+
+### Repositories
+     
+Application:
+
+    https://svnemc.ncep.noaa.gov/projects/ipe/WAM-IPE
+        -r 80061 
+
+NEMS:
+
+    https://svnemc.ncep.noaa.gov/projects/nems/branches/WAM-IPE/milestone4
+        -r 80061 
+
+WAM/GSM:
+
+    https://svnemc.ncep.noaa.gov/projects/gsm/branches/WAM-IPE/milestone4
+        -r 76469 
+
+IPE:
+
+    https://github.com/IPEBestModelInTheWorld/ipe/trunk
+        -r 362 
+
+IPE_CAP:
+
+    https://svnemc.ncep.noaa.gov/projects/ipe/branches/nuopc_cap
+        -r 75858
+
+Stand-alone Version of IPE Repository:
+
+    https://github.com/IPEBestModelInTheWorld/ipe/trunk
+        -r 368 
+
+### Model Versions
+      
+ * NEMS Ionosphere Plasmasphere Electrodynamics (IPE) model R362 
+
+ * NEMS Whole Atmosphere Model (WAM) R76469 
+
+ * Stand-alone Ionosphere Plasmasphere Electrodynamics (IPE) model R368 
+
+Execution
+---------
+
+### Environment  
+
+    COMPONENTS=( GSM, IPE, DATAWAM, DATAIPE)
+    IPE_SRCDIR=$ROOTDIR/IPE
+    IPE_BINDIR=$ROOTDIR/IPE-INSTALL
+    DATAWAM_SRCDIR=$ROOTDIR/DATAWAM
+    DATAWAM_BINDIR=$ROOTDIR/DATAWAM-INSTALL
+    DATAIPE_SRCDIR=$ROOTDIR/DATAIPE
+    DATAIPE_BINDIR=$ROOTDIR/DATAIPE-INSTALL
+    source /etc/profile
+    module load intel impi netcdf
+        Intel: 14.0.2
+        Intel MPI: 4.1.3.048
+        NetCDF: 4.3.0
+    module use /scratch4/NCEPDEV/nems/save/Gerhard.Theurich/Modulefiles
+    module load esmf/7.0.0
+
+### NEMS Build Command
+
+    NEMS/NEMSAppBuilder
+
+
+### NEMS Compsets
+
+#### `swpc%20090115_1hr_sbys_gsm%wam%T62_ipe%80x170`
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+###############################################################################
+#
+#  WAM-IPE side-by-side run
+#
+###############################################################################
+
+export TEST_DESCR="WAM-IPE 1h side-by-side run"
+
+# - gsm configuration ---
+export_gsm
+export CDATE=2009011500
+export WLCLK=30
+export NHRS=1
+export FHOUT=1
+export TASKS=32
+export PE1=32
+export THRD=1
+export QUILT=.false.
+export FDFI=0
+export CP2=.false.
+export IDEA=.true.
+export IDVC=3
+export THERMODYN_ID=3
+export SFCPRESS_ID=2
+export SPECTRALLOOP=2
+
+# - IPE configuration ---
+export IPECASE=20090115_1hr_sbys_80x170
+
+# - nems.configure ---
+export_nems
+export nems_configure=atm_ipm
+export atm_model=gsm
+export atm_petlist_bounds="0 15"
+export ipm_model=ipe
+export ipm_petlist_bounds="16 31"
+export coupling_interval_fast_sec=180.0
+export coupling_interval_sec=180.0
+export F107_KP_SIZE=56
+export F107_KP_INTERVAL=10800
+export WAM_IPE_COUPLING=.true.
+export HEIGHT_DEPENDENT_G=.true.
+export F107_KP_SKIP_SIZE=24
+
+# - component specific setup calls ---
+setup_wam_T62_2009011500
+setup_ipe
+setup_spaceweather_gsm%wam%T62_ipe%80x170
+
+# -
+RUN_SCRIPT=rt_gfs.sh
+
+# - validation
+export CNTL_DIR=swpc%20090115_1hr_sbys_gsm%wam%T62_ipe%80x170_V0002
+export LIST_FILES="IPE.inp SMSnamelist \
+                  sigf00 sigf01 sfcf00 sfcf01 flxf00 flxf01 \
+                  plasma00 plasma01 plasma02 plasma03 plasma04 \
+                  plasma05 plasma06 plasma07 plasma08 plasma09 \
+                  plasma10 plasma11 plasma12 plasma13 plasma14 \
+                  plasma15 plasma16 \
+                  wam3dgridnew2.nc ipe3dgrid2.nc"
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+#### `swpc%20090115_1hr_sbys_gsm%wam%T62`
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~    
+###############################################################################
+#
+#  WAM side-by-side run
+#
+###############################################################################
+
+export TEST_DESCR="WAM 1h side-by-side run"
+
+# - gsm configuration ---
+export_gsm
+export CDATE=2009011500
+export WLCLK=30
+export NHRS=1
+export FHOUT=1
+export TASKS=32
+export PE1=32
+export THRD=1
+export QUILT=.false.
+export FDFI=0
+export CP2=.false.
+export IDEA=.true.
+export IDVC=3
+export THERMODYN_ID=3
+export SFCPRESS_ID=2
+export SPECTRALLOOP=2
+
+# - nems.configure ---
+export_nems
+export nems_configure=atm
+export atm_model=gsm
+export atm_petlist_bounds="0 15"
+export coupling_interval_fast_sec=180.0
+export coupling_interval_sec=180.0
+export F107_KP_SIZE=56
+export F107_KP_INTERVAL=10800
+#export WAM_IPE_COUPLING=.true.
+export HEIGHT_DEPENDENT_G=.true.
+export F107_KP_SKIP_SIZE=24
+
+# - component specific setup calls ---
+setup_wam_T62_2009011500
+setup_spaceweather_gsm%wam%T62_ipe%80x170
+
+# -
+RUN_SCRIPT=rt_gfs.sh
+
+# - validation
+export CNTL_DIR=swpc%20090115_1hr_sbys_gsm%wam%T62_ipe%80x170_V0002
+export LIST_FILES="sigf00 sigf01 sfcf00 sfcf01 flxf00 flxf01"
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    
+### NEMS Configurations
+     
+#### `nems.configure.atm_ipm.IN`
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~    
+#############################################
+####  NEMS Run-Time Configuration File  #####
+#############################################
+
+# EARTH #
+EARTH_component_list: MED ATM IPM
+EARTH_attributes::
+ Verbosity = max
+::
+
+# ATM #
+ATM_model:                      _atm_model_
+ATM_petlist_bounds:             _atm_petlist_bounds_
+ATM_attributes::
+Verbosity = max
+::
+
+# IPM #
+IPM_model:                      _ipm_model_
+IPM_petlist_bounds:             _ipm_petlist_bounds_
+IPM_attributes::
+ Verbosity = max
+::
+
+# Run Sequence #
+runSeq::
+ @_coupling_interval_sec_
+   ATM
+   IPM
+ @
+::
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
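+
+The `_..._` placeholders are filled in at run time from the compset
+variables above (`atm_model=gsm`, `atm_petlist_bounds="0 15"`,
+`ipm_model=ipe`, `ipm_petlist_bounds="16 31"`,
+`coupling_interval_sec=180.0`).  A minimal shell sketch of the
+equivalent substitution (the actual replacement is performed by the
+NEMS run scripts, and the output file name here is an assumption):
+
+    sed -e 's/_atm_model_/gsm/' \
+        -e 's/_atm_petlist_bounds_/0 15/' \
+        -e 's/_ipm_model_/ipe/' \
+        -e 's/_ipm_petlist_bounds_/16 31/' \
+        -e 's/_coupling_interval_sec_/180.0/' \
+        nems.configure.atm_ipm.IN > nems.configure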
+
+#### `nems.configure.atm.IN`
+
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~    
+#############################################
+####  NEMS Run-Time Configuration File  #####
+#############################################
+
+# EARTH #
+EARTH_component_list: MED ATM IPM
+EARTH_attributes::
+ Verbosity = max
+::
+
+# ATM #
+ATM_model:                      _atm_model_
+ATM_petlist_bounds:             _atm_petlist_bounds_
+ATM_attributes::
+Verbosity = max
+::
+
+# Run Sequence #
+runSeq::
+ @_coupling_interval_sec_
+   ATM
+ @
+::
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+### Runtime Data Files
+
+IPE:
+
+    /scratch3/NCEPDEV/swpc/noscrub/Naomi.Maruyama/ipe/grid/apex/1/GIP_apex_coords_global_lowres_new20120705
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/IPE/cases/20090115_1hr_sbys_80x170/
+
+WAM: 
+
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/WAM/T62_2009011500/*anl* 
+    /scratch4/NCEPDEV/nems/noscrub/NEMS-Data/WAM/T62_2009011500/wam_input_f107_kp.txt
+
+### Run Directories
+
+Side by Side WAM-IPE:
+
+    /scratch3/NCEPDEV/swpc/scrub/Robert.Oehmke/rt_86860/swpc%20090115_1hr_sbys_gsm%wam%T62_ipe%80x170
+
+Stand-alone IPE:
+
+    /scratch3/NCEPDEV/swpc/noscrub/Robert.Oehmke/IPE/run/1469939250_ipe_theia_intel_parallel_40
+
+Stand-alone WAM:
+
+    /scratch3/NCEPDEV/swpc/scrub/Robert.Oehmke/rt_123761/swpc%20090115_1hr_sbys_gsm%wam%T62
+
+Validation
+----------
+ 
+
+Validation was performed by using the UNIX cmp command to do a byte-by-byte comparison of output files.
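+
+A sweep along these lines can be scripted; the sketch below assumes the
+two run directories have been assigned to the (hypothetical) shell
+variables STANDALONE_DIR and COUPLED_DIR:
+
+    for f in plasma00 plasma01 plasma02 fort.2000 sigf00 flxf00; do
+        # cmp -s is silent and exits nonzero if the files differ
+        if cmp -s "$STANDALONE_DIR/$f" "$COUPLED_DIR/$f"; then
+            echo "$f: OK"
+        else
+            echo "$f: DIFFERS"
+        fi
+    done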
+
+ 
+| File Name | File Source 1   | File Source 2                   | Number of Bytes Difference | Status |
+| :-------: | :-------------: | :-----------------------------: | :------------------------: | :----: |
+| plasma00  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma01  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma02  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma03  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma04  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma05  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma06  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma07  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma08  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma09  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma10  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma11  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma12  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma13  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma14  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma15  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| plasma16  | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| fort.2000 | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| fort.2001 | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| fort.2002 | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| fort.2003 | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| fort.2004 | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| fort.2005 | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| fort.2006 | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| fort.2007 | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| fort.2008 | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| fort.2009 | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+| fort.2010 | Stand-alone IPE | IPE within side by side WAM-IPE |     0         | OK |
+
+| File Name | File Source 1   | File Source 2                   | Number of Bytes Difference | Status |
+| :-------: | :-------------: | :-----------------------------: | :------------------------: | :----: |
+| flxf00 | Stand-alone WAM | WAM within side by side WAM-IPE |     0         | OK |
+| flxf01 | Stand-alone WAM | WAM within side by side WAM-IPE |     0         | OK |
+| sfcf00 | Stand-alone WAM | WAM within side by side WAM-IPE |     0         | OK |
+| sfcf01 | Stand-alone WAM | WAM within side by side WAM-IPE |     0         | OK |
+| sigf00 | Stand-alone WAM | WAM within side by side WAM-IPE |     0         | OK |
+| sigf01 | Stand-alone WAM | WAM within side by side WAM-IPE |     0         | OK |
+
Index: checkout/doc/cap-doc.md
===================================================================
--- checkout/doc/cap-doc.md	(nonexistent)
+++ checkout/doc/cap-doc.md	(revision 94669)
@@ -0,0 +1,15 @@
+Cap Documentation {#cap-page}
+=================
+
+Caps are codes that act as adaptors, enabling models to use 
+[NUOPC](https://www.earthsystemcog.org/projects/nuopc/)
+standard component interfaces. They are called caps because they "sit
+on top" of user code and call into it. They contain translations of
+native model data structures into ESMF data structures.
+
+For documentation of caps, see the 
+[Earth System Prediction Suite (ESPS)](https://www.earthsystemcog.org/projects/esps/)
+site. Find the components on the table on the landing page, and
+follow the corresponding Code Access and Documentation link. Not all
+listed components have documented caps. The ESPS is a collection of
+components that are compliant with NUOPC conventions.
Index: checkout/doc/DREV70089.md
===================================================================
--- checkout/doc/DREV70089.md	(nonexistent)
+++ checkout/doc/DREV70089.md	(revision 94669)
@@ -0,0 +1,382 @@
+DREV70089: Reg-Hydro 0.2 {#milestone_DREV70089}
+========================
+
+\date 02/22/2016
+
+Repository URL
+--------------
+
+* https://svnemc.ncep.noaa.gov/projects/nems/apps/Regional/trunk
+
+Description
+-----------
+
+The Regional 0.2 (revision 70089) milestone is an internal release of
+a NEMS modeling application with five active components: GSM
+atmosphere, CICE sea ice, MOM ocean, LIS/Noah land, and WRF-Hydro.
+GSM-CICE-MOM run as a fully coupled three-way system (essentially the
+\ref milestone_DREV58214 "UGCS-Seasonal configuration").
+LIS/Noah and WRF-Hydro receive fields from GSM and these are hooked
+up to model internals.  LIS/Noah and WRF-Hydro send fields to the GSM
+and they are received at the interface, but these fields are not
+connected to GSM internals.  All field exchanges in the system occur
+through the central NEMS mediator.  This is a technical
+(non-scientific) milestone to ensure that all five active components
+are able to run inside NEMS and to ensure that field data is correctly
+regridded through the Mediator among GSM, LIS/Noah and WRF-Hydro.  For
+this release, the outputs from LIS/Noah and WRF-Hydro are not used by
+GSM, which uses its own internal land surface model.  GSM and LIS/Noah
+run on a global ~1 degree T126 grid (
+\ref GRID_gsm "GSM Grid",
+\ref GRID_LIS_T126 "LIS T126 Grid").  MOM
+and CICE run on a global ~1 degree tripolar grid
+(\ref GRID_mom5_1deg_tripole "MOM-CICE Grids").
+WRF-Hydro runs on a small regional domain over the Colorado Front
+Range at 1km resolution (Front Range Grid).  The global components
+(GSM-CICE-MOM) are currently used in the system as stand-ins for the
+regional atmosphere-ice-ocean components.  The regional atmosphere
+NMMB was not used because it was not available as a NUOPC component
+when development began on this milestone.
+
+\todo reference grids
+ 
+NUOPC "caps", which are essentially wrappers for the coupling interface, are provided for all model components, including WRF-Hydro and LIS, allowing these components to work in NEMS and other NUOPC-compliant systems.  The version of each component used is:
+ 
+ * [GSM - R69235](https://www.earthsystemcog.org/projects/esps/atmospheric_models#GSM)
+ * [MOM 5.0](https://www.earthsystemcog.org/projects/esps/ocean_models#MOM5)
+ * [CICE 5.0.2](https://www.earthsystemcog.org/projects/esps/ice_models#CICE5_0_2)
+ * [LIS 7.1r / Noah 3.3](https://www.earthsystemcog.org/projects/esps/land_surface_models#LIS7_1_NOAH3_3)
+ * [WRF-Hydro 3.0](https://www.earthsystemcog.org/projects/esps/hydrological_models#WRFHYDRO3)
+
+In this release only a subset of possible fields are exchanged between
+LIS/Noah, WRFHydro and the active atmosphere.  Because complete
+forcings are not provided as input to these components, neither the
+land surface model nor the hydrological model should be expected to
+produce physically realistic output in the current configuration.  The
+[Regional v0.2 coupling field spreadsheet](https://drive.google.com/open?id=1plqAjA3u1cXueT9DHk3erur6kI5glxkXkwA5cxsoM5M)
+indicates in detail which fields are advertised and hooked up
+internally for each component as well as which fields are exchanged
+with the Mediator.
+ 
+
+* LIS fields advertised in its NUOPC cap are hooked up to the internal
+  LIS data structures, LIS_FORC_State (exports) and LISWRF_export
+  (imports), which are in turn connected by LIS to an internal land
+  surface model.  At this time, the LISWRF_export data structure only
+  supports the Noah 3.3 land surface model.  The list of LIS imports
+  that are advertised and hooked up to internal data structures can be
+  seen in columns AN and AO, respectively, of the field spreadsheet
+  (linked above).  The lists of LIS exports that are advertised and
+  hooked up are in columns AQ and AR.
+     
+* WRF-Hydro fields advertised in its NUOPC cap are hooked up directly
+  to the internal rt_domain data structure.  Currently, WRF-Hydro does
+  not use meteorological forcings directly to compute hydrological
+  state, so GSM fields transferred from the Mediator to WRF-Hydro are
+  only intended to test the integrity of the field transfer and
+  regridding.  The list of WRF-Hydro imports that are advertised and
+  hooked up to internal data structures can be seen in columns AT and
+  AU, respectively, of the field spreadsheet.  The list of WRF-Hydro
+  exports that are advertised and hooked up are in columns AW and AX.
+     
+* Because GSM is running with an embedded land surface model, the GSM
+  NUOPC cap does not advertise import fields for land surface or
+  hydrological inputs.  In other words, for this technical milestone,
+  the land surface model is duplicated: an internal land surface model
+  used by GSM and an external land surface model, driven as a separate
+  component.  Therefore, LIS/Noah and WRF-Hydro fields exported from
+  the Mediator to GSM are only intended to test the integrity of the
+  field transfer and regridding and are not used to affect the state
+  of GSM.
+     
+* For this release, not all fields advertised in the LIS and WRF-Hydro
+  NUOPC caps are also advertised in the Mediator. Instead, a small
+  number of fields have been activated to test the integrity of the
+  field transfer and regridding.  Land and hydro fields imported into
+  the Mediator are indicated by an "l" and "h," respectively, in
+  column I of the field spreadsheet.  Likewise, land and hydro fields
+  exported from the Mediator are indicated in column M.  The set of
+  field pairs for which regridding was validated appears in the
+  Mediator Regridding section below.
+
+Build & Run
+-----------
+
+Instructions on how to build and run specific code revisions
+(e.g. this milestone revision) and the supported compsets are provided
+on the [Regional Application Build & Run](https://esgf.esrl.noaa.gov/projects/couplednems/regional) page.
+
+\todo move regional app page to nems repo
+
+Specifying Runtime Configuration
+--------------------------------
+
+The LIS and WRFHYDRO runtime configuration files are specified as parameters when calling the component setup function in the compset file.  Default locations for configuration files are provided for each available compset on both Theia and Yellowstone.
+
+    setup_lis "[Full path to lis.config file]"
+
+    setup_wrfhydro "[Full path to hydro.namelist file]" "[Directory containing parameter files]" "[Full path to namelist.hrldas file]"
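+
+For example, the calls might look like the following (the paths are
+hypothetical placeholders, not the actual default locations):
+
+    setup_lis "/path/to/lis.config"
+    setup_wrfhydro "/path/to/hydro.namelist" "/path/to/parameter_files" "/path/to/namelist.hrldas"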
+
+Cleaning & Rebuilding
+---------------------
+
+The NEMSAppBuilder will prompt, for each component, whether to clean and rebuild or use an existing library, if one was previously built.
+
+External Repositories
+---------------------
+
+The Regional application is made up of some external components with
+master repositories outside of EMC, including LIS and WRF-Hydro.  A
+basic strategy with rsync is being used to ensure that changes to
+external components are committed to the EMC Subversion repository and
+the external component's master repository.  Technical details are
+available here:
+
+ * https://www.earthsystemcog.org/projects/earthsystembridge/lis_upstream_sync
+
+\todo The lis_upstream_sync page no longer exists.  Find substitute or remove.
+
+Code Repositories
+-----------------
+
+WRF-Hydro source code is available in the NEMS SVN repository.  It is
+maintained there as a clone of the official WRF-Hydro repository at
+NCAR (privately hosted on GitHub).  Specifically, the version used for
+the v0.2 Regional app is:
+
+ * https://svnemc.ncep.noaa.gov/projects/nems/external_comps/WRFHYDRO/trunk/NDHM  (revision 70071)
+
+LIS source code is also available in the NEMS SVN repository.  It is
+maintained as a clone of the official LIS repository hosted at
+NASA-NCCS (private access).
+
+ * https://svnemc.ncep.noaa.gov/projects/nems/external_comps/LIS/branches/nuopc (revision 70071)
+
+Run Sequence
+------------
+
+The NEMS run-time configuration for the default Regional configuration
+is provided below.  Note that LND and HYD are run on disjoint
+processor sets, and will run concurrently during each step of the
+inner time loop.  For details on the run sequence in general please
+refer to the 
+\ref configuring
+and
+\ref architecture
+pages.
+
+    runSeq::
+      @7200.0
+        OCN -> MED :remapMethod=redist
+        MED MedPhase_slow
+        MED -> OCN :remapMethod=redist
+        OCN
+        @3600.0
+          MED MedPhase_fast_before
+          MED -> ATM :remapMethod=redist
+          MED -> ICE :remapMethod=redist
+          MED -> LND :remapMethod=redist
+          MED -> HYD :remapMethod=redist
+          ATM
+          LND
+          HYD
+          ICE
+          ATM -> MED :remapMethod=redist
+          ICE -> MED :remapMethod=redist
+          LND -> MED :remapMethod=redist
+          HYD -> MED :remapMethod=redist
+          MED MedPhase_fast_after
+        @
+      @
+    ::
+ 
+ 
+Validation
+----------
+
+The validation procedure for this release is to verify correct
+redistribution of data from the LIS and WRF-Hydro component caps to
+the Mediator and to verify correct regridding of fields among the ATM,
+LND, and HYD components.  All components run for a four hour
+simulation using a set of initial conditions from 2009.  Details of
+the validation appear in the sections below.
+
+A detailed test report is available for the validation run:  
+[NEMS Regional_0_2 Test Report 2016-02-19](https://esgf.esrl.noaa.gov/projects/couplednems/regional_testreports_regional_0_2)
+
+\todo move NEMS Regional_0_2 Test Report 2016-02-19 to repo
+
+### Connector Redistribution
+
+Redistribution of data between each component and the mediator has
+been bit-for-bit validated using ncdiff to compute the per-cell
+difference between files and ncwa to find the maximum and minimum
+difference values.  All connections reported zeros for the maximum and
+minimum differences.
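+
+For a single field, the check can be sketched as follows (the file
+names are hypothetical instances of the prefixes listed in the table
+below):
+
+    ncdiff field_atm_export_mean_prec_rate.nc \
+           field_med_from_atm_mean_prec_rate.nc diff.nc
+    ncwa -y max diff.nc max_diff.nc   # maximum per-cell difference
+    ncwa -y min diff.nc min_diff.nc   # minimum per-cell difference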
+
+| Connection    |  Field                               |  Source File Prefix    | Destination File Prefix |  Min Diff  |   Max Diff  |  Status     |
+| :------------ | :----------------------------------- | :--------------------- | :---------------------- | :--------- | :---------- | :---------- |
+| GSM->MED      | inst_down_lw_flx                     | field_atm_export_      | field_med_from_atm_     | 0          | 0           | OK          |
+| GSM->MED      | mean_prec_rate                       | field_atm_export_      | field_med_from_atm_     | 0          | 0           | OK          |
+| LIS->MED      | mean_laten_heat_flx_atm_into_lnd     | field_lis_export_      | field_med_from_lnd_     | 0          | 0           | OK          |
+| LIS->MED      | temperature_of_soil_layer_1          | field_lis_export_      | field_med_from_lnd_     | 0          | 0           | OK          |
+| WRFHYDRO->MED | liquid_water_content_of_soil_layer_1 | field_wrfhydro_export_ | field_med_from_hyd_     | 0          | 0           | OK          |
+| MED->WRFHYDRO | temperature_of_soil_layer_1          | field_med_to_hyd_      | field_wrfhydro_import_  | 0          | 0           | OK          |
+| MED->LIS      | liquid_water_content_of_soil_layer_1 | field_med_to_lnd_      | field_lis_import_       | 0          | 0           | OK          |
+| MED->LIS      | mean_prec_rate                       | field_med_to_lnd_      | field_lis_import_       | 0          | 0           | OK          |
+| MED->WRFHYDRO | inst_down_lw_flx                     | field_med_to_hyd_      | field_wrfhydro_import_  | 0          | 0           | OK          |
+| MED->GSM      | (GSM does not advertise import<br> fields from LIS or WRFHYDRO) | &nbsp; | &nbsp;       | N/A        | N/A         | Not Tested. |
+
+### Plot Generation
+
+Regridded field data is visualized on lat-lon plots using the following Ferret command.
+
+    shade/k=<TIMESTEP>/vlimits=<LATITUDE_SOUTH>:<LATITUDE_NORTH>/hlimits=<LONGITUDE_WEST>:<LONGITUDE_EAST>/levels=(<SCALE_LOWEST>,<SCALE_HIGHEST>,<SCALE_STEP>) ignore0(<FIELD>[d=<DATASET>]), <LONGITUDE_FIELD>[d=<DATASET>], <LATITUDE_FIELD>[d=<DATASET>]
+
+GSM data must be regridded to LIS even though both components run on the T126 grid.  GSM populates the T126 grid cells from northernmost latitude to southernmost latitude, so Fortran cell index (1,1) is the North-East cell.  LIS populates the T126 grid cells from southernmost latitude to northernmost latitude, so Fortran cell index (1,1) is the South-East cell.
+
+#### GSM: mean_prec_rate plotted on cell indices versus plotted on cell coordinates
+
+\image html DREV70089-atm_mean_prec_rate.png
+
+LEFT: Mean precipitation rate plotted on Fortran array indices.
+
+RIGHT: Mean precipitation rate plotted on cell center longitude (x) and cell center latitude (y) coordinates.
+
+### Mediator Regridding
+
+The table below lists the field regridding operations in the Mediator
+that were verified using visual inspection.  Each regridded field is
+shown below in before and after plots.
+
+| Regrid          | Field                                | Source Grid               | Destination Grid          | Link              | Status |
+| :-------------- | :----------------------------------- | :------------------------ | :------------------------ | :---------------- | :----- |
+| GSM -> LIS      | mean_prec_rate                       | GSM T126 Grid             | LIS T126 Grid             | \#GSM_to_LIS      | OK     |
+| GSM -> WRFHYDRO | inst_down_lw_flx                     | GSM T126 Grid             | Front Range Regional Grid | \#GSM_to_WRFHYDRO | OK     |
+| LIS -> GSM      | mean_laten_heat_flx                  | LIS T126 Grid             | GSM T126 Grid             | \#LIS_to_GSM      | OK     |
+| LIS -> WRFHYDRO | temperature_of_soil_layer_1          | LIS T126 Grid             | Front Range Regional Grid | \#LIS_to_WRFHYDRO | OK     |
+| WRFHYDRO -> LIS | liquid_water_content_of_soil_layer_1 | Front Range Regional Grid | LIS T126 Grid             | \#WRFHYDRO_to_LIS | OK     |
+| WRFHYDRO -> GSM | liquid_water_content_of_soil_layer_1 | Front Range Regional Grid | GSM T126 Grid             | \#WRFHYDRO_to_GSM | OK     |
+
+#### GSM -> LIS: mean_prec_rate
+
+\image html DREV70089-atm2lnd_regrid_mean_prec_rate.png
+
+LEFT: Mean precipitation rate after the NEMS Mediator has imported and
+redistributed data from GSM.
+
+RIGHT: Mean precipitation rate after the NEMS Mediator has regridded
+data to the LIS grid.
+
+------------------------------------------------------------------------
+
+#### GSM -> WRFHYDRO: inst_down_lw_flx
+
+\image html DREV70089-atm2hyd_regrid_inst_down_lw_flx.png
+
+LEFT: Instantaneous downward longwave flux scaled to the WRFHYDRO
+regional grid after the NEMS Mediator has imported and redistributed
+data from GSM.
+
+RIGHT: Instantaneous downward longwave flux after the NEMS Mediator
+has regridded data to the WRFHYDRO grid.
+
+####  GSM: Global inst_down_lw_flx
+
+\image html DREV70089-atm_inst_down_lw_flx.png
+
+ABOVE: Instantaneous downward longwave flux on the global T126 grid
+after the NEMS Mediator has imported and redistributed data from GSM.
+
+------------------------------------------------------------------------
+
+#### LIS -> GSM: mean_laten_heat_flx
+
+\image html DREV70089-lnd2atm_regrid_mean_laten_heat_flx.png
+
+LEFT: Mean latent heat flux after the NEMS Mediator has imported and
+redistributed data from LIS.
+
+RIGHT: Mean latent heat flux after the NEMS Mediator has regridded
+data to the GSM grid.
+
+------------------------------------------------------------------------
+
+#### LIS -> WRFHYDRO: temperature_of_soil_layer_1
+
+\image html lnd2hyd_temperature_of_soil_layer_1.png
+
+LEFT: Temperature of the first soil layer scaled to the WRFHYDRO
+regional grid after the NEMS Mediator has imported and redistributed
+data from LIS.
+
+RIGHT: Temperature of the first soil layer after the NEMS Mediator has
+regridded data to the WRFHYDRO grid.
+
+#### LIS: Global temperature_of_soil_layer_1
+
+\image html DREV70089-lnd_temperature_of_soil_layer_1.png
+
+ABOVE: Temperature of the first soil layer on the global T126 grid
+after the NEMS Mediator has imported and redistributed data from LIS.
+
+------------------------------------------------------------------------
+
+####  WRFHYDRO -> LIS: liquid_water_content_of_soil_layer_1
+
+\image html DREV70089-hyd2lnd_regrid_liquid_water_content_of_soil_layer_1.png
+
+LEFT: Liquid water content of the first soil layer after the NEMS
+Mediator has imported and redistributed data from WRFHYDRO.
+
+RIGHT: Liquid water content of the first soil layer scaled to the
+WRFHYDRO regional grid after the NEMS Mediator has regridded data to
+the LIS grid.
+
+####  LIS: Global liquid_water_content_of_soil_layer_1
+
+\image html DREV70089-lnd_liquid_water_content_of_soil_layer_1.png
+
+ABOVE:  Liquid water content of the first soil layer on the global T126 grid after the NEMS Mediator has regridded data to the LIS grid.
+
+####  WRFHYDRO -> GSM: liquid_water_content_of_soil_layer_1
+
+\image html DREV70089-hyd2atm_regrid_liquid_water_content_of_soil_layer_1.png
+
+LEFT: Liquid water content of the first soil layer after the NEMS
+Mediator has imported and redistributed data from WRFHYDRO.
+
+RIGHT: Liquid water content of the first soil layer scaled to the
+WRFHYDRO regional grid after the NEMS Mediator has regridded data to
+the GSM grid.
+
+#### GSM: Global liquid_water_content_of_soil_layer_1
+
+\image html DREV70089-atm_liquid_water_content_of_soil_layer_1.png
+
+ABOVE:  Liquid water content of the first soil layer on the global T126 grid after the NEMS Mediator has regridded data to the GSM grid.
+
+Limitations and Technical Notes
+-------------------------------
+
+<table>
+<tr>
+  <th>Issue</th><th> Description</th><th>   Solution</th>
+</tr><tr>
+<td>LIS Grid</td>
+<td>LIS does not provide all the grid information required for conservative regridding.  Cell corner coordinates and cell areas are missing.</td>
+<td>LIS is running on the same grid as GSM, T126.  The corner coordinates and areas are copied from ATM to LND within the mediator.  Although LIS and GSM share the same grid, the latitude coordinates are filled in opposite directions.  Therefore, when the corner coordinates are copied from ATM to LND, the latitude coordinates are inverted.</td>
+</tr><tr>
+<td>WRF-Hydro Grid</td>
+<td>WRF-Hydro does not provide all the grid information required for conservative regridding.  Cell areas are missing.  WRF-Hydro uses the ESMF cell area calculation.  The ESMF cell area calculation computes cell area in square Earth radians.  Square Earth radians are converted to square meters using the Earth's mean radius (m) squared, 6,376,000<sup>2</sup>.  This is the Earth's radius defined in the GSM cap.</td>
+<td>The NEMS_GRID environment variable defines the CICE grid during compile time.  Once CICE has been compiled with a specified grid it cannot be changed at runtime.     The NEMS_GRID environment variable is set in the regional.AppBuilder file to T126_nx1.</td>
+</tr>
+</table>
+
+Variable Number of Soil Layers
+------------------------------
+
+LIS provides an abstraction of multiple land surface models, each
+having varying numbers of soil layers.  The LIS abstraction of coupled
+export fields is only complete for Noah3.3 at this time and only
+supports 4 soil layers.  The LIS NUOPC cap leverages the coupled
+export fields abstraction and therefore only supports Noah3.3 and 4
+soil layers.  At this time the individual soil layers are broken
+into four 2D fields, which are coupled through NUOPC, thereby
+eliminating the need for an ungridded field dimension.
\ No newline at end of file
Index: checkout/doc/HowTo_OldToNewStruct_folder.md
===================================================================
--- checkout/doc/HowTo_OldToNewStruct_folder.md	(nonexistent)
+++ checkout/doc/HowTo_OldToNewStruct_folder.md	(revision 94669)
@@ -0,0 +1,145 @@
+Step 1. Setting Up the New Folder Structure  {#HowToOldToNewFolder}
+==================================
+ 
+Step 1.1:
+--------
+
+Determine where your app is going.  In this case, we are moving the
+app from WW3TestBed to the UGCSWeather App in a branch called
+UpdateStructure.  Most likely in your case, you will just be making a
+branch called UpdateStructure in your current App directory.
+ 
+Step 1.2:
+----------
+
+Determine which app in the updated structure most closely resembles
+your app. To see the status of NEMS Apps go to
+
+ * https://vlab.ncep.noaa.gov/web/environmental-modeling-center/nems-applications
+
+If you have a GSM-based app, the closest match is the NEMSGSM app:
+
+ * https://svnemc.ncep.noaa.gov/projects/nems/apps/NEMSGSM/trunk/
+ 
+Step 1.3:
+---------- 
+
+Create your directory structure. If possible, do this by copying the
+reference app trunk.  For us, we will copy the NEMSGSM trunk, so:
+ 
+    svn copy https://svnemc.ncep.noaa.gov/projects/nems/apps/NEMSGSM/trunk/ https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Weather/branches/UpdateStructure
+ 
+\todo Make sure all updates put in this branch are in the NEMSGSM trunk now too
+
+Step 1.4:
+----------  
+
+Now check out your new app: 
+ 
+    svn co https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Weather/branches/UpdateStructure UpdateStructure 
+
+Step 1.5:
+---------- 
+
+Look at the new app structure and externals: 
+ 
+    $ cd UpdateStructure 
+    $ ls 
+    CHEM      conf    GSM  modulefiles  oldtests  standaloneGSM.appBuilder
+    compsets  doc    log  NEMS      parm        standaloneGSM%gocart.appBuilder
+    $ svn propget svn:externals
+    NEMS     -r92559      https://svnemc.ncep.noaa.gov/projects/nems/trunk
+    GSM      -r89107      https://svnemc.ncep.noaa.gov/projects/gsm/trunk
+    CHEM     -r85947      https://svnemc.ncep.noaa.gov/projects/aerosol/chem/trunk
+
+At the application level, there are several folders and a number of
+`<appconfig>.appBuilder` files. Many things that were previously kept in
+NEMS, such as the equivalent of compsets, are now kept at the
+application level.  The appBuilder files give information about the
+components used in each application configuration.  A complete description
+of an appBuilder file can be found here.
+
+\todo link to appbuilder file description  
+
+The name and contents of each app-level folder are listed below. Note
+that these descriptions can also be found here.  \todo link to
+repository structure
+
+\todo replace with link to relevant page of compset runner documentation
+
+- conf/ - build configuration files
+- compsets/ - definition of compsets for this application
+- doc/ - contains application-specific documentation
+- modulefiles/ - module files to load external dependencies such as NetCDF
+    + theia/ - module files for Theia machine
+    + wcoss.cray/ - module files for the Cray partition of WCOSS
+    + ... more platform support ...
+- parm/ - small parameter files for compsets. Large files are located outside of the repository.
+- log/ - subdirectory for log files from execution and compilation. These are placed in the repository log/ directory so that they can be used to track changes, by re-using compsets as regression tests.
+
+\todo The doc folder is not listed in the repository structure 
+ 
+Note that all of the external components are contained in folders with
+all-caps names and that the GSM and CHEM components no longer live in a
+subdirectory of NEMS. Now that many application specific items have
+been removed from NEMS to the application level, NEMS now contains the
+following elements, which are also described 
+\ref structure "here".
+
+ 
+Within NEMS/ reside:
+
+ * NEMSAppBuilder - script to build the NEMS application 
+ * NEMSCompsetRun  
+ * exe/ - built executables are placed here
+ * src/ - NEMS code is located here
+ * doc/ - NEMS-level documentation.
+ * test/ - test execution logic
+
+\todo In my version I also have oldtests (directory of old nems level things that have moved to app level... maybe this folder should be deleted?) and OldCompsetRun (prior version of compset runner)
+
+\todo add description or link to NEMSCompsetRun (and appbuilder) 
+
+\todo in 
+\ref structure "doc is missing"  
+  
+Step 1.6: 
+----------
+
+Make branches from the trunk of the external components that will need to be modified.
+For this example, this is GSM and NEMS. Changes to these
+branches will be added later.
+ 
+    svn copy https://svnemc.ncep.noaa.gov/projects/gsm/trunk/ \
+       https://svnemc.ncep.noaa.gov/projects/gsm/branches/NEMSUpdate/UGCSWeather \
+       -m 'Creating branch for updating UGCSWeather to new NEMS'
+    svn copy https://svnemc.ncep.noaa.gov/projects/nems/trunk/ \
+       https://svnemc.ncep.noaa.gov/projects/nems/branches/NEMSUpdate/UGCSWeather \
+       -m 'Creating branch for updating UGCSWeather to new NEMS'
+ 
+Step 1.7: 
+----------
+
+Update the externals to include any other components needed in the
+App and to point to any new branches made in the
+previous step. To do this, first set the `$EDITOR`
+or `$SVN_EDITOR` environment variable to your preferred editor.  It's
+best to add this setting to your `~/.cshrc` or equivalent file, and to
+source the file after updating it.  For example:
+ 
+    setenv SVN_EDITOR vi
+
+or 
+
+    export SVN_EDITOR=vi
+ 
+Now, update the externals: 
+ 
+    svn propedit svn:externals . 
+ 
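+After editing, the externals property might look something like this
+(illustrative only; your components and revisions will differ):
+
+    NEMS    https://svnemc.ncep.noaa.gov/projects/nems/branches/NEMSUpdate/UGCSWeather
+    GSM     https://svnemc.ncep.noaa.gov/projects/gsm/branches/NEMSUpdate/UGCSWeather
+    CHEM    -r85947    https://svnemc.ncep.noaa.gov/projects/aerosol/chem/trunk
+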
+To see the result of this step, see the following tag:
+
+ *  https://svnemc.ncep.noaa.gov/projects/nems/apps/UGCS-Weather/tags/NEMSTutorial/Step7
+
+
+
Index: checkout/doc/DREV73964.md
===================================================================
--- checkout/doc/DREV73964.md	(nonexistent)
+++ checkout/doc/DREV73964.md	(revision 94669)
@@ -0,0 +1,176 @@
+DREV73964: UGCS-Seasonal 0.2 {#milestone_DREV73964}
+============================
+
+\date 4/28/2016
+
+Description
+-----------
+
+UGCS-Seasonal 0.2 (DREV73964) is a three-way configuration of the
+[Global Spectral Model (GSM)](http://www.emc.ncep.noaa.gov/index.php?branch=GFS),
+[Modular Ocean Model 5 (MOM5)](http://mom-ocean.org/web), and 
+[Los Alamos Sea Ice Model (CICE)](http://oceans11.lanl.gov/trac/CICE/). 
+GSM runs on a 
+\ref GRID_gsm "T126 grid", and MOM5 and CICE run on a 
+\ref GRID_mom5_0p5deg_tripole "0.5 degree tripolar global grid with 0.25 degree tropics".
+A full set of fields is
+transferred among components using the NEMS mediator (see 
+[exchange field spreadsheet](https://docs.google.com/spreadsheets/d/1Tae7NoGbIfti38QxvmzPy7Z4tIWQgY4zSdq5Xcx9MDk/edit?usp=sharing)).
+Relative to 
+\ref milestone_DREV58214 "UGCS-Seasonal 0.1",
+this revision includes additional verified exchange field connections
+involving sea ice and improved treatment of coastlines. Initialization
+is performed using Climate Forecast System Reanalysis (CFSR) data for
+April 1, 2015 that is consistent for all components. There is a cold
+start initialization implemented using a separate run sequence.
+
+This revision has been run for 30 days and exhibits behavior that is
+Earth-like. The UGCS-Seasonal 0.2 revision is a starting point for
+further physical analysis and integration with workflow and data
+assimilation components.
+
+A primary limitation of the revision is that it has not been optimized
+for performance or memory. In addition, it appears to have a porting
+issue. It has been tested successfully on the NOAA research platform
+theia and the EMC operational platform wcoss, but it fails after
+several days on the NCAR yellowstone computer. These are all IBM
+clusters. Another technical limitation is that a coordinated restart
+capability has not yet been implemented. This prevented the revision
+from being run longer than 30 days.
+
+A known science issue is the presence of unrealistic wind stresses in
+the northern ocean.
+
+Run Sequences
+-------------
+
+UGCS-Seasonal includes two run sequences, a cold start sequence and a
+time integration sequence.
+
+Cold start sequence: The cold start sequence initializes
+components using a minimal set of files ingested by GSM. The cold
+start sequence only needs to run for a half hour. However, it runs for
+an hour because there is a limitation on running less than an hour in
+EMC scripts.
+
+    runSeq::
+      @1800.0
+        @600.0
+          MED MedPhase_prep_atm
+          MED -> ATM :remapMethod=redist
+          ATM
+          ATM -> MED :remapMethod=redist
+          MED MedPhase_prep_ice
+          MED -> ICE :remapMethod=redist
+          ICE
+          ICE -> MED :remapMethod=redist
+          MED MedPhase_atm_ocn_flux
+          MED MedPhase_accum_fast
+        @
+        MED MedPhase_prep_ocn
+        MED -> OCN :remapMethod=redist
+        OCN
+        OCN -> MED :remapMethod=redist
+      @
+    ::
+    
+Time integration sequence: The second run sequence, shown below, is
+for the time integration loop. It is initialized by restart files
+generated by the cold start sequence. As in UGCS-Seasonal 0.1, there
+is a fast and a slow loop, at 10 minutes and 30 minutes, respectively.
+
+    runSeq::
+      @1800.0
+        MED MedPhase_prep_ocn
+        MED -> OCN :remapMethod=redist
+        OCN
+        @600.0
+          MED MedPhase_prep_ice
+          MED MedPhase_prep_atm
+          MED -> ATM :remapMethod=redist
+          MED -> ICE :remapMethod=redist
+          ATM
+          ICE
+          ATM -> MED :remapMethod=redist
+          ICE -> MED :remapMethod=redist
+          MED MedPhase_atm_ocn_flux
+          MED MedPhase_accum_fast
+        @
+        OCN -> MED :remapMethod=redist
+      @
+    ::
+
+Validation
+----------
+
+Diagnostics from a 5-day run are shown in 
+[this slide set](http://esgf.esrl.noaa.gov/site_media/projects/couplednems/pres_1604_DREV73550_mendez.pptx), from
+M. Mendez, EMC. The revision analyzed was DREV73550, which was shown
+to be bit-for-bit with DREV73964.
+
+Diagnostics from a 30-day run are shown in this 
+[slide set](http://esgf.esrl.noaa.gov/site_media/projects/couplednems/ugcsMerge_short.pptx), 
+from M. Mendez, EMC. The revision analyzed was DREV74294, which was
+shown to be bit-for-bit with
+DREV73964. In addition to results from
+the UGCS-Seasonal 0.2 system, these slides show results from CFSv2,
+CDAS, and a subsequent version of the UGCS-Seasonal code, after a
+merge which updated the GSM version.
+
+These results are from a development system and do not represent a NOAA prediction or product.
+
+Build, Run, and Restart
+-----------------------
+
+### Download and Build
+
+Instructions on how to download and build a NEMS application are in the
+\ref documentation "NEMS User's Guide and Reference".
+Running UGCS-Seasonal with a cold start requires additional
+instructions, below.
+
+### Cold Start and Run
+
+Compsets that can be run with this revision are:
+
+ * `20150401short_nems_gsm_cice_mom5` (1 day run)
+ * `20150401_nems_gsm_cice_mom5` (5 day run)
+ * `20150401long_nems_gsm_cice_mom5` (30 day run)
+ * `20150401short_nemscold_gsm_cice_mom5` (cold start compset, 1 hr)
+
+To run compsets, start within the UGCS-Seasonal directory and execute
+the
+\ref running "NEMSCompsetRun tool" by typing:
+
+    ./NEMS/NEMSCompsetRun -compset <compset name>
+
+If you leave off the `-compset` argument, CompsetRun will read the
+compset list from a local file. (See more about CompsetRun options
+\ref running "here".)
+
+To initialize a new case of the UGCS-Seasonal from a cold start, run
+the cold start compset, `20150401short_nemscold_gsm_cice_mom5`, to
+generate initial mediator restart files.  That compset runs the
+atm/ice/ocean sequentially for 1 hour. It will generate some initial
+mediator restart files consisting of initial values for coupling
+fields consistent with the current atmosphere, ocean, and sea ice
+conditions.  You then use those initial mediator files to startup a
+standard run with the same model initial conditions and initial model
+date as the cold start run.  To do this, run the coldstart compset
+using CompsetRun as specified above with the compset,
+`20150401short_nemscold_gsm_cice_mom5`.
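+That is:
+
+    ./NEMS/NEMSCompsetRun -compset 20150401short_nemscold_gsm_cice_mom5
+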
+
+After running the cold start compset, go into `NEMS/NEMSCompsetRun` and
+modify "setup_med_nems" to pre-stage the cold start mediator restart
+files instead of whatever files are set by default.  This is done in a
+section that looks like:
+
+    cp -f ${DATADIR}/MED_NEMS/${nemsgrid}${nemsgridinp}/* ${RUNDIR}
+    # cp -f /scratch3/NCEPDEV/stmp1/Anthony.Craig/UGCS-Seasonal.r72808/20150401short_nemscold_gsm_cice_mom5/mediator*restart* ${RUNDIR}             
+
+Comment out the first line and uncomment the second line. In the
+second line, set the path to the cold start run directory where the
+cold start case just ran.  This will copy the mediator restart files
+from your cold start run directory into the new run directory.
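+
+For illustration, after the edit the section might read as follows (the
+cold start run directory path below is a hypothetical placeholder):
+
+    # cp -f ${DATADIR}/MED_NEMS/${nemsgrid}${nemsgridinp}/* ${RUNDIR}
+    cp -f /path/to/coldstart_rundir/mediator*restart* ${RUNDIR}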
+
+Once the cold start is done and the `NEMSCompsetRun` is modified, run a
+standard compset like `20150401long_nems_gsm_cice_mom5` to advance the
+model from the initial conditions.  The system will start with the
+same atmosphere, ocean, and ice initial conditions as the cold start
+run plus the new mediator restart files, and the model will run
+concurrently.
\ No newline at end of file
Index: checkout/doc/sw_mediator.md
===================================================================
--- checkout/doc/sw_mediator.md	(nonexistent)
+++ checkout/doc/sw_mediator.md	(revision 94669)
@@ -0,0 +1,47 @@
+Space Weather Mediator            {#sw_mediator}
+======================
+
+The Space Weather Mediator is designed to transfer 3D data between the
+Whole Atmosphere Model (WAM) and the Ionosphere Plasmasphere
+Electrodynamics (IPE) model. The Space Weather Mediator is a 
+[National Unified Operational Prediction Capability (NUOPC)](https://www.earthsystemcog.org/projects/nuopc/)
+mediator and so contains interfaces and methods to pass data between
+two components as part of a NUOPC coupled system. Currently, this
+mediator has only been tested with the
+[DATAWAM](https://esgf.esrl.noaa.gov/projects/wam_ipe/DATA_WAM)
+version of WAM and the
+[DATAIPE](https://esgf.esrl.noaa.gov/projects/wam_ipe/DATA_IPE)
+version of IPE. Over the next several months, this project's goal is
+to connect it to the actual WAM and IPE models. Currently, this
+mediator only transfers data from WAM to IPE. However, at the end of
+the current phase of this project, the goal is to have data going in
+both directions.
+
+One special feature of this mediator is that it works with a varying
+3D height field. The WAM grid's vertical coordinate is at fixed
+pressure levels, so its actual height varies timestep by timestep. The
+mediator is designed to operate with the varying height field without
+the expense of recalculating the full regridding matrix every
+timestep. It does this by using a 3D fixed-height intermediate grid.
+
+During initialization, the mediator defines the 3D fixed-height
+intermediate grid. The fixed heights are determined using global
+average heights derived from a WAM model run in a "warm" simulation
+time.  The heights are then extended up to 800 km because the height
+at each point may vary and go beyond the highest average height level.
+The 3D coordinates for this intermediate grid are precalculated and
+stored in a NetCDF file. They are read in to create the grid during
+initialization. Once this grid has been created, a regrid matrix is
+calculated between the fixed-height intermediate grid and the IPE
+grid. Since neither of these grids change, this matrix can be used to
+interpolate between the intermediate grid and IPE grids during the
+entire run.
+
+While the system is running, at each timestep the 3D fields coming
+from WAM are linearly interpolated onto the 3D fixed-height
+intermediate grid using the height field coming from WAM. The
+intermediate grid is built to have the same distribution and
+horizontal coordinates as the WAM grid, so the linear interpolation is
+an inexpensive 1D linear interpolation. Once the data is on the
+intermediate grid, the precalculated regrid matrix is used to transfer
+it to the IPE grid.
Index: checkout/doc/old/NEMS.md
===================================================================
--- checkout/doc/old/NEMS.md	(nonexistent)
+++ checkout/doc/old/NEMS.md	(revision 94669)
@@ -0,0 +1,44 @@
+NEMS Directory Structure
+========================
+
+The NEMS directory contains the source code and test scripts for the
+NEMS.  Most of the documentation is in the `doc` subdirectory or in
+the `../doc/` directory.  Most of the files that were in the NEMS have
+been moved to the application layer, discussed below.  Further
+documentation, specific to the app, is also at the app level.
+
+Within NEMS reside:
+
+* `exe` - NEMS.x and other executables built from `src`
+* `src` - main program for NEMS
+ * `ENS_Cpl` - The Ensemble coupler directory.
+ * `conf` - various compilation specifications
+* `doc` - documentation.
+* `NEMSAppBuilder` - a script to build NEMS, as discussed elsewhere in the
+  documentation
+* `NEMSCompsetRun` - script to run NEMS, identical to the regression test runner
+* `OldCompsetRun` - prior version of the compset runner
+* `tests` - test execution logic
+  * `rtgen` - front-end to the regression test runner
+  * `rt.sh` - wrapper around rtgen for users familiar with the old system
+
+At the application level resides these files:
+
+* `doc` - application-specific documentation
+
+* `oldtests` - application-specific, old, test suite which is
+   deprecated but retained for backward compatibility
+
+* `compsets` - configuration for the NEMSCompsetRun and regression
+   test runner
+
+* `oldcompsets` - configuration for the old compset system available
+   via OldCompsetRunner
+
+* `modulefiles` - module loading information for each platform
+  * `theia` - NOAA Theia modulefiles
+  * `wcoss.phase1` - WCOSS Phase 1 modulefiles
+  * ... other directories for other computers ...
+* `conf` - configuration for NEMS/src/configure
+* `parm` - parameter files for the test suites
+* `log` - log directory for the NEMSAppBuilder and NEMSCompsetRun
\ No newline at end of file
Index: checkout/doc/old/markdown.md
===================================================================
--- checkout/doc/old/markdown.md	(nonexistent)
+++ checkout/doc/old/markdown.md	(revision 94669)
@@ -0,0 +1,34 @@
+Markdown
+--------
+
+The webpage is generated from documentation in the NEMS repository.
+Markdown is a human-readable, wiki-like syntax that can easily be
+converted to other formats.  The Markdown files are converted to HTML
+via Trent Mick's markdown2.py tool.  For details on Markdown, see:
+
+* [Markdown website](https://daringfireball.net/projects/markdown/)
+* [markdown2.py website](https://github.com/trentm/python-markdown2/)
+* [markdown website](https://sourceforge.net/p/doxygen/discussion/markdown_syntax#md_ex_toc1)
+
+This documentation is stored in the NEMSLegacy doc directory.  The
+website can be regenerated by doing this:
+
+    svn co https://svnemc.ncep.noaa.gov/projects/nems/apps/(appname)/trunk
+    cd trunk/NEMS/doc
+    make
+
+Markdown link syntax examples:
+
+    [The link text](http://example.net/)
+
+    [The link text](http://example.net/ "Link title")
+
+    [The link text](/relative/path/to/index.html "Link title")
+
+    [The link text](somefile.html)
+
+
+That creates the `README.html` and `README.css` that you view in your
+browser.
Index: checkout/doc/old/README.NMM.md
===================================================================
--- checkout/doc/old/README.NMM.md	(nonexistent)
+++ checkout/doc/old/README.NMM.md	(revision 94669)
@@ -0,0 +1,354 @@
+NMM Instructions
+----------------
+
+### How to use restart:
+
+1. Keep the same end time (nhours_fcst: in the config file) as in the
+   original run.
+
+2. Change the restart: argument in the config file from false to true,
+   as shown below.  That is the only change in the config file.
+
+3. The only difference from the original run script is that you do not
+   use main_input_filename.  Instead, you use restart_file_XX_nemsio,
+   which comes from the original restart output file
+   nmmb_rst_XX_nio_HHHHh_00m_00.00s, where XX is the domain ID and HHHH
+   is the forecast hour of the restart time.
+
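+For example, if the original run's config file ended the forecast at
+48 hours, the restarted run's config file would contain entries like
+this (a minimal sketch; only the restart: value changes):
+
+    nhours_fcst: 48
+    restart:     true
+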
+Limitations:
+
+1. In order to keep bit-identical results, the restart file must be
+   written (used) on a full hour
+
+2. Restart cannot be more frequent than history output and must be a
+   multiple of the history output interval, i.e. if history is written
+   every 3 hours, the model can be restarted at 3, 6, 9, ... hours
+   (needs to be fixed later)
+
+TODO:
+
+1. Allow writing the restart file at any time in the forecast
+
+
+### How to use time series output in NMMB:
+
+Time series output in NMMB is optional output that is turned on by
+providing an appropriate namelist file in the run directory.  The name
+of that file must be ts_locations.nml, and its contents are as
+follows:
+
+    &TS_LOCATIONS
+    NPOINTS=2,
+    POINTS_LON=-106.0, -110.0,
+    POINTS_LAT=54.0, 50.0
+    /
+
+where NPOINTS defines the number of locations, and POINTS_LON and
+POINTS_LAT are arrays of longitudes and latitudes of the selected
+points in degrees (-180.0 to 180.0).
+
+The output filenames are ts_p01_d01.bin, ts_p02_d01.bin,
+ts_p01_d02.bin, ts_p02_d02.bin, etc.  The p01, p02, ... part indicates
+the point number from 1 to NPOINTS, and d01, d02, ... indicates the
+domain number.
+
+The NCAR Graphics (ncarg) program tsplot, which can be used to plot
+the time series, is located in `/u/wx20du/plot_timeseries`.  It
+requires a control file as a command-line argument.  For example, if
+the control file is named tsplotsetup_nmm, you will need to run:
+
+    $ ./tsplot tsplotsetup_nmm
+
+which will create a gmeta file.  A sample control file
+(tsplotsetup_nmm) is also located in the `/u/wx20du/plot_timeseries`
+directory.
+
+
+Nesting
+-------
+
+The NMM-B has telescoping static and moving nest capability.  All
+domains, whether the uppermost parent or any nest, are functionally
+equivalent and thus each needs its own configure file.  Both 1-way and
+2-way interaction between parent and child domains are available.
+
+### For 1-way nesting:
+
+1. Set 'nest_mode' to '1-way' in all configure files.  The value of
+   'generation' is not relevant and can be ignored.
+ 
+2. The uppermost parent's configure file (see the sketch following
+   this list):
+
+    a. The variable 'num_domains_total' must be set in this domain's
+       configure file.  This is the total number of domains in the run
+       which includes the upper parent plus all nests.  This variable
+       does not need to be set in any other configure files (if it is
+       set in others it is not read).
+
+    b. Set the value for 'my_domain_id' which must always be 1 for the
+       uppermost parent.
+
+    c. Set the value for 'my_parent_id' to -999.
+
+    d. Set 'n_children' to the number of child nests associated with
+       the uppermost parent.  This does not include any nests inside
+       the first generation of child nests because all interactions
+       with nesting involve only a parent and its first generation of
+       children.
+
+    e. Set 'my_domain_moves' to false.
+
+3. Static nest configure files:
+
+    a. In each nest's configure file set 'my_domain_id' to a unique
+       integer greater than 1.  The user is free to choose these
+       integer identifiers in any way desired except that all domain
+       IDs must ultimately form a monotonic sequence.  In other words
+       if the run contains 2 first generation nests and one of those
+       nests contains a nest then the three nests may use any integer
+       value between 2 and 4 as their domain ID so that the final IDs
+       are 1,2,3, and 4 but never something like 1,2,4,5 which is not
+       a monotonic sequence.
+
+    b. Set the value for 'my_parent_id' to the integer ID that was
+       given to this nest's parent domain.
+
+    c. Set 'n_children' to the number of child nests inside of this
+       nest but not counting any deeper nests inside of those children.
+
+    d. Set 'i_parent_start' and 'j_parent_start' to the I and J
+       indices of the H point on the nest's parent's grid that
+       coincide with the nest's SW corner H point.  This implies that
+       any nest's SW corner must lie directly on a parent grid H
+       point.
+
+    e. Set 'parent_child_space_ratio' to the ratio of the size of the
+       parent's grid increment to the child's.  Make this an integer.
+
+    f. Set 'input_ready' to true if input data has already been
+       produced for this nest.  Set it to false if input data has not
+       been produced and the user wants the parent to generate the
+       nest's input data.  NPS-generated input data is naturally
+       preferable.
+
+    g. Set 'my_domain_moves' to false.
+
+4. Moving nest configure files.  See the regression test examples:
+   `tests/nmm_conf/nmm_mnests*conf_*`
+
+    a. Follow all instructions in 3(a)-(f).  
+
+    b. Set 'my_domain_moves' to true.
+
+    c. Set 'ratio_sfc_files' to the ratio of the uppermost parent's
+       grid increment to this moving nest's.  Again this should be an
+       integer.  The use of moving nests requires the user to generate
+       eight different surface-related static datafiles for each
+       different resolution of nest in the run.  If there are two
+       moving nests with parent_child_space_ratio=3 then a set of the
+       following eight files must be pre-generated: ALBASE_ij_3,
+       FIS_ij_3, ISLTYP_ij_3, IVGTYP_ij_3, MXSNAL_ij_3, SM_ij_3,
+       TG_ij_3, and VEGFRC_ij_3.  These are the base albedo, sfc
+       geopotential, soil type, vegetation type, maximum snow albedo,
+       sea mask, deep underground temperature, and vegetation
+       fraction, respectively, at the 3x nests' resolution but which
+       span the entire upper parent domain.
+
+       This data must be present as the nests move across the parent's
+       domain.  Then assume one of the 3x moving nests contains a 3x
+       moving nest inside it.  In the configure file for the inner
+       nest the value of ratio_sfc_files would be 9 and the eight sfc
+       datafiles would contain 9x data that spans the entire upper
+       parent's domain.  Note that the final integer in these files'
+       names must be the value of ratio_sfc_files.
+
+    d. Set the values of 'nrows_p_upd_w', 'nrows_p_upd_e',
+       'nrows_p_upd_s', and 'nrows_p_upd_n' to 2.  This is the number
+       of rows around the edge of the nest domain that must be updated
+       by the parent after the nest moves.  The nest does not use its
+       own data in these rows for updating itself because V is not
+       present on the north or east sides and some variables needed in
+       the integration part of the domain are not computed in these
+       rows.
+
+    e. If a moving nest has a child moving nest then for the outer
+       nest set the value of 'centers_distance'.  This is the distance
+       in units of the outer nest's grid increments that the inner
+       nest's center can move from the outer nest's center before the
+       outer nest shifts so as to bring their centers very near to
+       each other again.
+
+    f. If the uppermost parent domain is global then set the value of
+       'latitude_limit'.  If a nest domain (or the outermost nest in a
+       telescoping complex of nests) reaches this latitude in either
+       hemisphere then the nest stops and never moves again.  Thus the
+       nest's delta X cannot become too small due to converging
+       meridians which would result in violation of the CFL criterion.
+
+    g. The file called nests.txt must be present in the working
+       directory.  The file's 2nd column holds critical specifications
+       regarding variables in the Solver internal state when nests
+       move.  An explanation is given at the beginning of that file.
+
+5. Task assignment: When 1-way nesting is used, the user assigns
+   forecast (compute) tasks and write (quilt) tasks uniquely for each
+   domain in that domain's configure file (see the worked sketch
+   following this list).  The I by J layout of forecast tasks is
+   specified with the configure variables inpes and jnpes,
+   respectively.  Any number of groups of write tasks can be assigned
+   with the variable called write_groups.  More than one write group
+   should be used if the integration might move from one output time
+   to the next before write tasks have finished with the earlier
+   output.
+
+   The number of tasks in each write group is assigned with the
+   variable called write_tasks_per_group.  The sum of
+   `inpes*jnpes+write_groups*write_tasks_per_group` for all domains must
+   equal the number of tasks that are assigned to the run in the
+   runscript.  This task assignment lets the user fine-tune the
+   balance of work being done on all domains to minimize the time that
+   any parent or child waits for the other, thus leading to all compute
+   tasks being busy virtually all the time as all domains integrate
+   their forecasts simultaneously.
+
+6. Configure file names: The run script will copy each configure file
+   to configure_file_01, configure_file_02, etc. where the final
+   integers on the filenames form a monotonic sequence.  The uppermost
+   parent's configure file must be used for configure_file_01 but the
+   user is not required to make the remaining files' names contain the
+   same integer as their corresponding configure files' domain IDs.
+
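+As a worked sketch of items 2, 3, and 5 above, consider an upper
+parent with a single static 3:1 nest.  All values are illustrative,
+unrelated configure entries are omitted, and the exact formatting
+should be copied from a real configure file.  The parent's configure
+file (configure_file_01) would contain:
+
+    num_domains_total:        2
+    my_domain_id:             1
+    my_parent_id:             -999
+    n_children:               1
+    my_domain_moves:          false
+    inpes:                    6
+    jnpes:                    4
+    write_groups:             1
+    write_tasks_per_group:    2
+
+and the nest's configure file (configure_file_02) would contain:
+
+    my_domain_id:             2
+    my_parent_id:             1
+    n_children:               0
+    i_parent_start:           40
+    j_parent_start:           50
+    parent_child_space_ratio: 3
+    input_ready:              true
+    my_domain_moves:          false
+    inpes:                    4
+    jnpes:                    4
+    write_groups:             1
+    write_tasks_per_group:    2
+
+Following item 5, the runscript must then request
+(6*4 + 1*2) + (4*4 + 1*2) = 26 + 18 = 44 tasks in total.
+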
+### For 2-way nesting
+
+1. Set 'nest_mode' to '2-way' in all configure files.  The integer
+   value of each domain's generation must be given to the variable
+   called 'generation'.  The generation variable is ignored in 1-way
+   mode.
+
+2. The nests.txt file must be present in the working directory.  The
+   file's 3rd column specifies which of the Solver's internal state
+   variables will be used in the 2-way exchange from child to parent.
+   Currently 2-D and 3-D real variables may be selected.  As stated in
+   that file's instructions, an H is used to specify that the given
+   H-pt variable is to be part of the 2-way exchange while a V
+   indicates that the given V-pt variable is to be part of the 2-way
+   exchange.
+
+3. The same rules apply for running static or moving nests in 2-way
+   nesting as in 1-way nesting described above.
+ 
+4. Task assignments for 2-way interaction cannot be done in the same
+   way as they are for 1-way because that would leave too many
+   processors idle at any given time as children and parents wait on
+   each other to send internal update values and vice versa.
+   Therefore the integration in 2-way nesting will continually cycle
+   through the generations sequentially but within each generation all
+   domains will execute their forecasts concurrently.  To maximize
+   overall efficiency the user first decides which generation of
+   domains will be the most computationally expensive.  Then ALL
+   available forecast (compute) tasks in the run are assigned uniquely
+   to the domains in that generation where they can be balanced so
+   that when this generation executes then all compute tasks will be
+   busy.
+
+   As many of the total number of available compute tasks are assigned
+   to each of the remaining generations as can be used efficiently,
+   i.e., up to the point where assigning more compute tasks to a
+   generation would not decrease the runtime, or would increase it due
+   to the large number of halo exchanges for task subdomains that are
+   too small.  So that the writing of history and restart output
+   remains asynchronous, all write and quilt tasks must still be
+   assigned uniquely to each individual domain and cannot be shared
+   among different domains as
+   the compute tasks are.  Therefore the sum of `inpes*jnpes` for all
+   domains in the most expensive generation plus the sum of
+   write_groups times write_tasks_per_group for all domains must equal
+   the total number of tasks assigned to the run in the runscript.
+
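+As a worked example with illustrative numbers: suppose generation 2
+is the most expensive and contains two domains with inpes*jnpes
+layouts of 6*4 and 4*4, giving 24 + 16 = 40 compute tasks, which the
+other generations then share.  If the run contains four domains in
+total, each with one write group of two tasks, the runscript must
+request 40 + 4*1*2 = 48 tasks.
+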
+### Specifying nest boundary variables
+
+The boundary variables in single-domain regional runs and for the
+(regional) upper parent in nested runs are hardwired to PD,T,Q,CW,U,V.
+However the user specifies which variables are desired for the nests'
+domain boundaries.  This is done through the external nests.txt file.
+A column labeled 'BC' in that file is used for this purpose.  The user
+places an 'H' or a 'V' in that column for the 2-D, 3-D, or 4-D Solver
+internal state H-pt or V-pt variables that will be updated by the
+parent(s) on the nest boundaries.  If the desired Solver internal
+state variable is not listed in nests.txt then simply add it.  If the
+desired variable is not yet in the Solver internal state then see the
+section below called 'How to add a new variable'.  The copy of
+nests.txt in job and regression_tests currently specifies PD,T,Q,CW,U,V as
+the nest boundary variables.
+
+### How to add a new variable:
+
+1. Go to the `~/src/atmos/nmm` directory
+
+2. In the file module_SOLVER_INTERNAL_STATE.F90, search for a similar
+   variable; here let's use the 2D variable ALBEDO as an example.
+
+    a. The first occurrence is the declaration of the pointer; add your
+       variable to that declaration.
+
+    b. Search further; the next call allocates your new variable:
+
+            CALL SET_VAR_PTR(int_state%VARS,NV,AF,'ALBEDO',int_state%ALBEDO,(/IMS,JMS/),(/IME,JME/))
+
+    In most cases you will just need to copy this line and change
+    ALBEDO to the name of your variable.
+
+3. Now your variable is placed in the internal state, allocated, and
+   initialized to NaN.  If you want to initialize a physics variable
+   with a different value, open module_SOLVER_GRID_COMP.F90 in the
+   same directory, go to subroutine PHYSICS_INITIALIZE, search for the
+   string "Initialize allocated arrays", and add your variable to the
+   appropriate place or to the call to the appropriate physics
+   scheme's initialization subroutine, initializing it with the
+   desired value.
+
+The same procedure applies to 3D and 4D arrays.
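+
+Putting steps 2(a)-(b) together for a hypothetical new 2D variable
+MYVAR (a sketch only; copy the exact kind and bounds from the
+neighboring declarations rather than from here):
+
+    REAL,DIMENSION(:,:),POINTER :: MYVAR
+
+    CALL SET_VAR_PTR(int_state%VARS,NV,AF,'MYVAR',int_state%MYVAR,(/IMS,JMS/),(/IME,JME/))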
+
+
+### Adding variable to the history and/or restart file:
+
+1. If this is a new (non-existent) variable in the internal state, go
+   through steps 1-3 in the previous section.
+
+2. Once the variable exists in the internal state, go to the
+   `~/job/regression_tests` directory and find the file called
+   solver_state.txt.
+
+3. Again using ALBEDO as an example, open the file solver_state.txt
+   and search for ALBEDO.
+
+    a. copy and paste line:
+
+            'ALBEDO'     H       R       O      -      -   T  'Dynamic albedo'
+
+    b. rename ALBEDO to the name of the variable you used in step 2b
+       when you added the variable, then give a short description in
+       the 8th column.
+
+4. There are 8 columns in the file: Name, History, Restart, Owned,
+   Import, eXport, Time_series, Description.
+
+    a. If you want your variable in the History file, leave the letter
+       H in the second column; if not, just leave a dash.
+
+    b. If you want your variable in the Restart file, leave the letter
+       R in the third column; if not, just leave a dash.
+
+    c. If you want your variable to be part of the Time series, leave
+       the letter T in the seventh column; if not, just leave a dash.
+
+5. Columns 4, 5 and 6 are part of the "ownership suite" and are
+   intended to be used for exchange between dynamics and physics
+   without unnecessary (duplicate) allocations.
+
+       - O is for owned
+       - X is for export
+       - I is for import
+
+   Designate 'O' for most new variables which will tell the code to
+   allocate memory for it.  Only if you know the variable will be used
+   as an unallocated pointer into another variable that has been
+   allocated will you designate a blank ('-').  X/I are used to
+   specify which variables must be exported/imported between the
+   Solver and the component that handles boundaries and motion of
+   nests.  Specify blanks ('-') unless you are certain the new
+   variable is required for nests' boundaries and/or motion.
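+
+For example, a hypothetical new variable MYVAR that should appear in
+the history and restart files, owned by the Solver, with no
+import/export and no time series, would get a line like:
+
+    'MYVAR'      H       R       O      -      -   -  'My new variable'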
Index: checkout/doc/old/README.css
===================================================================
--- checkout/doc/old/README.css	(nonexistent)
+++ checkout/doc/old/README.css	(revision 94669)
@@ -0,0 +1,100 @@
+body {
+    margin: auto;
+    padding-right: 1em;
+    padding-left: 1em;
+    max-width: 44em; 
+    border-left: 1px solid black;
+    border-right: 1px solid black;
+    font-family: Verdana, sans-serif;
+    font-size: 100%;
+    line-height: 140%;
+    color: #333; 
+}
+pre {
+    border: 1px dotted gray;
+    background-color: #ececec;
+    color: #111111;
+    padding: 0.5em;
+}
+code {
+    font-family: monospace;
+}
+table {
+    border-collapse: collapse;
+}
+table, th, td {
+  border: 1px solid black;
+}
+th, td {
+  padding: 0.5em;
+}
+h1 a, h2 a, h3 a, h4 a, h5 a { 
+    text-decoration: none;
+    color: #4a5ada;
+}
+h1 code, h2 code, h3 code, h4 code, h5 code { 
+    font-size: 120%;
+}
+h1, h2, h3, h4, h5 {
+    font-family: verdana;
+    font-weight: bold;
+    border-bottom: 1px dotted black;
+    color: #4a5ada;
+}
+h1 {
+        font-size: 150%;
+        text-align: center ;
+        border: 2px solid black;
+        padding: 2em;
+}
+h1:first-of-type{
+        font-size: 150%;
+        text-align: center ;
+        border: 2px solid black;
+        background-color: #ddddff;
+}
+
+h2 {
+    font-size: 140%;
+    border: 1px dotted black;
+    text-align: center;
+    padding: 0.5em;
+}
+
+h3 {
+        font-size: 130%;
+}
+
+h4 {
+        font-size: 120%;
+        font-style: italic;
+}
+
+h5 {
+        font-size: 110%;
+        font-style: italic;
+}
+
+h1.title {
+        font-size: 200%;
+        font-weight: bold;
+        padding-top: 0.2em;
+        padding-bottom: 0.2em;
+        text-align: left;
+        border: none;
+}
+
+dt code {
+        font-weight: bold;
+}
+dd p {
+        margin-top: 0;
+}
+
+#footer {
+        padding-top: 1em;
+        font-size: 70%;
+        color: gray;
+        text-align: center;
+        }
Index: checkout/doc/old/NEWTEST.md
===================================================================
--- checkout/doc/old/NEWTEST.md	(nonexistent)
+++ checkout/doc/old/NEWTEST.md	(revision 94669)
@@ -0,0 +1,876 @@
+<a name="new-system"></a>New Test System
+========================================
+
+The old regression test system has been replaced by a new system.  It
+has a different design than the old one.  It has a superset of the
+capabilities of the old system, but the different design leads to
+advantages and disadvantages.
+
+Presently, that implementation is available via the NEMS/tests/rtgen
+script and two scripts it generates (rtrun, rtreport).  For backward
+compatibility, there is a wrapper "rt.sh" script to prevent users from
+having to learn a new system if they are only running the regression
+tests (not modifying them).
+
+<a name="design"></a>Design and Capabilities
+--------------------------------------------
+
+This system works on a different principle than the older one.  The
+old system ran shell scripts specific to each model or test which
+copied files from outside the NEMS test area and ran external programs
+to generate some inputs.
+
+The new system has a directory of prepared inputs, has no external
+dependencies, and simply runs the NEMS executable without any
+test-specific scripts.  In other words, scripts like the
+`exglobal_fcst_nems.sh` are no longer used.  This makes porting and
+workflow changes simpler, but has the disadvantage of not testing
+model workflow scripts.  That disadvantage is intentional; the purpose
+of the NEMS regression tests is to test the NEMS, not model workflow
+scripts.
+
+<a name="running"></a>Running the System
+----------------------------------------
+
+This section explains how to run the system in its simplest form.
+Later sections discuss [running subsets of the tests](#run-sub),
+[dependency resolution](#dep-res), and [available tests](#list-avail).
+We provide two methods: a simple way using the rt.sh wrapper, and a
+more complex way that provides complete control and flexibility.
+
+### <a name="new-rtsh"></a>Simple Method: rt.sh
+
+For backward compatibility, there is an rt.sh script that acts
+similarly to the old rt.sh.  Some aspects are different to give extra
+flexibility.
+
+To execute in an sh-family shell (sh, bash, ksh, etc.):
+
+    cd NEMS/tests
+    ./rt.sh (options) > rt.log 2>&1 &
+
+To execute in a csh-family shell (csh, tcsh):
+
+    cd NEMS/tests
+    ./rt.sh (options) >& rt.log &
+
+This will run rt.sh in the background and send all output to the
+`rt.log` file.  To see the success or failure information, look in the
+`rt.log` file.
+
+The `(options)` specify what is to be run.  Common needs are:
+
+* `-f` = run the full test suite
+* `-s` = only run the "standard" tests
+* `-t setname` = run the specified set of tests.  See
+  `compsets/all.input` for the full list.  Common names are `standard`,
+  `gfs`, and `nmm`
+* `-b setname` = regenerate the baseline.
+* `-n /path/to/baseline` = specifies the location of the baseline
+  when running the suite in verification or baseline generation modes.
+* `-r PLATFORM:/path/to/rtgen.###` - used by the full test method.
+  See below.
+* `-p project` = set the project or account to use for CPU hours.
+  If unspecified, one will be automatically picked based on 
+  CPU availability.
+
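+For example, to run only the standard tests while charging a specific
+project (the project name here is illustrative):
+
+    cd NEMS/tests
+    ./rt.sh -s -p cmp > rt.log 2>&1 &
+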
+### Full Test Method
+
+The process of running is:
+
+    ./NEMS/tests/rtgen   # generates rtrun and rtreport commands
+    /path/to/USERNAME/rtgen.(ID)/rtrun (options)
+    /path/to/USERNAME/rtgen.(ID)/rtreport
+
+To use this for a commit to the trunk, one must copy the results to
+the NEMS/tests directory.  This could be done manually, or one could
+run rt.sh and tell it to skip the rtgen step.  To do this, use the
+`rt.sh -r` option:
+
+    ./rt.sh -r (PLATFORM):/path/to/USERNAME/rtgen.(ID)
+
+where `(PLATFORM)` is "theia" or "wcoss.phase1".
+
+The rest of this section explains the purpose and function of rtgen,
+rtrun and rtreport.
+
+### Step 1: Generate Test Scripts (rtgen)
+
+The first step is to run `rtgen`.  This will generate a set of scripts
+to run the requested tests.  If you do not request any tests, it will
+run all tests.
+
+    ./NEMS/tests/rtgen
+
+That command will give you instructions and will log the more
+important parts of its execution:
+
+    11/17 18:42:38Z rtgen-INFO:  Will run all known tests.
+    11/17 18:42:50Z rtgen-INFO:  Auto-chosen project for job submission is 'cmp'
+    11/17 18:42:51Z rtgen-INFO:  Auto-chosen ptmp is '/path/to/USERNAME'
+    11/17 18:42:51Z rtgen-INFO:  Generating workflow with id 23768.
+    11/17 18:42:55Z rtgen-INFO:  Requested test has been generated.
+    You need to run the test now.   You have three options:
+    OPTION 1: Put this in your cron:
+      */3 * * * * /path/to/USERNAME/rtgen.23768/rtrun --step --zero-exit \
+         > /path/to/USERNAME/rtgen.23768/rtrun-cron.log 2>&1
+
+    OPTION 2: Run this program:
+      /path/to/USERNAME/rtgen.23768/rtrun --loop
+
+    OPTION 3: Verbose mode: run this program:
+      /path/to/USERNAME/rtgen.23768/rtrun -v --loop
+    Adding -n to that command will disable colors.
+
+### Step 2: Run the Test (rtrun)
+
+The rtrun command runs the tests until all have succeeded or failed.
+You have three options for how to run this.  The easiest execution
+option is number 3, which runs on the command line and reports the
+queue status every few minutes.  The path to rtrun will vary, but the
+command will look something like this:
+
+    /path/to/USERNAME/rtgen.23768/rtrun -v --loop
+
+If the colors annoy you, add the `-n` switch, and if you don't want
+the queue state, remove the `-v` switch.
+
+The components of that path are:
+
+* `/path/to` - a scrub area, such as /scratch4/NCEPDEV/stmp4 or /ptmpp1
+* `USERNAME` - your username, such as `emc.nemspara` or `Samuel.Trahan`
+
+The `rtrun` command will generate output like this:
+
+    11/17 00:19:21Z rtrun INFO: check dependencies and submit jobs...
+    11/17 00:19:22Z rtrun INFO: check status...
+    11/17 00:19:22Z rtrun INFO: workflow is still running and no jobs have failed.
+    11/17 00:19:22Z rtrun INFO: sleep 2
+    11/17 00:19:24Z rtrun INFO: get queue information
+     Job ID  Reserv   Queue   Procs ST Queue Time  Stdout Location
+    -------- ------ --------- ----- -- ----------- ------------------------------------
+      573626        dev          64 R  11/17 00:14 /.../tmp/log/test_gfs_gocart_nemsio.log
+    From bjobs -l  -u Samuel.Trahan (age 0 sec.)
+    11/17 00:19:24Z rtrun INFO: sleep 100
+
+It will keep looping until all jobs have succeeded or failed.  If all
+goes well, the tests will all pass and you will see this message:
+
+    11/17 00:21:04Z rtrun INFO: check dependencies and submit jobs...
+    11/17 00:21:05Z rtrun INFO: check status...
+    11/17 00:21:05Z rtrun INFO: workflow is complete and all jobs succeeded.
+
+### Step 3: Report Results (rtreport)
+
+At that point, you can run rtreport to get a report of the tests.
+Actually, you can run rtreport at any time.  If the tests are not yet
+complete, it will tell you which ones are complete.  It will report
+all it knows about failed tests too.  There are two output formats:
+
+To run:
+
+    /path/to/USERNAME/rtgen.23768/rtreport [mode]
+
+Where the optional `mode` is one of:
+
+  * `status` - short output that only lists failed tests and counts
+    the number of failed, complete, and unfinished tests.
+
+  * `txt` - full text output of all information (the default).
+
+The output of `txt` mode (the default) looks something like this:
+
+    BUILD nmm.x: SUCCEEDED
+    BUILD nmm.debug.x: SUCCEEDED
+    BUILD gsm.x: SUCCEEDED
+    BUILD gsm_gocart.x: SUCCEEDED
+    TEST #1: PASS
+      Test nmm_cntrl starting.
+      Wed Nov 16 22:51:23 UTC 2016
+      .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_bin_0000h_00m_00.00s: bit-for-bit identical
+      .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_bin_0024h_00m_00.00s: bit-for-bit identical
+      .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_bin_0048h_00m_00.00s: bit-for-bit identical
+      .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_nio_0000h_00m_00.00s: bit-for-bit identical
+      .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_nio_0024h_00m_00.00s: bit-for-bit identical
+      .../REGRESSION_TEST/NMMB_glob/nmmb_hst_01_nio_0048h_00m_00.00s: bit-for-bit identical
+      .../REGRESSION_TEST/NMMB_glob/nmmb_rst_01_bin_0024h_00m_00.00s: bit-for-bit identical
+      .../REGRESSION_TEST/NMMB_glob/nmmb_rst_01_nio_0024h_00m_00.00s: bit-for-bit identical
+      TEST PASSED
+    TEST #2: PASS
+      Test nmm_nemsio starting.
+    ... information about more tests ...
+
+
+### <a name="rerun"></a>Rerunning Failed Tests
+
+If a test fails, you can request that it be rerun via the `rtrewind`
+command.  The command is located in the same directory as `rtrun`
+and can be called in two different ways:
+
+    /path/to/USERNAME/rtgen.23768/rtrewind -a
+
+    /path/to/USERNAME/rtgen.23768/rtrewind job1 [job2 [...]]
+
+The first method requests a rerun of ALL tests and builds while the
+second requests only certain ones be rerun.
+
+The jobs (`job1`, `job2`, ...) are the names from the test suite such
+as `gsm.x` or `nmm_cntrl`.  You can optionally include `test_` or
+`build_` before the name, as it is printed by the `rtreport` command.
+
+### <a name="run-sub"></a>Running Subsets of the Test Suite
+
+The test suite, as of this writing, has 48 tests and 5 build options.
+Frequently, you only want to run a few of them.  The `rtgen` script
+has a simple set arithmetic language for specifying what to run.  The
+subsetting is specified on the command line.  For example, to run all
+standard nmm tests, you need to take the intersection of those two
+sets of tests:
+
+    ./NEMS/tests/rtgen 'inter(nmm,standard)'
+
+The `rtgen` will generate a workflow to run just those tests.  
+
+Other subsetting operations:
+
+    union(nmm,wam)   # run all nmm and wam tests
+    minus(gfs,wam)   # run all gsm (gfs) tests that are not wam tests
+    {gfs_slg,nmm_cntrl}  # run the gfs_slg and nmm_cntrl tests
+
+You can combine multiple operations:
+
+    minus(inter(union(gfs,nmm),standard),{gfs_slg,nmm_cntrl})
+
+That will ask rtgen to run all gsm (gfs) and nmm tests that are
+standard tests, except for `gfs_slg` and `nmm_cntrl`.
+
+Despite that, the rtgen will still run the gfs_slg test.  Why?
+Dependency resolution.
+
+### <a name="dep-res"></a>Dependency Resolution
+
+Some tests have dependencies, and `rtgen` will resolve those
+dependencies automatically, similar to how `make` works.  For example,
+the `gfs_slg_rsthst` requires the `gfs_slg` to run first.  Output from
+`gfs_slg` is used as input to `gfs_slg_rsthst`.  If you ask `rtgen` to
+run `gfs_slg_rsthst` without running `gfs_slg`, it will see the
+dependency and add `gfs_slg` to your list of tests.  The builds are
+handled the same way.  The `gfs_slg` has a dependency on the build
+`gsm.x`, and so `rtgen` will always add the `gsm.x` build if you
+select the `gfs_slg` test.
+
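+For example, this request names only one test, but `rtgen` will
+automatically add the `gfs_slg` test and the `gsm.x` build to satisfy
+its dependencies:
+
+    ./NEMS/tests/rtgen '{gfs_slg_rsthst}'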
+
+### <a name="list-avail"></a>List of Available Tests and Sets
+
+The configuration for `rtgen` is stored in the compsets/all.input file
+in the app level repository.  This is where you specify the available
+tests and sets of tests.
+
+The top few lines of that file look like this:
+
+    load 'gsm.input'
+    load 'nmm.input'
+    run nmm_cntrl              @ nmm, standard, baseline, nmmglob
+    run nmm_nemsio             @ nmm,                     nmmglob
+    run nmm_rest               @ nmm,                     nmmglob
+    ... many more "run" statements ...
+
+The first two lines import the details of the test from other files.
+The lines beginning with `run` specify a test to run and the sets it
+belongs to.  The test must be one declared in the other file,
+as discussed later in this document.
+
+The sets listed after the @ sign are the ones recognized by the
+[subsetting functionality of rtgen](#run-sub).
+
+Note that you can enable tests on only certain platforms by including
+a comparison operator in the list of subsets:
+
+    run gfs_slg_2thread        @ gfs, standard, baseline, slg, plat==wcoss.phase1
+
+This line ensures the `gfs_slg_2thread` is only available on WCOSS Phase 1.
+
+
+
+<a name="work-area"></a>Work Area Contents
+------------------------------------------
+
+Running the `rtgen` creates a directory in a scrub area which will
+contain the generated scripting system, input and output files, logs,
+and resource usage information.  This section documents those files
+and directories.
+
+Recall that running `rtgen` creates a directory with a name like this:
+
+    /path/to/USERNAME/rtgen.23768
+
+That directory contains the following:
+
+* rtrun script
+
+* rtreport script
+
+* jobs directory
+
+* scripts directory
+
+* ush directory
+
+* src directory
+
+  * install.sh
+
+  * uninstall.sh
+
+* exec directory
+
+* include directory
+
+* rocoto directory
+
+* com directory
+
+* tmp directory
+
+  * tmp/log directory
+
+### Jobs, Scripts and Ush
+
+These are the three-tier NCEP workflow directories and have the usual
+meanings:
+
+* jobs - sets up the environment and passes control to the "scripts" level
+
+* scripts - high-level logic for each test
+
+* ush - low-level utility functions
+
+For each test, there is one "jobs" directory file and one "scripts"
+directory file.  The "scripts" directory and "jobs" directory are
+populated by the test blocks, which will be discussed in great detail
+in the [Test Description Language](#desc-lang) section.  They are
+generated from the [test blocks](#new-tests).
+
+### Src, Exec, and Include
+
+The `src` directory does not contain source code.  Instead, it
+contains two scripts that describe how to build or uninstall the
+`NEMS.x`:
+
+* install.sh - knows how to build the NEMS.x based on the instructions
+  in the [build blocks](#new-build) as explained in the [Test
+  Description Language](#desc-lang) section in great detail.
+
+* uninstall.sh - deletes the copies of `NEMS.x` and `modules.nems`
+  created by install.sh.
+
+The `install.sh` creates executables and modulefiles which are copied
+into the `exec` and `include` directories.
+
+* exec - one executable for each NEMS build
+
+* include - one file for each NEMS build containing a sequence
+  of "module load" commands.  These commands will be run before
+  executing the NEMS.x.
+
+### Rocoto Directory
+
+The `rtgen` makes one file in the `rocoto` directory.  The `rtrun`
+will create a second file.
+
+* workflow.xml - the definition of the workflow generated by `rtgen`.
+  This includes dependencies and resource requirements.  There is one
+  shell command for each test or build.
+
+* workflow.db - created by `rtrun`, this contains the Rocoto internal
+  state information.
+
+### Tmp and Logs
+
+The `tmp` directory contains all logs and all execution directories
+for each test.
+
+* tmp/log/rocoto.log - log file from Rocoto.  Contains information about
+  batch system events, such as job failures or job submissions.
+
+* tmp/log/*.log - all other files contain logs about a test or build
+
+* tmp/* - all other directories are work areas for tests.  They
+  contain inputs and outputs from the NEMS.x
+
+### Scripts rtrun and rtreport
+
+These are discussed in earlier sections.  The scripts are generated
+automatically by `rtgen`.  The `rtrun` runs Rocoto and the `rtreport`
+scans the reports, combining them into one text file.
+
+### COM directory
+
+This directory contains one subdirectory for each test with all
+verified files as described in a test's [criteria](#criteria) block.
+It also contains the "report.txt" file with the report of the test
+success or failure.
+
+<a name="desc-lang"></a>Test Description Language
+-------------------------------------------------
+
+This chapter discusses the language used by the `rtgen` tool to
+describe regression tests and compsets.  The language consists of
+"modules" which are simply a collection of variables and functions. A
+module has a type: build, test, hash, etc.  A set of `run` commands
+list which runnable modules should be executed.
+
+### <a name="vardef"></a>Variable Definitions and Modules
+
+The simplest type of module is a hash, which looks like this:
+
+    nems_vars={
+        atm_model='none'
+        atm_petlist_bounds="-1 -1"
+        ocn_model='none'
+        ocn_petlist_bounds="-1 -1"
+        ice_model='none'
+        ice_petlist_bounds="-1 -1"
+        med_model='nems'
+        med_petlist_bounds="-1 -1"
+        med_atm_coupling_interval_sec='-1'
+        med_ocn_coupling_interval_sec='-1'
+    }
+
+In this example, we have declared a hash called `nems_vars` which
+contains several variables, such as `atm_model` and
+`atm_petlist_bounds`.  Later on, another module declaration can "use"
+this module, to import its variables:
+
+    nmm_vars_global={
+        use plat%nmm_dflt
+        use nems_vars
+        use common_vars
+        use nmm_vars
+        use nmm_aliases
+        use nmm_uncoupled
+        GBRG="glob"
+        CNTL_NAME='NMMB_glob'
+    }
+
+Values can include variable substitution, which uses a similar syntax
+as shell, but with different escape characters:
+
+    common_vars={
+        THRD=1
+        WLCLK=15
+        GEFS_ENSEMBLE=0
+        GEN_ENSEMBLE=0
+        WRITE_DOPOST='.false.'
+        POST_GRIBVERSION='grib1'
+        CONF="@[plat%PARMnems]"
+    }
+
+Here, the `CONF` variable in the `common_vars` module has the value of
+the `PARMnems` variable in the `plat` module.
+
+### Strings
+
+There are three ways of specifying a string:
+
+* Double quotes: "... text here with @[VARIABLE] expansion ..."
+* Single quotes: '... text here with no variable expansion ...'
+* Block string:
+
+        [[[multi-line string
+        with @[VARIABLE] expansion ]]]
+
+If you need to insert a literal @ into the string, you have three
+options.  In these examples, we'll use the multi-line string format:
+
+* [[[  @['this text is not expanded']   ]]]
+* [[[  @["this text is not expanded"]  ]]]
+* [[[ Simple literal @[@] ]]]
+
+### <a name="embedscript"></a> Embedded Scripts
+
+Most of the scripts required to run the tests are automatically
+generated, but there are occasional instances when you need to specify
+specific code.  This is done via `embed` blocks:
+
+    embed bash nems_regtest_prep(RUNDIR,modules,CNTL) [[[
+            mkdir -p "$RUNDIR" "$CNTL"
+            cd @[RUNDIR]
+            source "$modules"
+            export MPI_TYPE_DEPTH=20
+            export ESMF_RUNTIME_COMPLIANCECHECK=OFF:depth=4
+    ]]]
+
+In this example, we have embedded a bash script called
+`nems_regtest_prep`.  
+
+#### Embedded Script Variables: $ vs. @
+
+In the example script, there are two methods of doing variable substitution:
+
+* `@[RUNDIR]`
+* `"$RUNDIR"`
+
+They have slightly different meanings.  In the case of `@[RUNDIR]`,
+the value of the `RUNDIR` variable is substituted directly in the
+generated script.  If the variable contained any shell metacharacters,
+those would be copied verbatim.  In the case of `$RUNDIR`, the bash
+variable is used instead.  That variable's value is set before the
+code in `nems_regtest_prep` is run.
+
+Either approach is valid.  It is up to the user to decide which one to use.
+
+### Platform Detection
+
+The test suite needs to reconfigure certain aspects based on platform;
+WCOSS vs. Theia vs. GAEA, etc.  This is done with `platform` blocks.
+These are simply modules with a `detect` function.  After all
+platforms are defined, an `autodetect` block selects between them.
+
+Here is an example of a platform.  This is the one for Phase 1 of WCOSS.
+
+    platform wcoss.phase1 {
+        use wcoss.common
+        CPU_ACCOUNT='NAM-T2O'
+        pex='1'
+        cores_per_node=32
+        MPI='LSF'
+        SHORT_TEST_QUEUE='&SHORTQ;'
+        LONG_TEST_QUEUE='&LONGQ;'
+        BUILD_QUEUE='&BUILDQ;'
+    
+        embed bash detect [[[
+            # This function is used at PARSE TIME to detect whether we are
+            # on WCOSS Phase 1.  It must be very fast and low resource
+            # usage since the parser runs it.
+            if [[ -d /usrx && -d /global && -e /etc/redhat-release && \
+                  -e /etc/prod ]] ; then
+                # We are on WCOSS Phase 1 or 2.
+                if ( ! cat /proc/cpuinfo |grep 'processor.*32' ) ; then
+                    # Fewer than 32 fake (hyperthreading) cpus, so Phase 1.
+                    exit 0
+                fi
+            fi
+            exit 1
+        ]]]
+        ... more wcoss stuff ...
+    }
+
+Note the `embed bash` block called `detect`.  This is the bash
+function that is run to detect whether the script is running on WCOSS
+Phase 1.
+
+Once all platforms are defined, there is an autodetect block:
+
+    autodetect plat (/ wcoss.phase1, theia /)
+
+This will define the `plat` variable, which is a duplicate of either
+`wcoss.phase1` or `theia`.
+
+### <a name="new-build"></a> Build Definition
+
+The `build` blocks define a method of building an executable.  They
+must define three variables and a function:
+
+* `NEMS.x` = path to the NEMS executable created by this build
+
+* `modules.nems` = list of "module load" commands to execute before
+   running the executable
+
+* `target` = file to check to ensure the build succeeded; should be
+   the same as the `NEMS.x` variable
+
+* `build` = an `embed bash` function that builds the program.
+
+Here is an example.  This builds the GOCART-capable standalone GSM in
+the NEMSLegacy branch:
+
+    build gsm_gocart.x {
+        use plat
+        NEMS.x="@[plat%EXECrt]/NEMS_gocart.x"
+        modules.nems="@[plat%INCrt]/NEMS_gocart.x.modules"
+        target="@[NEMS.x]"
+        build=NEMSAppBuilder(NEMS.x="@[NEMS.x]",modules.nems="@[modules.nems]",
+                             OPTS="app=GSM-GOCART")
+    }
+
+The NEMSAppBuilder function is declared elsewhere.  It is used by most
+of the `build` definitions to avoid duplication.  That function looks
+like this:
+
+    embed bash NEMSAppBuilder(NEMS.x,modules.nems,OPTS)
+    [[[
+            mkdir -p "@[plat%EXECrt]" "@[plat%INCrt]"
+            rm -f "@[NEMS.x]" "@[modules.nems]"
+            cd @[plat%HOMEnems]
+    
+            # NOTE: Replace "rebuild" with "norebuild" to disable "gmake clean"
+            ./NEMS/NEMSAppBuilder rebuild $OPTS
+    
+            cd @[plat%SRCnems]
+            cp -fp ../exe/NEMS.x "@[NEMS.x]"
+            cp -fp conf/modules.nems "@[modules.nems]"
+    ]]]
+
+Notice that the three variables we're passing from gsm_gocart.x%build
+are in the definition line of NEMSAppBuilder:
+
+    embed bash NEMSAppBuilder(NEMS.x,modules.nems,OPTS)
+    ...
+    build gsm_gocart.x {
+        ...
+        build=NEMSAppBuilder(NEMS.x="@[NEMS.x]",modules.nems="@[modules.nems]",
+                             OPTS="app=GSM-GOCART")
+
+### <a name="new-tests"></a>Tests
+
+A test is a module that defines the following:
+
+* dependencies - any other tests or builds that have to run first
+
+* `prep` - a preparation step to run before anything else.  This is
+  generally `mkdir`, `module` or `cd` commands.
+
+* `input` - a `filter` block that provides a list of input files or
+  directories and instructions on how to copy or filter them.  This is
+  described below.
+
+* `execute` - a `spawn` block that describes how to run the `NEMS.x`.
+  This is also used to generate job cards to request the needed
+  resources.
+
+* `output` - criteria for validating the test output.  These are
+  usually `criteria` blocks, described below.
+
+This is the `test` block for the global nmm control.  Later text
+describes the meaning of each part:
+
+    # nmm_cntrl test
+    test nmm_cntrl: nmm.x {
+        use nmm_vars_global
+    
+        # Convenience variables:
+        RUNDIR_ROOT="@[plat%TMPrt]"
+        RUNDIR="@[RUNDIR_ROOT]/@[TEST_NAME]"
+        TEST_DESCR="Compare NMMB-global results with previous trunk version"
+        CNTL="@[plat%BASELINE]/@[CNTL_NAME]"      # Control baseline area
+        TEST_IN="@[plat%INPUTS]/@[CNTL_NAME]"   # Test-specific input data
+        COM="@[plat%COMrt]/@[TEST_NAME]"
+    
+        criteria output {
+            #    NEMS.x output file --------- comparison - control file or dir
+            "nmmb_hst_01_bin_0000h_00m_00.00s" .bitcmp. "@[CNTL]"
+            "nmmb_hst_01_bin_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+            "nmmb_hst_01_bin_0048h_00m_00.00s" .bitcmp. "@[CNTL]"
+            "nmmb_hst_01_nio_0000h_00m_00.00s" .bitcmp. "@[CNTL]"
+            "nmmb_hst_01_nio_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+            "nmmb_hst_01_nio_0048h_00m_00.00s" .bitcmp. "@[CNTL]"
+            "nmmb_rst_01_bin_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+            "nmmb_rst_01_nio_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+        }
+    
+        # The prep is run at the top of any job.  It should do such things
+        # like making directories and loading modules.
+        prep=nems_regtest_prep(
+            RUNDIR="@[RUNDIR]",modules="@[nmm.x%modules.nems]",
+            CNTL="@[CNTL]")
+    
+        # The execute step runs the program:
+        spawn execute {
+            { "@[nmm.x%NEMS.x]", ranks="@[TASKS]", threads="@[OpenMPThreads]" }
+        }
+    
+        filters input {
+            # work file         operation   input file
+         "input_domain_01"        .copy. "@[TEST_IN]/test_input_nmmb_global"
+         "input_domain_01_nemsio" .copy. "@[TEST_IN]/test_input_nmmb_global.nemsio"
+         "GWD_bin_01"             .copy. "@[TEST_IN]/GWD_bin_01"
+    
+         "nems.configure"      .atparse. "@[CONF]/nems.configure.@[nems_configure].IN"
+         "atmos.configure"     .atparse. "@[CONF]/atmos.configure_nmm"
+    
+         "configure_file_01"   .atparse. "@[CONF]/nmm_conf/nmm_@[GBRG]_conf.IN"
+         "model_configure"        .copy. "configure_file_01"
+    
+         "*"                   .copydir. "@[plat%NMM_DATA]"
+    
+         "VEGPARM.TBL"            .copy. "IGBP_VEGPARM.TBL"
+         "LANDUSE.TBL"            .copy. "IGBP_LANDUSE.TBL"
+         "ETAMPNEW_DATA"          .copy. "ETAMPNEW_DATA.expanded_rain"
+         "fort.28"                .link. "global_o3prdlos.f77"
+         "fort.48"                .link. "global_o3clim.txt"
+    
+         "solver_state.txt"       .copy. "@[plat%PARMnems]/solver_state.txt"
+         "nests.txt"              .copy. "@[plat%PARMnems]/nests.txt"
+        }
+    }
+    
+#### Test Dependencies
+
+The first line (after the comment) is this:
+
+    test nmm_cntrl: nmm.x {
+
+The `: nmm.x` indicates that the `nmm.x` build has to run before the
+`nmm_cntrl` can start.  The test suite will include that dependency in
+its Rocoto or ecFlow automation system.
+
+#### Test Prep
+
+The prep step is a simple script that prepares the environment.  In
+this case, it just runs the nems_regtest_prep, which we discussed
+earlier:
+
+        # The prep is run at the top of any job.  It should do such things
+        # like making directories and loading modules.
+        prep=nems_regtest_prep(
+            RUNDIR="@[RUNDIR]",modules="@[nmm.x%modules.nems]",
+            CNTL="@[CNTL]")
+
+Note that it refers to `@[RUNDIR]` and `@[CNTL]`.  Those variables are
+defined earlier in the same test:
+
+        # Convenience variables:
+        RUNDIR_ROOT="@[plat%TMPrt]"
+        RUNDIR="@[RUNDIR_ROOT]/@[TEST_NAME]"
+        TEST_DESCR="Compare NMMB-global results with previous trunk version"
+        CNTL="@[plat%BASELINE]/@[CNTL_NAME]"      # Control baseline area
+        TEST_IN="@[plat%INPUTS]/@[CNTL_NAME]"   # Test-specific input data
+        COM="@[plat%COMrt]/@[TEST_NAME]"
+
+#### Test Input Filter
+
+This block specifies the input files and how to prepare them.  It
+declares an `input` variable inside the `nmm_cntrl` test, which is of
+type `filters`:
+
+        filters input {
+            # work file         operation   input file
+         "input_domain_01"        .copy. "@[TEST_IN]/test_input_nmmb_global"
+         "input_domain_01_nemsio" .copy. "@[TEST_IN]/test_input_nmmb_global.nemsio"
+         "GWD_bin_01"             .copy. "@[TEST_IN]/GWD_bin_01"
+    
+         "nems.configure"      .atparse. "@[CONF]/nems.configure.@[nems_configure].IN"
+         "atmos.configure"     .atparse. "@[CONF]/atmos.configure_nmm"
+    
+         "configure_file_01"   .atparse. "@[CONF]/nmm_conf/nmm_@[GBRG]_conf.IN"
+         "model_configure"        .copy. "configure_file_01"
+    
+         "*"                   .copydir. "@[plat%NMM_DATA]"
+    
+         "VEGPARM.TBL"            .copy. "IGBP_VEGPARM.TBL"
+         "LANDUSE.TBL"            .copy. "IGBP_LANDUSE.TBL"
+         "ETAMPNEW_DATA"          .copy. "ETAMPNEW_DATA.expanded_rain"
+         "fort.28"                .link. "global_o3prdlos.f77"
+         "fort.48"                .link. "global_o3clim.txt"
+    
+         "solver_state.txt"       .copy. "@[plat%PARMnems]/solver_state.txt"
+         "nests.txt"              .copy. "@[plat%PARMnems]/nests.txt"
+        }
+
+Notice that there are four different operations in the middle column:
+
+| Local file          | Operation   | Remote file or directory        |  
+| ------------------- | ----------- | ------------------------------- |
+| `"GWD_bin_01"`      | `.copy.`    | `"@[TEST_IN]/GWD_bin_01"`       |
+| `"*"`               | `.copydir.` | `"@[plat%NMM_DATA]"`            |
+| `"fort.28"`         | `.link.`    | `"global_o3prdlos.f77"`         |
+| `"atmos.configure"` | `.atparse.` | `"@[CONF]/atmos.configure_nmm"` |
+
+* `.copy.` - copies the remote file (third column) to the local file
+  (first column).  
+
+        cp -p "$third_column" "$first_column"
+
+* `.link.` - makes a symbolic link to the remote file (third column)
+  from the local file (first column)
+
+        ln -s "$third_column" "$first_column"
+
+* `.copydir.` - copies from the remote file or directory (third
+  column) all files that match the glob (first column) into the local
+  directory.
+
+        cp -rp "$third_column"/$first_column .
+
+* `.atparse.` - runs the remote file (third column) through a filter
+  to create the local file (first column).  The filter will replace
+  text like `@[varname]` with the corresponding variable.  
+
+  In the `.atparse.` variable replacement, only variables from the
+  test's module are replaced.  Hence, if you want many variables
+  accessible to `.atparse.`d files, you need to either declare or
+  `use` them.  The `nmm_cntrl` test does that at the top of its
+  declaration:
+
+        test nmm_cntrl: nmm.x {
+            use nmm_vars_global
+        
+            # Convenience variables:
+            RUNDIR_ROOT="@[plat%TMPrt]"
+            RUNDIR="@[RUNDIR_ROOT]/@[TEST_NAME]"
+            TEST_DESCR="Compare NMMB-global results with previous trunk version"
+            CNTL="@[plat%BASELINE]/@[CNTL_NAME]"      # Control baseline area
+            TEST_IN="@[plat%INPUTS]/@[CNTL_NAME]"   # Test-specific input data
+            COM="@[plat%COMrt]/@[TEST_NAME]"
+
+  Everything in the `nmm_vars_global` module will be available plus
+  all six of the declared "convenience variables".
+
+  Note that variables with a period (".") or percent ("%") in their
+  name are not yet available.  That will be fixed in a later release.
+
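+  As a sketch, suppose an `.atparse.`d template (the line and the
+  FCSTLEN variable here are hypothetical) contains:
+
+        nhours_fcst: @[FCSTLEN]
+
+  If the test's module defines FCSTLEN=48, the generated local file
+  will contain:
+
+        nhours_fcst: 48
+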
+#### Test Execution
+
+The next step is to actually run the `NEMS.x`:
+
+        # The execute step runs the program:
+        spawn execute {
+            { "@[nmm.x%NEMS.x]", ranks="@[TASKS]", threads="@[OpenMPThreads]" }
+        }
+
+The columns inside the `execute` block have these meanings:
+
+* `"@[nmm.x%NEMS.x]"` - the program to run
+
+* `ranks="@[TASKS]"` - number of mpi ranks
+
+* `threads="@[OpenMPThreads]"` - optional; number of threads per rank.
+  Default is 1.
+
+* ppn=8 - not used here; optional.  Specifies the number of MPI ranks
+  per node.  The GSM needs this due to memory limits.  Default is
+  calculated automatically by the system, and will be the largest
+  number of MPI ranks possible.
+
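+A hypothetical variant of the execute block that also limits the
+number of MPI ranks per node, as the GSM requires, would look like
+this (the ppn value is illustrative):
+
+        spawn execute {
+            { "@[gsm.x%NEMS.x]", ranks="@[TASKS]", threads="@[OpenMPThreads]", ppn=8 }
+        }
+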
+#### <a name="criteria"></a> Test Verification or Baseline Generation
+
+The last step is to either verify the results or generate the
+baseline.  Both cases are handled by the output criteria block:
+
+    criteria output {
+        #    NEMS.x output file --------- comparison - control file or dir
+        "nmmb_hst_01_bin_0000h_00m_00.00s" .bitcmp. "@[CNTL]"
+        "nmmb_hst_01_bin_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+        "nmmb_hst_01_bin_0048h_00m_00.00s" .bitcmp. "@[CNTL]"
+        "nmmb_hst_01_nio_0000h_00m_00.00s" .bitcmp. "@[CNTL]"
+        "nmmb_hst_01_nio_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+        "nmmb_hst_01_nio_0048h_00m_00.00s" .bitcmp. "@[CNTL]"
+        "nmmb_rst_01_bin_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+        "nmmb_rst_01_nio_0024h_00m_00.00s" .bitcmp. "@[CNTL]"
+    }
+
+The columns have this meaning:
+
+* `"nmmb_hst_01_bin_0000h_00m_00.00s"` - file in the local directory
+
+* `.bitcmp.` - verification method.  Only `.bitcmp.` is supported for now.
+
+* `"@[CNTL]"` - remote directory file or remote directory that
+  contains the baseline.  If it is a remote directory, the file is
+  assumed to have the same name.
+
+In verification mode, the comparisons are performed after running NEMS.x.
+
+In baseline generation mode, the local file (first column) is copied
+to the remote location (third column).
\ No newline at end of file
Index: checkout/doc/old/README.GFS.md
===================================================================
--- checkout/doc/old/README.GFS.md	(nonexistent)
+++ checkout/doc/old/README.GFS.md	(revision 94669)
@@ -0,0 +1,169 @@
+GSM Modification Instructions
+-----------------------------
+
+### How to add new variable to sigf file:
+
+1. Go to the `~/src/atmos/gfs/dyn` directory
+
+2. Search for the new variable:
+
+    a. If it is in a module, add that module in subroutine
+    POINT_DYNAMICS_OUTPUT_GFS in gfs_dynamics_output.f
+
+    b. If it is not in any module, add the new variable to the
+    dynamics internal state, so that it can be passed into
+    POINT_DYNAMICS_OUTPUT_GFS
+
+      1. declare the variable in gfs_dynamics_internal_state_mod.f
+
+      2. allocate the variable in gfs_dynamics_initialize_mod.f
+
+    c. If the new variable is a 2D or 3D field in grid_gr, do nothing
+    (grid_gr has already been declared and allocated)
+
+3. In gfs_dynamics_output.f, add the variable name to the
+   corresponding output list, set the pointer in subroutine
+   POINT_DYNAMICS_OUTPUT_GFS for the new variable. (see example below)
+
+4. In gfs_dynamics_initialize_mod.f, increase the dimension of the
+   output full grid buffer `buff_mult_pieceg` by increasing
+   `ngrids_gg`: `ngrids_gg=ngrids_gg+1` (2D) or
+   `ngrids_gg=ngrids_gg+fld_levs` (3D)
+
+5. If the new variable is a 2D or 3D array, pass it into
+   wrtout_dynamics and add the variable to subroutine grid_collect.  In
+   grid_collect.f, interpolate the field to the full grid and save the
+   data in buff_mult_pieceg.
+
+With these changes, a field (2D or 3D array) or an attribute (1D
+int, real, log) will be added into the sigma field bundle in the
+import write state, and it will then be written out in the write grid
+component.  E.g., to add dpdt (pressure tendency, ptend) to the sigf
+file:
+
+1. dpdt is in grid_gr
+2. in gfs_dynamics_output.f, in DYN_INT_STATE_3D_R_ADIAB, add
+
+        ,'ptend       ', 'OGFS_SIG  ', 'levs      ' &
+
+before tracer "spfh". If adding a new tracer, add that tracer after
+"clwmr".  Notice in POINT_DYNAMICS_OUTPUT_GFS, the pointer for 3d
+real array output is set to buff_mult_pieceg
+
+        R_3D(1)%NAME=>buff_mult_pieceg
+
+We will add the output field ptend to buff_mult_pieceg.
+
+3. in subroutine wrtout_dynamics in wrtout_dynamics.f, get dpdt from
+`grid_gr(:,:,g_dpdt:g_dpdt+levs-1)`, and pass dpdt to grid_collect:
+
+        !
+         do k=1,levs
+           do i=1,lons_lat
+             dpdt(i,lan,k) = grid_gr(i+jlonf,g_dpdt-1+k)
+           enddo
+         enddo
+         call grid_collect (zsg,psg,uug,vvg,ttg,rqg,dpg,dpdt,
+        &          global_lats_a,lonsperlat)
+
+4. in gfs_dynamics_initialize_mod.f, increase ngrids_gg:
+
+        ngrids_gg=ngrids_gg+levs
+
+5. in grid_collect.f, interpolate the field from the reduced grid to
+the full grid, and add this field in buff_mult_pieceg before the
+tracers (rqg) start:
+
+        !
+        do k=1,levs
+          buffi(:,:) = dpdt(:,:,k)
+          CALL uninterpreg(1,kmsk,buffo,buffi,global_lats_a,lonsperlat,
+       & buff_mult_pieceg(1,1,2+5*levs+k) )
+        enddo
+
+The same procedure applies to 2D arrays.
+
+### How to add new variable to sfcf or flxf file:
+
+1. Go to `~/src/atmos/gfs/phys` directory
+
+2. Search for the new variable:
+
+    a. if it is in a module, add that module in subroutine
+    POINT_PHYSICS_OUTPUT_GFS in gfs_physics_output.f
+
+    b. if it is not in any module, and if the new variable is a scalar
+    or a 1D array, add it in physics internal state, so it can be
+    passed into POINT_PHYSICS_OUTPUT
+
+    c. if it is not in any module, and the new variable is a 2D sfc
+    or flx field, add it in gfs_physics_sfc_flx_mod.f:
+
+      1. for a sfc field, add the new variable in data type
+      Sfc_Var_Data,
+
+      2. for a flx field, add the new variable in Flx_Var_Data.
+      Allocate the new field in gfs_physics_sfc_flx_set_mod.f; for a
+      flx field, initialize the field in subroutine flx_init.
+
+3. In gfs_physics_output.f, add the variable name to the corresponding
+output list. 'OGFS_FLX' in the list is for the flx file, 'OGFS_SFC' is
+for the sfc file, and 'OGFS_PHY' is for both files. The field name
+could be `"field_name"//"_"//"statistics property"`. Set the pointer
+in subroutine POINT_PHYSICS_OUTPUT_GFS for the new variable.
+
+4. If the new variable is a 2D or 3D, sfc or flx array:
+
+    a. increase the dimension of the sfc or flx output file buffer. In
+    gfs_physics_initialize_mod.f, increase ngrids_sfcc (total number
+    of sfc fields), ngrids_sfc2d (total number of 2D sfc fields),
+    ngrids_sfc3d (total 2D fields for all 3D sfc fields), or
+    ngrids_flx (total 2D flx fields)
+
+    b. for a sfc field, in subroutine sfc_collect in wrtout_physics.f,
+    interpolate the field to the full grid, put the output full grid
+    field in buff_mult_piecea2d for a 2D array, and put it into
+    buff_mult_piecea3d for a 3D array.
+
+    c. for a flx field, in subroutine wrtflx_a in wrtout_physics.f,
+    interpolate the field to the full grid and put the output full
+    grid field in buff_mult_piecef.
+
+With these changes, a field (2D or 3D array) or an attribute (1D int,
+real, or logical) will be added to the sfc or flx field bundle in the
+import write state, and it will then be written out by the write grid
+component.
+
+For example, to add the sunshine duration time (sunsd) to the flxf
+file:
+
+1. in gfs_physics_sfc_flx_mod.f, in TYPE Flx_Var_Data, add:
+
+        real(kind=kind_phys),pointer:: suntim(:,:)=>null()
+
+   allocate this array in gfs_physics_sfc_flx_set_mod.f; in the
+   allocation of the flx fields, add:
+
+        flx_fld%suntim  (dim1,dim2), &
+
+2. in gfs_physics_output.f, in array PHY_INT_STATE_2D_R_FLX, add:
+
+        ,'sunsd_acc       ', 'OGFS_FLX        ', 'sfc             ' &
+
+3. add the new field to the output buffer buff_mult_piecef:
+
+    a. increase the dimension of buff_mult_piecef in
+    gfs_physics_initialize_mod.f:
+
+            ngrids_flx  = 66+43+6
+
+    b. in wrtflx_a in wrtout_physics.f, interpolate suntim to the full
+    grid and save it in buff_mult_piecef:
+
+            !
+            !    accumulated sunshine time
+            !
+                  glolal  = flx_fld%suntim
+                  ngrid2d = ngrid2d+1
+                  CALL uninterprez(2,kmsk0,buffo,glolal,global_lats_r,lonsperlar,
+            &     buff_mult_piecef(1,1,ngrid2d))
+            !     if(ierr.ne.0)print*,'wrtsfc gribit ierr=',ierr,'  ',
+            !    x '107)Accumulated sunshine duration (sec)'
+            !
+            !    end sunshine time
+
Index: checkout/doc/old/markdown2.py
===================================================================
--- checkout/doc/old/markdown2.py	(nonexistent)
+++ checkout/doc/old/markdown2.py	(revision 94669)
@@ -0,0 +1,2610 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Trent Mick.
+# Copyright (c) 2007-2008 ActiveState Corp.
+# License: MIT (http://www.opensource.org/licenses/mit-license.php)
+
+from __future__ import generators
+
+r"""A fast and complete Python implementation of Markdown.
+
+[from http://daringfireball.net/projects/markdown/]
+> Markdown is a text-to-HTML filter; it translates an easy-to-read /
+> easy-to-write structured text format into HTML.  Markdown's text
+> format is most similar to that of plain text email, and supports
+> features such as headers, *emphasis*, code blocks, blockquotes, and
+> links.
+>
+> Markdown's syntax is designed not as a generic markup language, but
+> specifically to serve as a front-end to (X)HTML. You can use span-level
+> HTML tags anywhere in a Markdown document, and you can use block level
+> HTML tags (like <div> and <table>) as well.
+
+Module usage:
+
+    >>> import markdown2
+    >>> markdown2.markdown("*boo!*")  # or use `html = markdown_path(PATH)`
+    u'<p><em>boo!</em></p>\n'
+
+    >>> markdowner = Markdown()
+    >>> markdowner.convert("*boo!*")
+    u'<p><em>boo!</em></p>\n'
+    >>> markdowner.convert("**boom!**")
+    u'<p><strong>boom!</strong></p>\n'
+
+This implementation of Markdown implements the full "core" syntax plus a
+number of extras (e.g., code syntax coloring, footnotes) as described on
+<https://github.com/trentm/python-markdown2/wiki/Extras>.
+"""
+
+cmdln_desc = """A fast and complete Python implementation of Markdown, a
+text-to-HTML conversion tool for web writers.
+
+Supported extra syntax options (see -x|--extras option below and
+see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
+
+* code-friendly: Disable _ and __ for em and strong.
+* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
+* fenced-code-blocks: Allows a code block to not have to be indented
+  by fencing it with '```' on a line before and after. Based on
+  <http://github.github.com/github-flavored-markdown/> with support for
+  syntax highlighting.
+* footnotes: Support footnotes as in use on daringfireball.net and
+  implemented in other Markdown processors (though not in Markdown.pl v1.0.1).
+* header-ids: Adds "id" attributes to headers. The id value is a slug of
+  the header text.
+* html-classes: Takes a dict mapping html tag names (lowercase) to a
+  string to use for a "class" tag attribute. Currently only supports "img",
+  "table", "pre" and "code" tags. Add an issue if you require this for other
+  tags.
+* link-patterns: Auto-link given regex patterns in text (e.g. bug number
+  references, revision number references).
+* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
+  have markdown processing be done on its contents. Similar to
+  <http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
+  some limitations.
+* metadata: Extract metadata from a leading '---'-fenced block.
+  See <https://github.com/trentm/python-markdown2/issues/77> for details.
+* nofollow: Add `rel="nofollow"` to all `<a>` tags with an href. See
+  <http://en.wikipedia.org/wiki/Nofollow>.
+* numbering: Support for generic counters. Non-standard extension to
+  allow sequential numbering of figures, tables, equations, exhibits, etc.
+* pyshell: Treats unindented Python interactive shell sessions as <code>
+  blocks.
+* smarty-pants: Replaces ' and " with curly quotation marks or curly
+  apostrophes.  Replaces --, ---, ..., and . . . with en dashes, em dashes,
+  and ellipses.
+* spoiler: A special kind of blockquote commonly hidden behind a
+  click on SO. Syntax per <http://meta.stackexchange.com/a/72878>.
+* tag-friendly: Requires atx style headers to have a space between the # and
+  the header text. Useful for applications that require twitter style tags to
+  pass through the parser.
+* tables: Tables using the same format as GFM
+  <https://help.github.com/articles/github-flavored-markdown#tables> and
+  PHP-Markdown Extra <https://michelf.ca/projects/php-markdown/extra/#table>.
+* toc: The returned HTML string gets a new "toc_html" attribute which is
+  a Table of Contents for the document. (experimental)
+* use-file-vars: Look for an Emacs-style markdown-extras file variable to turn
+  on Extras.
+* wiki-tables: Google Code Wiki-style tables. See
+  <http://code.google.com/p/support/wiki/WikiSyntax#Tables>.
+* xml: Passes one-liner processing instructions and namespaced XML tags.
+"""
+
+# Dev Notes:
+# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
+#   not yet sure if there are implications with this. Compare 'pydoc sre'
+#   and 'perldoc perlre'.
+
+__version_info__ = (2, 3, 2)
+__version__ = '.'.join(map(str, __version_info__))
+__author__ = "Trent Mick"
+
+import sys
+import re
+import logging
+try:
+    from hashlib import md5
+except ImportError:
+    from md5 import md5
+import optparse
+from random import random, randint
+import codecs
+try:
+    from urllib import quote_plus
+except ImportError:
+    from urllib.parse import quote_plus
+
+
+# ---- Python version compat
+
+if sys.version_info[:2] < (2, 4):
+    def reversed(sequence):
+        for i in sequence[::-1]:
+            yield i
+
+# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
+if sys.version_info[0] <= 2:
+    py3 = False
+    try:
+        bytes
+    except NameError:
+        bytes = str
+    base_string_type = basestring
+elif sys.version_info[0] >= 3:
+    py3 = True
+    unicode = str
+    base_string_type = str
+
+# ---- globals
+
+DEBUG = False
+log = logging.getLogger("markdown")
+
+DEFAULT_TAB_WIDTH = 4
+
+
+SECRET_SALT = bytes(randint(0, 1000000))
+def _hash_text(s):
+    return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
+
+# Table of hash values for escaped characters:
+g_escape_table = dict([(ch, _hash_text(ch))
+    for ch in '\\`*_{}[]()>#+-.!'])
+
+
+# ---- exceptions
+class MarkdownError(Exception):
+    pass
+
+
+# ---- public api
+
+def markdown_path(path, encoding="utf-8",
+                  html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
+                  safe_mode=None, extras=None, link_patterns=None,
+                  use_file_vars=False):
+    fp = codecs.open(path, 'r', encoding)
+    text = fp.read()
+    fp.close()
+    return Markdown(html4tags=html4tags, tab_width=tab_width,
+                    safe_mode=safe_mode, extras=extras,
+                    link_patterns=link_patterns,
+                    use_file_vars=use_file_vars).convert(text)
+
+
+def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
+             safe_mode=None, extras=None, link_patterns=None,
+             use_file_vars=False):
+    return Markdown(html4tags=html4tags, tab_width=tab_width,
+                    safe_mode=safe_mode, extras=extras,
+                    link_patterns=link_patterns,
+                    use_file_vars=use_file_vars).convert(text)
+
+
+class Markdown(object):
+    # The dict of "extras" to enable in processing -- a mapping of
+    # extra name to argument for the extra. Most extras do not have an
+    # argument, in which case the value is None.
+    #
+    # This can be set via (a) subclassing and (b) the constructor
+    # "extras" argument.
+    extras = None
+
+    urls = None
+    titles = None
+    html_blocks = None
+    html_spans = None
+    html_removed_text = "[HTML_REMOVED]"  # for compat with markdown.py
+
+    # Used to track when we're inside an ordered or unordered list
+    # (see _ProcessListItems() for details):
+    list_level = 0
+
+    _ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
+
+    def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
+                 extras=None, link_patterns=None, use_file_vars=False):
+        if html4tags:
+            self.empty_element_suffix = ">"
+        else:
+            self.empty_element_suffix = " />"
+        self.tab_width = tab_width
+
+        # For compatibility with earlier markdown2.py and with
+        # markdown.py's safe_mode being a boolean,
+        #   safe_mode == True -> "replace"
+        if safe_mode is True:
+            self.safe_mode = "replace"
+        else:
+            self.safe_mode = safe_mode
+
+        # Massaging and building the "extras" info.
+        if self.extras is None:
+            self.extras = {}
+        elif not isinstance(self.extras, dict):
+            self.extras = dict([(e, None) for e in self.extras])
+        if extras:
+            if not isinstance(extras, dict):
+                extras = dict([(e, None) for e in extras])
+            self.extras.update(extras)
+        assert isinstance(self.extras, dict)
+        if "toc" in self.extras and "header-ids" not in self.extras:
+            self.extras["header-ids"] = None   # "toc" implies "header-ids"
+        self._instance_extras = self.extras.copy()
+
+        self.link_patterns = link_patterns
+        self.use_file_vars = use_file_vars
+        self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
+
+        self._escape_table = g_escape_table.copy()
+        if "smarty-pants" in self.extras:
+            self._escape_table['"'] = _hash_text('"')
+            self._escape_table["'"] = _hash_text("'")
+
+    def reset(self):
+        self.urls = {}
+        self.titles = {}
+        self.html_blocks = {}
+        self.html_spans = {}
+        self.list_level = 0
+        self.extras = self._instance_extras.copy()
+        if "footnotes" in self.extras:
+            self.footnotes = {}
+            self.footnote_ids = []
+        if "header-ids" in self.extras:
+            self._count_from_header_id = {}  # no `defaultdict` in Python 2.4
+        if "metadata" in self.extras:
+            self.metadata = {}
+
+    # Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
+    # should only be used in <a> tags with an "href" attribute.
+    _a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE)
+
+    # Used to open the linked document in a new window or tab. Like
+    # _a_nofollow, this should only be applied to <a> tags with an
+    # "href" attribute.
+    _a_blank = _a_nofollow
+
+    def convert(self, text):
+        """Convert the given text."""
+        # Main function. The order in which other subs are called here is
+        # essential. Link and image substitutions need to happen before
+        # _EscapeSpecialChars(), so that any *'s or _'s in the <a>
+        # and <img> tags get encoded.
+
+        # Clear the global hashes. If we don't clear these, you get conflicts
+        # from other articles when generating a page which contains more than
+        # one article (e.g. an index page that shows the N most recent
+        # articles):
+        self.reset()
+
+        if not isinstance(text, unicode):
+            # TODO: perhaps shouldn't presume UTF-8 for string input?
+            text = unicode(text, 'utf-8')
+
+        if self.use_file_vars:
+            # Look for emacs-style file variable hints.
+            emacs_vars = self._get_emacs_vars(text)
+            if "markdown-extras" in emacs_vars:
+                splitter = re.compile("[ ,]+")
+                for e in splitter.split(emacs_vars["markdown-extras"]):
+                    if '=' in e:
+                        ename, earg = e.split('=', 1)
+                        try:
+                            earg = int(earg)
+                        except ValueError:
+                            pass
+                    else:
+                        ename, earg = e, None
+                    self.extras[ename] = earg
+
+        # Standardize line endings:
+        text = text.replace("\r\n", "\n")
+        text = text.replace("\r", "\n")
+
+        # Make sure $text ends with a couple of newlines:
+        text += "\n\n"
+
+        # Convert all tabs to spaces.
+        text = self._detab(text)
+
+        # Strip any lines consisting only of spaces and tabs.
+        # This makes subsequent regexen easier to write, because we can
+        # match consecutive blank lines with /\n+/ instead of something
+        # contorted like /[ \t]*\n+/ .
+        text = self._ws_only_line_re.sub("", text)
+
+        # strip metadata from head and extract
+        if "metadata" in self.extras:
+            text = self._extract_metadata(text)
+
+        text = self.preprocess(text)
+
+        if "fenced-code-blocks" in self.extras and not self.safe_mode:
+            text = self._do_fenced_code_blocks(text)
+
+        if self.safe_mode:
+            text = self._hash_html_spans(text)
+
+        # Turn block-level HTML blocks into hash entries
+        text = self._hash_html_blocks(text, raw=True)
+
+        if "fenced-code-blocks" in self.extras and self.safe_mode:
+            text = self._do_fenced_code_blocks(text)
+
+        # Because numbering references aren't links (yet?) then we can do everything associated with counters
+        # before we get started
+        if "numbering" in self.extras:
+            text = self._do_numbering(text)
+
+        # Strip link definitions, store in hashes.
+        if "footnotes" in self.extras:
+            # Must do footnotes first because an unlucky footnote defn
+            # looks like a link defn:
+            #   [^4]: this "looks like a link defn"
+            text = self._strip_footnote_definitions(text)
+        text = self._strip_link_definitions(text)
+
+        text = self._run_block_gamut(text)
+
+        if "footnotes" in self.extras:
+            text = self._add_footnotes(text)
+
+        text = self.postprocess(text)
+
+        text = self._unescape_special_chars(text)
+
+        if self.safe_mode:
+            text = self._unhash_html_spans(text)
+
+        if "nofollow" in self.extras:
+            text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)
+
+        if "target-blank-links" in self.extras:
+            text = self._a_blank.sub(r'<\1 target="_blank"\2', text)
+
+        text += "\n"
+
+        rv = UnicodeWithAttrs(text)
+        if "toc" in self.extras:
+            rv._toc = self._toc
+        if "metadata" in self.extras:
+            rv.metadata = self.metadata
+        return rv
+
+    def postprocess(self, text):
+        """A hook for subclasses to do some postprocessing of the html, if
+        desired. This is called before unescaping of special chars and
+        unhashing of raw HTML spans.
+        """
+        return text
+
+    def preprocess(self, text):
+        """A hook for subclasses to do some preprocessing of the Markdown, if
+        desired. This is called after basic formatting of the text, but prior
+        to any extras, safe mode, etc. processing.
+        """
+        return text
+
+    # Is metadata if the content starts with optional '---'-fenced `key: value`
+    # pairs. E.g. (indented for presentation):
+    #   ---
+    #   foo: bar
+    #   another-var: blah blah
+    #   ---
+    #   # header
+    # or:
+    #   foo: bar
+    #   another-var: blah blah
+    #
+    #   # header
+    _meta_data_pattern = re.compile(r'^(?:---[\ \t]*\n)?(.*:\s+>\n\s+[\S\s]+?)(?=\n\w+\s*:\s*\w+\n|\Z)|([\S\w]+\s*:(?! >)[ \t]*.*\n?)(?:---[\ \t]*\n)?', re.MULTILINE)
+    _key_val_pat = re.compile("[\S\w]+\s*:(?! >)[ \t]*.*\n?", re.MULTILINE)
+    # this allows key: >
+    #                   value
+    #                   continues over multiple lines
+    _key_val_block_pat = re.compile(
+        "(.*:\s+>\n\s+[\S\s]+?)(?=\n\w+\s*:\s*\w+\n|\Z)", re.MULTILINE)
+
+    def _extract_metadata(self, text):
+        match = re.findall(self._meta_data_pattern, text)
+
+        if not match:
+            return text
+
+        last_item = list(filter(None, match[-1]))[0]
+        end_of_metadata = text.index(last_item)+len(last_item)
+        if text.startswith("---"):
+            # skip the 4 characters of the closing '---' fence
+            # (three dashes plus the newline)
+            tail = text[end_of_metadata+4:]
+        else:
+            tail = text[end_of_metadata:]
+
+        kv = re.findall(self._key_val_pat, text)
+        kvm = re.findall(self._key_val_block_pat, text)
+        kvm = [item.replace(": >\n", ":", 1) for item in kvm]
+
+        for item in kv + kvm:
+            k, v = item.split(":", 1)
+            self.metadata[k.strip()] = v.strip()
+
+        return tail
+
+    _emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
+    # This regular expression is intended to match blocks like this:
+    #    PREFIX Local Variables: SUFFIX
+    #    PREFIX mode: Tcl SUFFIX
+    #    PREFIX End: SUFFIX
+    # Some notes:
+    # - "[ \t]" is used instead of "\s" to specifically exclude newlines
+    # - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
+    #   not like anything other than Unix-style line terminators.
+    _emacs_local_vars_pat = re.compile(r"""^
+        (?P<prefix>(?:[^\r\n|\n|\r])*?)
+        [\ \t]*Local\ Variables:[\ \t]*
+        (?P<suffix>.*?)(?:\r\n|\n|\r)
+        (?P<content>.*?\1End:)
+        """, re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
+
+    def _get_emacs_vars(self, text):
+        """Return a dictionary of emacs-style local variables.
+
+        Parsing is done loosely according to this spec (and according to
+        some in-practice deviations from this):
+        http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
+        """
+        emacs_vars = {}
+        SIZE = pow(2, 13)  # 8kB
+
+        # Search near the start for a '-*-'-style one-liner of variables.
+        head = text[:SIZE]
+        if "-*-" in head:
+            match = self._emacs_oneliner_vars_pat.search(head)
+            if match:
+                emacs_vars_str = match.group(1)
+                assert '\n' not in emacs_vars_str
+                emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
+                                  if s.strip()]
+                if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
+                    # While not in the spec, this form is allowed by emacs:
+                    #   -*- Tcl -*-
+                    # where the implied "variable" is "mode". This form
+                    # is only allowed if there are no other variables.
+                    emacs_vars["mode"] = emacs_var_strs[0].strip()
+                else:
+                    for emacs_var_str in emacs_var_strs:
+                        try:
+                            variable, value = emacs_var_str.strip().split(':', 1)
+                        except ValueError:
+                            log.debug("emacs variables error: malformed -*- "
+                                      "line: %r", emacs_var_str)
+                            continue
+                        # Lowercase the variable name because Emacs allows "Mode"
+                        # or "mode" or "MoDe", etc.
+                        emacs_vars[variable.lower()] = value.strip()
+
+        tail = text[-SIZE:]
+        if "Local Variables" in tail:
+            match = self._emacs_local_vars_pat.search(tail)
+            if match:
+                prefix = match.group("prefix")
+                suffix = match.group("suffix")
+                lines = match.group("content").splitlines(0)
+                # print "prefix=%r, suffix=%r, content=%r, lines: %s"\
+                #      % (prefix, suffix, match.group("content"), lines)
+
+                # Validate the Local Variables block: proper prefix and suffix
+                # usage.
+                for i, line in enumerate(lines):
+                    if not line.startswith(prefix):
+                        log.debug("emacs variables error: line '%s' "
+                                  "does not use proper prefix '%s'"
+                                  % (line, prefix))
+                        return {}
+                    # Don't validate suffix on last line. Emacs doesn't care,
+                    # neither should we.
+                    if i != len(lines)-1 and not line.endswith(suffix):
+                        log.debug("emacs variables error: line '%s' "
+                                  "does not use proper suffix '%s'"
+                                  % (line, suffix))
+                        return {}
+
+                # Parse out one emacs var per line.
+                continued_for = None
+                for line in lines[:-1]:  # no var on the last line ("PREFIX End:")
+                    if prefix: line = line[len(prefix):]  # strip prefix
+                    if suffix: line = line[:-len(suffix)]  # strip suffix
+                    line = line.strip()
+                    if continued_for:
+                        variable = continued_for
+                        if line.endswith('\\'):
+                            line = line[:-1].rstrip()
+                        else:
+                            continued_for = None
+                        emacs_vars[variable] += ' ' + line
+                    else:
+                        try:
+                            variable, value = line.split(':', 1)
+                        except ValueError:
+                            log.debug("local variables error: missing colon "
+                                      "in local variables entry: '%s'" % line)
+                            continue
+                        # Do NOT lowercase the variable name, because Emacs only
+                        # allows "mode" (and not "Mode", "MoDe", etc.) in this block.
+                        value = value.strip()
+                        if value.endswith('\\'):
+                            value = value[:-1].rstrip()
+                            continued_for = variable
+                        else:
+                            continued_for = None
+                        emacs_vars[variable] = value
+
+        # Unquote values.
+        for var, val in list(emacs_vars.items()):
+            if len(val) > 1 and (val.startswith('"') and val.endswith('"')
+               or val.startswith("'") and val.endswith("'")):
+                emacs_vars[var] = val[1:-1]
+
+        return emacs_vars
+
+    def _detab_line(self, line):
+        r"""Recusively convert tabs to spaces in a single line.
+
+        Called from _detab()."""
+        if '\t' not in line:
+            return line
+        chunk1, chunk2 = line.split('\t', 1)
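+        # Pad chunk1 to the next tab stop: a tab advances the column
+        # to the next multiple of self.tab_width.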
+        chunk1 += (' ' * (self.tab_width - len(chunk1) % self.tab_width))
+        output = chunk1 + chunk2
+        return self._detab_line(output)
+
+    def _detab(self, text):
+        r"""Iterate text line by line and convert tabs to spaces.
+
+            >>> m = Markdown()
+            >>> m._detab("\tfoo")
+            '    foo'
+            >>> m._detab("  \tfoo")
+            '    foo'
+            >>> m._detab("\t  foo")
+            '      foo'
+            >>> m._detab("  foo")
+            '  foo'
+            >>> m._detab("  foo\n\tbar\tblam")
+            '  foo\n    bar blam'
+        """
+        if '\t' not in text:
+            return text
+        output = []
+        for line in text.splitlines():
+            output.append(self._detab_line(line))
+        return '\n'.join(output)
+
+    # I broke out the html5 tags here and add them to _block_tags_a and
+    # _block_tags_b.  This way html5 tags are easy to keep track of.
+    _html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'
+
+    _block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
+    _block_tags_a += _html5tags
+
+    _strict_tag_block_re = re.compile(r"""
+        (                       # save in \1
+            ^                   # start of line  (with re.M)
+            <(%s)               # start tag = \2
+            \b                  # word break
+            (.*\n)*?            # any number of lines, minimally matching
+            </\2>               # the matching end tag
+            [ \t]*              # trailing spaces/tabs
+            (?=\n+|\Z)          # followed by a newline or end of document
+        )
+        """ % _block_tags_a,
+        re.X | re.M)
+
+    _block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
+    _block_tags_b += _html5tags
+
+    _liberal_tag_block_re = re.compile(r"""
+        (                       # save in \1
+            ^                   # start of line  (with re.M)
+            <(%s)               # start tag = \2
+            \b                  # word break
+            (.*\n)*?            # any number of lines, minimally matching
+            .*</\2>             # the matching end tag
+            [ \t]*              # trailing spaces/tabs
+            (?=\n+|\Z)          # followed by a newline or end of document
+        )
+        """ % _block_tags_b,
+        re.X | re.M)
+
+    _html_markdown_attr_re = re.compile(
+        r'''\s+markdown=("1"|'1')''')
+    def _hash_html_block_sub(self, match, raw=False):
+        html = match.group(1)
+        if raw and self.safe_mode:
+            html = self._sanitize_html(html)
+        elif 'markdown-in-html' in self.extras and 'markdown=' in html:
+            first_line = html.split('\n', 1)[0]
+            m = self._html_markdown_attr_re.search(first_line)
+            if m:
+                lines = html.split('\n')
+                middle = '\n'.join(lines[1:-1])
+                last_line = lines[-1]
+                first_line = first_line[:m.start()] + first_line[m.end():]
+                f_key = _hash_text(first_line)
+                self.html_blocks[f_key] = first_line
+                l_key = _hash_text(last_line)
+                self.html_blocks[l_key] = last_line
+                return ''.join(["\n\n", f_key,
+                    "\n\n", middle, "\n\n",
+                    l_key, "\n\n"])
+        key = _hash_text(html)
+        self.html_blocks[key] = html
+        return "\n\n" + key + "\n\n"
+
+    def _hash_html_blocks(self, text, raw=False):
+        """Hashify HTML blocks
+
+        We only want to do this for block-level HTML tags, such as headers,
+        lists, and tables. That's because we still want to wrap <p>s around
+        "paragraphs" that are wrapped in non-block-level tags, such as anchors,
+        phrase emphasis, and spans. The list of tags we're looking for is
+        hard-coded.
+
+        @param raw {boolean} indicates if these are raw HTML blocks in
+            the original source. It makes a difference in "safe" mode.
+        """
+        if '<' not in text:
+            return text
+
+        # Pass `raw` value into our calls to self._hash_html_block_sub.
+        hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
+
+        # First, look for nested blocks, e.g.:
+        #   <div>
+        #       <div>
+        #       tags for inner block must be indented.
+        #       </div>
+        #   </div>
+        #
+        # The outermost tags must start at the left margin for this to match, and
+        # the inner nested divs must be indented.
+        # We need to do this before the next, more liberal match, because the next
+        # match will start at the first `<div>` and stop at the first `</div>`.
+        text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
+
+        # Now match more liberally, simply from `\n<tag>` to `</tag>\n`
+        text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
+
+        # Special case just for <hr />. It was easier to make a special
+        # case than to make the other regex more complicated.
+        if "<hr" in text:
+            _hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
+            text = _hr_tag_re.sub(hash_html_block_sub, text)
+
+        # Special case for standalone HTML comments:
+        if "<!--" in text:
+            start = 0
+            while True:
+                # Delimiters for next comment block.
+                try:
+                    start_idx = text.index("<!--", start)
+                except ValueError:
+                    break
+                try:
+                    end_idx = text.index("-->", start_idx) + 3
+                except ValueError:
+                    break
+
+                # Start position for next comment block search.
+                start = end_idx
+
+                # Validate whitespace before comment.
+                if start_idx:
+                    # - Up to `tab_width - 1` spaces before start_idx.
+                    for i in range(self.tab_width - 1):
+                        if text[start_idx - 1] != ' ':
+                            break
+                        start_idx -= 1
+                        if start_idx == 0:
+                            break
+                    # - Must be preceded by 2 newlines or hit the start of
+                    #   the document.
+                    if start_idx == 0:
+                        pass
+                    elif start_idx == 1 and text[0] == '\n':
+                        start_idx = 0  # to match minute detail of Markdown.pl regex
+                    elif text[start_idx-2:start_idx] == '\n\n':
+                        pass
+                    else:
+                        break
+
+                # Validate whitespace after comment.
+                # - Any number of spaces and tabs.
+                while end_idx < len(text):
+                    if text[end_idx] not in ' \t':
+                        break
+                    end_idx += 1
+                # - Must be followed by 2 newlines or hit end of text.
+                if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
+                    continue
+
+                # Escape and hash (must match `_hash_html_block_sub`).
+                html = text[start_idx:end_idx]
+                if raw and self.safe_mode:
+                    html = self._sanitize_html(html)
+                key = _hash_text(html)
+                self.html_blocks[key] = html
+                text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
+
+        if "xml" in self.extras:
+            # Treat XML processing instructions and namespaced one-liner
+            # tags as if they were block HTML tags. E.g., if standalone
+            # (i.e. are their own paragraph), the following do not get
+            # wrapped in a <p> tag:
+            #    <?foo bar?>
+            #
+            #    <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
+            _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
+            text = _xml_oneliner_re.sub(hash_html_block_sub, text)
+
+        return text
+
+    def _strip_link_definitions(self, text):
+        # Strips link definitions from text, stores the URLs and titles in
+        # hash references.
+        less_than_tab = self.tab_width - 1
+
+        # Link defs are in the form:
+        #   [id]: url "optional title"
+        _link_def_re = re.compile(r"""
+            ^[ ]{0,%d}\[(.+)\]: # id = \1
+              [ \t]*
+              \n?               # maybe *one* newline
+              [ \t]*
+            <?(.+?)>?           # url = \2
+              [ \t]*
+            (?:
+                \n?             # maybe one newline
+                [ \t]*
+                (?<=\s)         # lookbehind for whitespace
+                ['"(]
+                ([^\n]*)        # title = \3
+                ['")]
+                [ \t]*
+            )?  # title is optional
+            (?:\n+|\Z)
+            """ % less_than_tab, re.X | re.M | re.U)
+        return _link_def_re.sub(self._extract_link_def_sub, text)
+
+    def _extract_link_def_sub(self, match):
+        id, url, title = match.groups()
+        key = id.lower()    # Link IDs are case-insensitive
+        self.urls[key] = self._encode_amps_and_angles(url)
+        if title:
+            self.titles[key] = title
+        return ""
+
+    def _do_numbering(self, text):
+        ''' We handle the special extension for generic numbering for
+            tables, figures etc.
+        '''
+        # First pass to define all the references
+        self.regex_defns = re.compile(r'''
+            \[\#(\w+)\s* # the counter.  Open square plus hash plus a word \1
+            ([^@]*)\s*   # Some optional characters, that aren't an @. \2
+            @(\w+)       # the id.  Should this be normed? \3
+            ([^\]]*)\]   # The rest of the text up to the terminating ] \4
+            ''', re.VERBOSE)
+        self.regex_subs = re.compile(r"\[@(\w+)\s*\]")  # [@ref_id]
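+        # Illustrative example (inferred from the regexes above):
+        #   "[#figure Flow diagram @fig1]" defines entry 1 of the
+        #   "figure" counter, and a later "[@fig1]" is replaced by a
+        #   link displaying that number.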
+        counters = {}
+        references = {}
+        replacements = []
+        definition_html = '<figcaption class="{}" id="counter-ref-{}">{}{}{}</figcaption>'
+        reference_html = '<a class="{}" href="#counter-ref-{}">{}</a>'
+        for match in self.regex_defns.finditer(text):
+            # We must have four match groups otherwise this isn't a numbering reference
+            if len(match.groups()) != 4:
+                continue
+            counter = match.group(1)
+            text_before = match.group(2)
+            ref_id = match.group(3)
+            text_after = match.group(4)
+            number = counters.get(counter, 1)
+            references[ref_id] = (number, counter)
+            replacements.append((match.start(0),
+                                 definition_html.format(counter,
+                                                        ref_id,
+                                                        text_before,
+                                                        number,
+                                                        text_after),
+                                 match.end(0)))
+            counters[counter] = number + 1
+        for repl in reversed(replacements):
+            text = text[:repl[0]] + repl[1] + text[repl[2]:]
+
+        # Second pass to replace the references with the right
+        # value of the counter
+        # Fwiw, it's vaguely annoying to have to turn the iterator into
+        # a list and then reverse it but I can't think of a better thing to do.
+        for match in reversed(list(self.regex_subs.finditer(text))):
+            number, counter = references.get(match.group(1), (None, None))
+            if number is not None:
+                repl = reference_html.format(counter,
+                                             match.group(1),
+                                             number)
+            else:
+                repl = reference_html.format(match.group(1),
+                                             'countererror',
+                                             '?' + match.group(1) + '?')
+            if "smarty-pants" in self.extras:
+                repl = repl.replace('"', self._escape_table['"'])
+
+            text = text[:match.start()] + repl + text[match.end():]
+        return text
+
+    def _extract_footnote_def_sub(self, match):
+        id, text = match.groups()
+        text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
+        normed_id = re.sub(r'\W', '-', id)
+        # Ensure footnote text ends with a couple newlines (for some
+        # block gamut matches).
+        self.footnotes[normed_id] = text + "\n\n"
+        return ""
+
+    def _strip_footnote_definitions(self, text):
+        """A footnote definition looks like this:
+
+            [^note-id]: Text of the note.
+
+                May include one or more indented paragraphs.
+
+        Where,
+        - The 'note-id' can be pretty much anything, though typically it
+          is the number of the footnote.
+        - The first paragraph may start on the next line, like so:
+
+            [^note-id]:
+                Text of the note.
+        """
+        less_than_tab = self.tab_width - 1
+        footnote_def_re = re.compile(r'''
+            ^[ ]{0,%d}\[\^(.+)\]:   # id = \1
+            [ \t]*
+            (                       # footnote text = \2
+              # First line need not start with the spaces.
+              (?:\s*.*\n+)
+              (?:
+                (?:[ ]{%d} | \t)  # Subsequent lines must be indented.
+                .*\n+
+              )*
+            )
+            # Lookahead for non-space at line-start, or end of doc.
+            (?:(?=^[ ]{0,%d}\S)|\Z)
+            ''' % (less_than_tab, self.tab_width, self.tab_width),
+            re.X | re.M)
+        return footnote_def_re.sub(self._extract_footnote_def_sub, text)
+
+    _hr_re = re.compile(r'^[ ]{0,3}([-_*][ ]{0,2}){3,}$', re.M)
+
+    def _run_block_gamut(self, text):
+        # These are all the transformations that form block-level
+        # tags like paragraphs, headers, and list items.
+
+        if "fenced-code-blocks" in self.extras:
+            text = self._do_fenced_code_blocks(text)
+
+        text = self._do_headers(text)
+
+        # Do Horizontal Rules:
+        # On the number of spaces in horizontal rules: The spec is fuzzy: "If
+        # you wish, you may use spaces between the hyphens or asterisks."
+        # Markdown.pl 1.0.1's hr regexes limit the number of spaces between the
+        # hr chars to one or two. We'll reproduce that limit here.
+        hr = "\n<hr"+self.empty_element_suffix+"\n"
+        text = re.sub(self._hr_re, hr, text)
+
+        text = self._do_lists(text)
+
+        if "pyshell" in self.extras:
+            text = self._prepare_pyshell_blocks(text)
+        if "wiki-tables" in self.extras:
+            text = self._do_wiki_tables(text)
+        if "tables" in self.extras:
+            text = self._do_tables(text)
+
+        text = self._do_code_blocks(text)
+
+        text = self._do_block_quotes(text)
+
+        # We already ran _HashHTMLBlocks() before, in Markdown(), but that
+        # was to escape raw HTML in the original Markdown source. This time,
+        # we're escaping the markup we've just created, so that we don't wrap
+        # <p> tags around block-level tags.
+        text = self._hash_html_blocks(text)
+
+        text = self._form_paragraphs(text)
+
+        return text
+
+    def _pyshell_block_sub(self, match):
+        lines = match.group(0).splitlines(0)
+        _dedentlines(lines)
+        indent = ' ' * self.tab_width
+        s = ('\n'  # separate from possible cuddled paragraph
+             + indent + ('\n'+indent).join(lines)
+             + '\n\n')
+        return s
+
+    def _prepare_pyshell_blocks(self, text):
+        """Ensure that Python interactive shell sessions are put in
+        code blocks -- even if not properly indented.
+        """
+        if ">>>" not in text:
+            return text
+
+        less_than_tab = self.tab_width - 1
+        _pyshell_block_re = re.compile(r"""
+            ^([ ]{0,%d})>>>[ ].*\n   # first line
+            ^(\1.*\S+.*\n)*         # any number of subsequent lines
+            ^\n                     # ends with a blank line
+            """ % less_than_tab, re.M | re.X)
+
+        return _pyshell_block_re.sub(self._pyshell_block_sub, text)
+
+    def _table_sub(self, match):
+        trim_space_re = '^[ \t\n]+|[ \t\n]+$'
+        trim_bar_re = '^\||\|$'
+
+        head, underline, body = match.groups()
+
+        # Determine aligns for columns.
+        cols = [cell.strip() for cell in re.sub(trim_bar_re, "", re.sub(trim_space_re, "", underline)).split('|')]
+        align_from_col_idx = {}
+        for col_idx, col in enumerate(cols):
+            if col[0] == ':' and col[-1] == ':':
+                align_from_col_idx[col_idx] = ' align="center"'
+            elif col[0] == ':':
+                align_from_col_idx[col_idx] = ' align="left"'
+            elif col[-1] == ':':
+                align_from_col_idx[col_idx] = ' align="right"'
+
+        # thead
+        hlines = ['<table%s>' % self._html_class_str_from_tag('table'), '<thead>', '<tr>']
+        cols = [cell.strip() for cell in re.sub(trim_bar_re, "", re.sub(trim_space_re, "", head)).split('|')]
+        for col_idx, col in enumerate(cols):
+            hlines.append('  <th%s>%s</th>' % (
+                align_from_col_idx.get(col_idx, ''),
+                self._run_span_gamut(col)
+            ))
+        hlines.append('</tr>')
+        hlines.append('</thead>')
+
+        # tbody
+        hlines.append('<tbody>')
+        for line in body.strip('\n').split('\n'):
+            hlines.append('<tr>')
+            cols = [cell.strip() for cell in re.sub(trim_bar_re, "", re.sub(trim_space_re, "", line)).split('|')]
+            for col_idx, col in enumerate(cols):
+                hlines.append('  <td%s>%s</td>' % (
+                    align_from_col_idx.get(col_idx, ''),
+                    self._run_span_gamut(col)
+                ))
+            hlines.append('</tr>')
+        hlines.append('</tbody>')
+        hlines.append('</table>')
+
+        return '\n'.join(hlines) + '\n'
+
+    def _do_tables(self, text):
+        """Copying PHP-Markdown and GFM table syntax. Some regex borrowed from
+        https://github.com/michelf/php-markdown/blob/lib/Michelf/Markdown.php#L2538
+        """
+        less_than_tab = self.tab_width - 1
+        table_re = re.compile(r'''
+                (?:(?<=\n\n)|\A\n?)             # leading blank line
+
+                ^[ ]{0,%d}                      # allowed whitespace
+                (.*[|].*)  \n                   # $1: header row (at least one pipe)
+
+                ^[ ]{0,%d}                      # allowed whitespace
+                (                               # $2: underline row
+                    # underline row with leading bar
+                    (?:  \|\ *:?-+:?\ *  )+  \|?  \n
+                    |
+                    # or, underline row without leading bar
+                    (?:  \ *:?-+:?\ *\|  )+  (?:  \ *:?-+:?\ *  )?  \n
+                )
+
+                (                               # $3: data rows
+                    (?:
+                        ^[ ]{0,%d}(?!\ )         # ensure line begins with 0 to less_than_tab spaces
+                        .*\|.*  \n
+                    )+
+                )
+            ''' % (less_than_tab, less_than_tab, less_than_tab), re.M | re.X)
+        return table_re.sub(self._table_sub, text)
+
+    def _wiki_table_sub(self, match):
+        ttext = match.group(0).strip()
+        # print 'wiki table: %r' % match.group(0)
+        rows = []
+        for line in ttext.splitlines(0):
+            line = line.strip()[2:-2].strip()
+            row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)]
+            rows.append(row)
+        # pprint(rows)
+        hlines = ['<table%s>' % self._html_class_str_from_tag('table'), '<tbody>']
+        for row in rows:
+            hrow = ['<tr>']
+            for cell in row:
+                hrow.append('<td>')
+                hrow.append(self._run_span_gamut(cell))
+                hrow.append('</td>')
+            hrow.append('</tr>')
+            hlines.append(''.join(hrow))
+        hlines += ['</tbody>', '</table>']
+        return '\n'.join(hlines) + '\n'
+
+    def _do_wiki_tables(self, text):
+        # Optimization.
+        if "||" not in text:
+            return text
+
+        less_than_tab = self.tab_width - 1
+        wiki_table_re = re.compile(r'''
+            (?:(?<=\n\n)|\A\n?)            # leading blank line
+            ^([ ]{0,%d})\|\|.+?\|\|[ ]*\n  # first line
+            (^\1\|\|.+?\|\|\n)*        # any number of subsequent lines
+            ''' % less_than_tab, re.M | re.X)
+        return wiki_table_re.sub(self._wiki_table_sub, text)
+
+    def _run_span_gamut(self, text):
+        # These are all the transformations that occur *within* block-level
+        # tags like paragraphs, headers, and list items.
+
+        text = self._do_code_spans(text)
+
+        text = self._escape_special_chars(text)
+
+        # Process anchor and image tags.
+        text = self._do_links(text)
+
+        # Make links out of things like `<http://example.com/>`
+        # Must come after _do_links(), because you can use < and >
+        # delimiters in inline links like [this](<url>).
+        text = self._do_auto_links(text)
+
+        if "link-patterns" in self.extras:
+            text = self._do_link_patterns(text)
+
+        text = self._encode_amps_and_angles(text)
+
+        if "strike" in self.extras:
+            text = self._do_strike(text)
+
+        text = self._do_italics_and_bold(text)
+
+        if "smarty-pants" in self.extras:
+            text = self._do_smart_punctuation(text)
+
+        # Do hard breaks:
+        if "break-on-newline" in self.extras:
+            text = re.sub(r" *\n", "<br%s\n" % self.empty_element_suffix, text)
+        else:
+            text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)
+
+        return text
+
+    # "Sorta" because auto-links are identified as "tag" tokens.
+    _sorta_html_tokenize_re = re.compile(r"""
+        (
+            # tag
+            </?
+            (?:\w+)                                     # tag name
+            (?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))*  # attributes
+            \s*/?>
+            |
+            # auto-link (e.g., <http://www.activestate.com/>)
+            <\w+[^>]*>
+            |
+            <!--.*?-->      # comment
+            |
+            <\?.*?\?>       # processing instruction
+        )
+        """, re.X)
+
+    def _escape_special_chars(self, text):
+        # Python markdown note: the HTML tokenization here differs from
+        # that in Markdown.pl, hence the behaviour for subtle cases can
+        # differ (I believe the tokenizer here does a better job because
+        # it isn't susceptible to unmatched '<' and '>' in HTML tags).
+        # Note, however, that '>' is not allowed in an auto-link URL
+        # here.
+        escaped = []
+        is_html_markup = False
+        for token in self._sorta_html_tokenize_re.split(text):
+            if is_html_markup:
+                # Within tags/HTML-comments/auto-links, encode * and _
+                # so they don't conflict with their use in Markdown for
+                # italics and strong.  We're replacing each such
+                # character with its corresponding MD5 checksum value;
+                # this is likely overkill, but it should prevent us from
+                # colliding with the escape values by accident.
+                escaped.append(token.replace('*', self._escape_table['*'])
+                                    .replace('_', self._escape_table['_']))
+            else:
+                escaped.append(self._encode_backslash_escapes(token))
+            is_html_markup = not is_html_markup
+        return ''.join(escaped)
+
+    def _hash_html_spans(self, text):
+        # Used for safe_mode.
+
+        def _is_auto_link(s):
+            if ':' in s and self._auto_link_re.match(s):
+                return True
+            elif '@' in s and self._auto_email_link_re.match(s):
+                return True
+            return False
+
+        tokens = []
+        is_html_markup = False
+        for token in self._sorta_html_tokenize_re.split(text):
+            if is_html_markup and not _is_auto_link(token):
+                sanitized = self._sanitize_html(token)
+                key = _hash_text(sanitized)
+                self.html_spans[key] = sanitized
+                tokens.append(key)
+            else:
+                tokens.append(token)
+            is_html_markup = not is_html_markup
+        return ''.join(tokens)
+
+    def _unhash_html_spans(self, text):
+        for key, sanitized in list(self.html_spans.items()):
+            text = text.replace(key, sanitized)
+        return text
+
+    def _sanitize_html(self, s):
+        if self.safe_mode == "replace":
+            return self.html_removed_text
+        elif self.safe_mode == "escape":
+            replacements = [
+                ('&', '&amp;'),
+                ('<', '&lt;'),
+                ('>', '&gt;'),
+            ]
+            for before, after in replacements:
+                s = s.replace(before, after)
+            return s
+        else:
+            raise MarkdownError("invalid value for 'safe_mode': %r (must be "
+                                "'escape' or 'replace')" % self.safe_mode)
+
+    _inline_link_title = re.compile(r'''
+            (                   # \1
+              [ \t]+
+              (['"])            # quote char = \2
+              (?P<title>.*?)
+              \2
+            )?                  # title is optional
+          \)$
+        ''', re.X | re.S)
+    _tail_of_reference_link_re = re.compile(r'''
+          # Match tail of: [text][id]
+          [ ]?          # one optional space
+          (?:\n[ ]*)?   # one optional newline followed by spaces
+          \[
+            (?P<id>.*?)
+          \]
+        ''', re.X | re.S)
+
+    _whitespace = re.compile(r'\s*')
+
+    _strip_anglebrackets = re.compile(r'<(.*)>.*')
+
+    def _find_non_whitespace(self, text, start):
+        """Returns the index of the first non-whitespace character in text
+        after (and including) start
+        """
+        match = self._whitespace.match(text, start)
+        return match.end()
+
+    def _find_balanced(self, text, start, open_c, close_c):
+        """Returns the index where the open_c and close_c characters balance
+        out - the same number of open_c and close_c are encountered - or the
+        end of string if it's reached before the balance point is found.
+        """
+        i = start
+        l = len(text)
+        count = 1
+        while count > 0 and i < l:
+            if text[i] == open_c:
+                count += 1
+            elif text[i] == close_c:
+                count -= 1
+            i += 1
+        return i
+
+    def _extract_url_and_title(self, text, start):
+        """Extracts the url and (optional) title from the tail of a link"""
+        # text[start] equals the opening parenthesis
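+        # E.g. given text '...](/url "Title") more' with start at the
+        # '(', this returns ('/url', 'Title', the index just past ')').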
+        idx = self._find_non_whitespace(text, start+1)
+        if idx == len(text):
+            return None, None, None
+        end_idx = idx
+        has_anglebrackets = text[idx] == "<"
+        if has_anglebrackets:
+            end_idx = self._find_balanced(text, end_idx+1, "<", ">")
+        end_idx = self._find_balanced(text, end_idx, "(", ")")
+        match = self._inline_link_title.search(text, idx, end_idx)
+        if not match:
+            return None, None, None
+        url, title = text[idx:match.start()], match.group("title")
+        if has_anglebrackets:
+            url = self._strip_anglebrackets.sub(r'\1', url)
+        return url, title, end_idx
+
+    _safe_protocols = re.compile(r'(https?|ftp):', re.I)
+    def _do_links(self, text):
+        """Turn Markdown link shortcuts into XHTML <a> and <img> tags.
+
+        This is a combination of Markdown.pl's _DoAnchors() and
+        _DoImages(). They are done together because that simplified the
+        approach. It was necessary to use a different approach than
+        Markdown.pl because of the lack of atomic matching support in
+        Python's regex engine used in $g_nested_brackets.
+        """
+        MAX_LINK_TEXT_SENTINEL = 3000  # markdown2 issue 24
+
+        # `anchor_allowed_pos` is used to support img links inside
+        # anchors, but not anchors inside anchors. An anchor's start
+        # pos must be `>= anchor_allowed_pos`.
+        anchor_allowed_pos = 0
+
+        curr_pos = 0
+        while True:  # Handle the next link.
+            # The next '[' is the start of:
+            # - an inline anchor:   [text](url "title")
+            # - a reference anchor: [text][id]
+            # - an inline img:      ![text](url "title")
+            # - a reference img:    ![text][id]
+            # - a footnote ref:     [^id]
+            #   (Only if 'footnotes' extra enabled)
+            # - a footnote defn:    [^id]: ...
+            #   (Only if 'footnotes' extra enabled) These have already
+            #   been stripped in _strip_footnote_definitions() so no
+            #   need to watch for them.
+            # - a link definition:  [id]: url "title"
+            #   These have already been stripped in
+            #   _strip_link_definitions() so no need to watch for them.
+            # - not markup:         [...anything else...
+            try:
+                start_idx = text.index('[', curr_pos)
+            except ValueError:
+                break
+            text_length = len(text)
+
+            # Find the matching closing ']'.
+            # Markdown.pl allows *matching* brackets in link text so we
+            # will here too. Markdown.pl *doesn't* currently allow
+            # matching brackets in img alt text -- we'll differ in that
+            # regard.
+            bracket_depth = 0
+            for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
+                                            text_length)):
+                ch = text[p]
+                if ch == ']':
+                    bracket_depth -= 1
+                    if bracket_depth < 0:
+                        break
+                elif ch == '[':
+                    bracket_depth += 1
+            else:
+                # Closing bracket not found within sentinel length.
+                # This isn't markup.
+                curr_pos = start_idx + 1
+                continue
+            link_text = text[start_idx+1:p]
+
+            # Possibly a footnote ref?
+            if "footnotes" in self.extras and link_text.startswith("^"):
+                normed_id = re.sub(r'\W', '-', link_text[1:])
+                if normed_id in self.footnotes:
+                    self.footnote_ids.append(normed_id)
+                    result = '<sup class="footnote-ref" id="fnref-%s">' \
+                             '<a href="#fn-%s">%s</a></sup>' \
+                             % (normed_id, normed_id, len(self.footnote_ids))
+                    text = text[:start_idx] + result + text[p+1:]
+                else:
+                    # This id isn't defined, leave the markup alone.
+                    curr_pos = p+1
+                continue
+
+            # Now determine what this is by the remainder.
+            p += 1
+            if p == text_length:
+                return text
+
+            # Inline anchor or img?
+            if text[p] == '(':  # attempt at perf improvement
+                url, title, url_end_idx = self._extract_url_and_title(text, p)
+                if url is not None:
+                    # Handle an inline anchor or img.
+                    is_img = start_idx > 0 and text[start_idx-1] == "!"
+                    if is_img:
+                        start_idx -= 1
+
+                    # We've got to encode these to avoid conflicting
+                    # with italics/bold.
+                    url = url.replace('*', self._escape_table['*']) \
+                             .replace('_', self._escape_table['_'])
+                    if title:
+                        title_str = ' title="%s"' % (
+                            _xml_escape_attr(title)
+                                .replace('*', self._escape_table['*'])
+                                .replace('_', self._escape_table['_']))
+                    else:
+                        title_str = ''
+                    if is_img:
+                        img_class_str = self._html_class_str_from_tag("img")
+                        result = '<img src="%s" alt="%s"%s%s%s' \
+                            % (_urlencode(url, safe_mode=self.safe_mode),
+                               _xml_escape_attr(link_text),
+                               title_str,
+                               img_class_str,
+                               self.empty_element_suffix)
+                        if "smarty-pants" in self.extras:
+                            result = result.replace('"', self._escape_table['"'])
+                        curr_pos = start_idx + len(result)
+                        text = text[:start_idx] + result + text[url_end_idx:]
+                    elif start_idx >= anchor_allowed_pos:
+                        if self.safe_mode and not self._safe_protocols.match(url):
+                            result_head = '<a href="#"%s>' % (title_str)
+                        else:
+                            result_head = '<a href="%s"%s>' % (_urlencode(url, safe_mode=self.safe_mode), title_str)
+                        result = '%s%s</a>' % (result_head, _xml_escape_attr(link_text))
+                        if "smarty-pants" in self.extras:
+                            result = result.replace('"', self._escape_table['"'])
+                        # <img> allowed from curr_pos on, <a> from
+                        # anchor_allowed_pos on.
+                        curr_pos = start_idx + len(result_head)
+                        anchor_allowed_pos = start_idx + len(result)
+                        text = text[:start_idx] + result + text[url_end_idx:]
+                    else:
+                        # Anchor not allowed here.
+                        curr_pos = start_idx + 1
+                    continue
+
+            # Reference anchor or img?
+            else:
+                match = self._tail_of_reference_link_re.match(text, p)
+                if match:
+                    # Handle a reference-style anchor or img.
+                    is_img = start_idx > 0 and text[start_idx-1] == "!"
+                    if is_img:
+                        start_idx -= 1
+                    link_id = match.group("id").lower()
+                    if not link_id:
+                        link_id = link_text.lower()  # for links like [this][]
+                    if link_id in self.urls:
+                        url = self.urls[link_id]
+                        # We've got to encode these to avoid conflicting
+                        # with italics/bold.
+                        url = url.replace('*', self._escape_table['*']) \
+                                 .replace('_', self._escape_table['_'])
+                        title = self.titles.get(link_id)
+                        if title:
+                            title = _xml_escape_attr(title) \
+                                .replace('*', self._escape_table['*']) \
+                                .replace('_', self._escape_table['_'])
+                            title_str = ' title="%s"' % title
+                        else:
+                            title_str = ''
+                        if is_img:
+                            img_class_str = self._html_class_str_from_tag("img")
+                            result = '<img src="%s" alt="%s"%s%s%s' \
+                                % (_urlencode(url, safe_mode=self.safe_mode),
+                                   _xml_escape_attr(link_text),
+                                   title_str,
+                                   img_class_str,
+                                   self.empty_element_suffix)
+                            if "smarty-pants" in self.extras:
+                                result = result.replace('"', self._escape_table['"'])
+                            curr_pos = start_idx + len(result)
+                            text = text[:start_idx] + result + text[match.end():]
+                        elif start_idx >= anchor_allowed_pos:
+                            if self.safe_mode and not self._safe_protocols.match(url):
+                                result_head = '<a href="#"%s>' % (title_str)
+                            else:
+                                result_head = '<a href="%s"%s>' % (_urlencode(url, safe_mode=self.safe_mode), title_str)
+                            result = '%s%s</a>' % (result_head, link_text)
+                            if "smarty-pants" in self.extras:
+                                result = result.replace('"', self._escape_table['"'])
+                            # <img> allowed from curr_pos on, <a> from
+                            # anchor_allowed_pos on.
+                            curr_pos = start_idx + len(result_head)
+                            anchor_allowed_pos = start_idx + len(result)
+                            text = text[:start_idx] + result + text[match.end():]
+                        else:
+                            # Anchor not allowed here.
+                            curr_pos = start_idx + 1
+                    else:
+                        # This id isn't defined, leave the markup alone.
+                        curr_pos = match.end()
+                    continue
+
+            # Otherwise, it isn't markup.
+            curr_pos = start_idx + 1
+
+        return text
+
+    def header_id_from_text(self, text, prefix, n):
+        """Generate a header id attribute value from the given header
+        HTML content.
+
+        This is only called if the "header-ids" extra is enabled.
+        Subclasses may override this for different header ids.
+
+        @param text {str} The text of the header tag
+        @param prefix {str} The requested prefix for header ids. This is the
+            value of the "header-ids" extra key, if any. Otherwise, None.
+        @param n {int} The <hN> tag number, i.e. `1` for an <h1> tag.
+        @returns {str} The value for the header tag's "id" attribute. Return
+            None to not have an id attribute and to exclude this header from
+            the TOC (if the "toc" extra is specified).
+        """
+        header_id = _slugify(text)
+        if prefix and isinstance(prefix, base_string_type):
+            header_id = prefix + '-' + header_id
+        if header_id in self._count_from_header_id:
+            self._count_from_header_id[header_id] += 1
+            header_id += '-%s' % self._count_from_header_id[header_id]
+        else:
+            self._count_from_header_id[header_id] = 1
+        return header_id
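+
+    # Illustrative example (not from the original source): with prefix
+    # "doc", a first header "My Header" yields the id "doc-my-header";
+    # a repeated "My Header" yields "doc-my-header-2" via the counter
+    # dict above.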
+
+    _toc = None
+    def _toc_add_entry(self, level, id, name):
+        if self._toc is None:
+            self._toc = []
+        self._toc.append((level, id, self._unescape_special_chars(name)))
+
+    _h_re_base = r'''
+        (^(.+)[ \t]*\n(=+|-+)[ \t]*\n+)
+        |
+        (^(\#{1,6})  # \5 = string of #'s
+        [ \t]%s
+        (.+?)       # \6 = header text
+        [ \t]*
+        (?<!\\)     # ensure not an escaped trailing '#'
+        \#*         # optional closing #'s (not counted)
+        \n+
+        )
+        '''
+
+    _h_re = re.compile(_h_re_base % '*', re.X | re.M)
+    _h_re_tag_friendly = re.compile(_h_re_base % '+', re.X | re.M)
+
+    def _h_sub(self, match):
+        if match.group(1) is not None:
+            # Setext header
+            n = {"=": 1, "-": 2}[match.group(3)[0]]
+            header_group = match.group(2)
+        else:
+            # atx header
+            n = len(match.group(5))
+            header_group = match.group(6)
+
+        demote_headers = self.extras.get("demote-headers")
+        if demote_headers:
+            n = min(n + demote_headers, 6)
+        header_id_attr = ""
+        header_id = None
+        if "header-ids" in self.extras:
+            header_id = self.header_id_from_text(header_group,
+                self.extras["header-ids"], n)
+            if header_id:
+                header_id_attr = ' id="%s"' % header_id
+        html = self._run_span_gamut(header_group)
+        if "toc" in self.extras and header_id:
+            self._toc_add_entry(n, header_id, html)
+        return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
+
+    def _do_headers(self, text):
+        # Setext-style headers:
+        #     Header 1
+        #     ========
+        #
+        #     Header 2
+        #     --------
+
+        # atx-style headers:
+        #   # Header 1
+        #   ## Header 2
+        #   ## Header 2 with closing hashes ##
+        #   ...
+        #   ###### Header 6
+
+        if 'tag-friendly' in self.extras:
+            return self._h_re_tag_friendly.sub(self._h_sub, text)
+        return self._h_re.sub(self._h_sub, text)
+
+    _marker_ul_chars = '*+-'
+    _marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
+    _marker_ul = '(?:[%s])' % _marker_ul_chars
+    _marker_ol = r'(?:\d+\.)'
+
+    def _list_sub(self, match):
+        lst = match.group(1)
+        lst_type = "ul" if match.group(3) in self._marker_ul_chars else "ol"
+        result = self._process_list_items(lst)
+        if self.list_level:
+            return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
+        else:
+            return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
+
+    def _do_lists(self, text):
+        # Form HTML ordered (numbered) and unordered (bulleted) lists.
+
+        # Iterate over each *non-overlapping* list match.
+        pos = 0
+        while True:
+            # Find the *first* hit for either list style (ul or ol). We
+            # match ul and ol separately to avoid adjacent lists of different
+            # types running into each other (see issue #16).
+            hits = []
+            for marker_pat in (self._marker_ul, self._marker_ol):
+                less_than_tab = self.tab_width - 1
+                whole_list = r'''
+                    (                   # \1 = whole list
+                      (                 # \2
+                        [ ]{0,%d}
+                        (%s)            # \3 = first list item marker
+                        [ \t]+
+                        (?!\ *\3\ )     # '- - - ...' isn't a list. See 'not_quite_a_list' test case.
+                      )
+                      (?:.+?)
+                      (                 # \4
+                          \Z
+                        |
+                          \n{2,}
+                          (?=\S)
+                          (?!           # Negative lookahead for another list item marker
+                            [ \t]*
+                            %s[ \t]+
+                          )
+                      )
+                    )
+                ''' % (less_than_tab, marker_pat, marker_pat)
+                if self.list_level:  # sub-list
+                    list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
+                else:
+                    list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
+                                         re.X | re.M | re.S)
+                match = list_re.search(text, pos)
+                if match:
+                    hits.append((match.start(), match))
+            if not hits:
+                break
+            hits.sort()
+            match = hits[0][1]
+            start, end = match.span()
+            middle = self._list_sub(match)
+            text = text[:start] + middle + text[end:]
+            pos = start + len(middle)  # start pos for next attempted match
+
+        return text
+
+    _list_item_re = re.compile(r'''
+        (\n)?                   # leading line = \1
+        (^[ \t]*)               # leading whitespace = \2
+        (?P<marker>%s) [ \t]+   # list marker = \3
+        ((?:.+?)                # list item text = \4
+        (\n{1,2}))              # eols = \5
+        (?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
+        ''' % (_marker_any, _marker_any),
+        re.M | re.X | re.S)
+
+    _task_list_item_re = re.compile(r'''
+        (\[[\ x]\])[ \t]+       # tasklist marker = \1
+        (.*)                   # list item text = \2
+    ''', re.M | re.X | re.S)
+
+    _task_list_wrapper_str = r'<p><input type="checkbox" class="task-list-item-checkbox" %sdisabled>%s</p>'
+
+    def _task_list_item_sub(self, match):
+        marker = match.group(1)
+        item_text = match.group(2)
+        if marker == '[x]':
+            return self._task_list_wrapper_str % ('checked ', item_text)
+        elif marker == '[ ]':
+            return self._task_list_wrapper_str % ('', item_text)
+
+    _last_li_endswith_two_eols = False
+    def _list_item_sub(self, match):
+        item = match.group(4)
+        leading_line = match.group(1)
+        if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
+            item = self._run_block_gamut(self._outdent(item))
+        else:
+            # Recursion for sub-lists:
+            item = self._do_lists(self._outdent(item))
+            if item.endswith('\n'):
+                item = item[:-1]
+            item = self._run_span_gamut(item)
+        self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
+
+        if "task_list" in self.extras:
+            item = self._task_list_item_re.sub(self._task_list_item_sub, item)
+
+        return "<li>%s</li>\n" % item
+
+    def _process_list_items(self, list_str):
+        # Process the contents of a single ordered or unordered list,
+        # splitting it into individual list items.
+
+        # The $g_list_level global keeps track of when we're inside a list.
+        # Each time we enter a list, we increment it; when we leave a list,
+        # we decrement. If it's zero, we're not in a list anymore.
+        #
+        # We do this because when we're not inside a list, we want to treat
+        # something like this:
+        #
+        #       I recommend upgrading to version
+        #       8. Oops, now this line is treated
+        #       as a sub-list.
+        #
+        # As a single paragraph, despite the fact that the second line starts
+        # with a digit-period-space sequence.
+        #
+        # Whereas when we're inside a list (or sub-list), that line will be
+        # treated as the start of a sub-list. What a kludge, huh? This is
+        # an aspect of Markdown's syntax that's hard to parse perfectly
+        # without resorting to mind-reading. Perhaps the solution is to
+        # change the syntax rules such that sub-lists must start with a
+        # starting cardinal number; e.g. "1." or "a.".
+        self.list_level += 1
+        self._last_li_endswith_two_eols = False
+        list_str = list_str.rstrip('\n') + '\n'
+        list_str = self._list_item_re.sub(self._list_item_sub, list_str)
+        self.list_level -= 1
+        return list_str
+
+    def _get_pygments_lexer(self, lexer_name):
+        try:
+            from pygments import lexers, util
+        except ImportError:
+            return None
+        try:
+            return lexers.get_lexer_by_name(lexer_name)
+        except util.ClassNotFound:
+            return None
+
+    def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
+        import pygments
+        import pygments.formatters
+
+        class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
+            def _wrap_code(self, inner):
+                """A function for use in a Pygments Formatter which
+                wraps in <code> tags.
+                """
+                yield 0, "<code>"
+                for tup in inner:
+                    yield tup
+                yield 0, "</code>"
+
+            def wrap(self, source, outfile):
+                """Return the source with a code, pre, and div."""
+                return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
+
+        formatter_opts.setdefault("cssclass", "codehilite")
+        formatter = HtmlCodeFormatter(**formatter_opts)
+        return pygments.highlight(codeblock, lexer, formatter)
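+
+    # Illustrative note (an assumption about Pygments internals, not from
+    # the original source): wrap() above chains _wrap_code, _wrap_pre and
+    # _wrap_div, so the highlighted output has the shape
+    #   <div class="codehilite"><pre><code>...</code></pre></div>
+    # where "codehilite" is the default cssclass set just above.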
+
+    def _code_block_sub(self, match, is_fenced_code_block=False):
+        lexer_name = None
+        if is_fenced_code_block:
+            lexer_name = match.group(1)
+            if lexer_name:
+                formatter_opts = self.extras['fenced-code-blocks'] or {}
+            codeblock = match.group(2)
+            codeblock = codeblock[:-1]  # drop one trailing newline
+        else:
+            codeblock = match.group(1)
+            codeblock = self._outdent(codeblock)
+            codeblock = self._detab(codeblock)
+            codeblock = codeblock.lstrip('\n')  # trim leading newlines
+            codeblock = codeblock.rstrip()      # trim trailing whitespace
+
+            # Note: "code-color" extra is DEPRECATED.
+            if "code-color" in self.extras and codeblock.startswith(":::"):
+                lexer_name, rest = codeblock.split('\n', 1)
+                lexer_name = lexer_name[3:].strip()
+                codeblock = rest.lstrip("\n")   # Remove lexer declaration line.
+                formatter_opts = self.extras['code-color'] or {}
+
+        if lexer_name:
+            def unhash_code(codeblock):
+                for key, sanitized in list(self.html_spans.items()):
+                    codeblock = codeblock.replace(key, sanitized)
+                replacements = [
+                    ("&amp;", "&"),
+                    ("&lt;", "<"),
+                    ("&gt;", ">")
+                ]
+                for old, new in replacements:
+                    codeblock = codeblock.replace(old, new)
+                return codeblock
+            lexer = self._get_pygments_lexer(lexer_name)
+            if lexer:
+                codeblock = unhash_code(codeblock)
+                colored = self._color_with_pygments(codeblock, lexer,
+                                                    **formatter_opts)
+                return "\n\n%s\n\n" % colored
+
+        codeblock = self._encode_code(codeblock)
+        pre_class_str = self._html_class_str_from_tag("pre")
+        code_class_str = self._html_class_str_from_tag("code")
+        return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % (
+            pre_class_str, code_class_str, codeblock)
+
+    def _html_class_str_from_tag(self, tag):
+        """Get the appropriate ' class="..."' string (note the leading
+        space), if any, for the given tag.
+        """
+        if "html-classes" not in self.extras:
+            return ""
+        try:
+            html_classes_from_tag = self.extras["html-classes"]
+        except TypeError:
+            return ""
+        else:
+            if tag in html_classes_from_tag:
+                return ' class="%s"' % html_classes_from_tag[tag]
+        return ""
+
+    def _do_code_blocks(self, text):
+        """Process Markdown `<pre><code>` blocks."""
+        code_block_re = re.compile(r'''
+            (?:\n\n|\A\n?)
+            (               # $1 = the code block -- one or more lines, starting with a space/tab
+              (?:
+                (?:[ ]{%d} | \t)  # Lines must start with a tab or a tab-width of spaces
+                .*\n+
+              )+
+            )
+            ((?=^[ ]{0,%d}\S)|\Z)   # Lookahead for non-space at line-start, or end of doc
+            # Lookahead to make sure this block isn't already in a code block.
+            # Needed when syntax highlighting is being used.
+            (?![^<]*\</code\>)
+            ''' % (self.tab_width, self.tab_width),
+            re.M | re.X)
+        return code_block_re.sub(self._code_block_sub, text)
+
+    _fenced_code_block_re = re.compile(r'''
+        (?:\n+|\A\n?)
+        ^```([\w+-]+)?[ \t]*\n      # opening fence, $1 = optional lang
+        (.*?)                       # $2 = code block content
+        ^```[ \t]*\n                # closing fence
+        ''', re.M | re.X | re.S)
+
+    def _fenced_code_block_sub(self, match):
+        return self._code_block_sub(match, is_fenced_code_block=True)
+
+    def _do_fenced_code_blocks(self, text):
+        """Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
+        return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
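+
+    # Illustrative example: a block fenced as
+    #     ```python
+    #     print("hi")
+    #     ```
+    # is routed through _code_block_sub with is_fenced_code_block=True,
+    # so "python" becomes the Pygments lexer name (when available).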
+
+    # Rules for a code span:
+    # - backslash escapes are not interpreted in a code span
+    # - to include a backtick or a run of backticks, the delimiters must
+    #   be a longer run of backticks
+    # - cannot start or end a code span with a backtick; pad with a
+    #   space and that space will be removed in the emitted HTML
+    # See `test/tm-cases/escapes.text` for a number of edge-case
+    # examples.
+    _code_span_re = re.compile(r'''
+            (?<!\\)
+            (`+)        # \1 = Opening run of `
+            (?!`)       # See Note A in test/tm-cases/escapes.text
+            (.+?)       # \2 = The code block
+            (?<!`)
+            \1          # Matching closer
+            (?!`)
+        ''', re.X | re.S)
+
+    def _code_span_sub(self, match):
+        c = match.group(2).strip(" \t")
+        c = self._encode_code(c)
+        return "<code>%s</code>" % c
+
+    def _do_code_spans(self, text):
+        #   *   Backtick quotes are used for <code></code> spans.
+        #
+        #   *   You can use multiple backticks as the delimiters if you want to
+        #       include literal backticks in the code span. So, this input:
+        #
+        #         Just type ``foo `bar` baz`` at the prompt.
+        #
+        #       Will translate to:
+        #
+        #         <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
+        #
+        #       There's no arbitrary limit to the number of backticks you
+        #       can use as delimiters. If you need three consecutive backticks
+        #       in your code, use four for delimiters, etc.
+        #
+        #   *   You can use spaces to get literal backticks at the edges:
+        #
+        #         ... type `` `bar` `` ...
+        #
+        #       Turns to:
+        #
+        #         ... type <code>`bar`</code> ...
+        return self._code_span_re.sub(self._code_span_sub, text)
+
+    def _encode_code(self, text):
+        """Encode/escape certain characters inside Markdown code runs.
+        The point is that in code, these characters are literals,
+        and lose their special Markdown meanings.
+        """
+        replacements = [
+            # Encode all ampersands; HTML entities are not
+            # entities within a Markdown code span.
+            ('&', '&amp;'),
+            # Do the angle bracket song and dance:
+            ('<', '&lt;'),
+            ('>', '&gt;'),
+        ]
+        for before, after in replacements:
+            text = text.replace(before, after)
+        hashed = _hash_text(text)
+        self._escape_table[text] = hashed
+        return hashed
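+
+    # Illustrative example: _encode_code('a < b & c') produces
+    # 'a &lt; b &amp; c', stores a hash for that string in _escape_table,
+    # and returns the hash; _unescape_special_chars() restores the escaped
+    # text at the end of conversion.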
+
+    _strike_re = re.compile(r"~~(?=\S)(.+?)(?<=\S)~~", re.S)
+    def _do_strike(self, text):
+        text = self._strike_re.sub(r"<strike>\1</strike>", text)
+        return text
+
+    _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
+    _em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
+    _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
+    _code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
+    def _do_italics_and_bold(self, text):
+        # <strong> must go first:
+        if "code-friendly" in self.extras:
+            text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
+            text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
+        else:
+            text = self._strong_re.sub(r"<strong>\2</strong>", text)
+            text = self._em_re.sub(r"<em>\2</em>", text)
+        return text
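+
+    # Illustrative example: "**bold** and _em_" becomes
+    # "<strong>bold</strong> and <em>em</em>"; with the "code-friendly"
+    # extra, underscores are left alone so identifiers such as
+    # some_var_name are not italicized.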
+
+    # "smarty-pants" extra: Very liberal in interpreting a single prime as an
+    # apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
+    # "twixt" can be written without an initial apostrophe. This is fine because
+    # using scare quotes (single quotation marks) is rare.
+    _apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
+    _contractions = ["tis", "twas", "twer", "neath", "o", "n",
+        "round", "bout", "twixt", "nuff", "fraid", "sup"]
+    def _do_smart_contractions(self, text):
+        text = self._apostrophe_year_re.sub(r"&#8217;\1", text)
+        for c in self._contractions:
+            text = text.replace("'%s" % c, "&#8217;%s" % c)
+            text = text.replace("'%s" % c.capitalize(),
+                "&#8217;%s" % c.capitalize())
+        return text
+
+    # Substitute double-quotes before single-quotes.
+    _opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)")
+    _opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)')
+    _closing_single_quote_re = re.compile(r"(?<=\S)'")
+    _closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))')
+    def _do_smart_punctuation(self, text):
+        """Fancifies 'single quotes', "double quotes", and apostrophes.
+        Converts --, ---, and ... into en dashes, em dashes, and ellipses.
+
+        Inspiration is: <http://daringfireball.net/projects/smartypants/>
+        See "test/tm-cases/smarty_pants.text" for a full discussion of the
+        support here and
+        <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
+        discussion of some diversion from the original SmartyPants.
+        """
+        if "'" in text:  # guard for perf
+            text = self._do_smart_contractions(text)
+            text = self._opening_single_quote_re.sub("&#8216;", text)
+            text = self._closing_single_quote_re.sub("&#8217;", text)
+
+        if '"' in text:  # guard for perf
+            text = self._opening_double_quote_re.sub("&#8220;", text)
+            text = self._closing_double_quote_re.sub("&#8221;", text)
+
+        text = text.replace("---", "&#8212;")
+        text = text.replace("--", "&#8211;")
+        text = text.replace("...", "&#8230;")
+        text = text.replace(" . . . ", "&#8230;")
+        text = text.replace(". . .", "&#8230;")
+        return text
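+
+    # Illustrative example: "He said 'hi' -- really..." becomes
+    # "He said &#8216;hi&#8217; &#8211; really&#8230;".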
+
+    _block_quote_base = r'''
+        (                           # Wrap whole match in \1
+          (
+            ^[ \t]*>%s[ \t]?        # '>' at the start of a line
+              .+\n                  # rest of the first line
+            (.+\n)*                 # subsequent consecutive lines
+            \n*                     # blanks
+          )+
+        )
+    '''
+    _block_quote_re = re.compile(_block_quote_base % '', re.M | re.X)
+    _block_quote_re_spoiler = re.compile(_block_quote_base % '[ \t]*?!?', re.M | re.X)
+    _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M)
+    _bq_one_level_re_spoiler = re.compile('^[ \t]*>[ \t]*?![ \t]?', re.M)
+    _bq_all_lines_spoilers = re.compile(r'\A(?:^[ \t]*>[ \t]*?!.*[\n\r]*)+\Z', re.M)
+    _html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
+    def _dedent_two_spaces_sub(self, match):
+        return re.sub(r'(?m)^  ', '', match.group(1))
+
+    def _block_quote_sub(self, match):
+        bq = match.group(1)
+        is_spoiler = 'spoiler' in self.extras and self._bq_all_lines_spoilers.match(bq)
+        # trim one level of quoting
+        if is_spoiler:
+            bq = self._bq_one_level_re_spoiler.sub('', bq)
+        else:
+            bq = self._bq_one_level_re.sub('', bq)
+        # trim whitespace-only lines
+        bq = self._ws_only_line_re.sub('', bq)
+        bq = self._run_block_gamut(bq)          # recurse
+
+        bq = re.sub('(?m)^', '  ', bq)
+        # These leading spaces screw with <pre> content, so we need to fix that:
+        bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
+
+        if is_spoiler:
+            return '<blockquote class="spoiler">\n%s\n</blockquote>\n\n' % bq
+        else:
+            return '<blockquote>\n%s\n</blockquote>\n\n' % bq
+
+    def _do_block_quotes(self, text):
+        if '>' not in text:
+            return text
+        if 'spoiler' in self.extras:
+            return self._block_quote_re_spoiler.sub(self._block_quote_sub, text)
+        else:
+            return self._block_quote_re.sub(self._block_quote_sub, text)
+
+    def _form_paragraphs(self, text):
+        # Strip leading and trailing lines:
+        text = text.strip('\n')
+
+        # Wrap <p> tags.
+        grafs = []
+        for i, graf in enumerate(re.split(r"\n{2,}", text)):
+            if graf in self.html_blocks:
+                # Unhashify HTML blocks
+                grafs.append(self.html_blocks[graf])
+            else:
+                cuddled_list = None
+                if "cuddled-lists" in self.extras:
+                    # Need to put back trailing '\n' for `_list_item_re`
+                    # match at the end of the paragraph.
+                    li = self._list_item_re.search(graf + '\n')
+                    # Two of the same list marker in this paragraph: a likely
+                    # candidate for a list cuddled to preceding paragraph
+                    # text (issue 33). Note the `[-1]` is a quick way to
+                    # consider numeric bullets (e.g. "1." and "2.") to be
+                    # equal.
+                    if (li and len(li.group(2)) <= 3 and li.group("next_marker")
+                        and li.group("marker")[-1] == li.group("next_marker")[-1]):
+                        start = li.start()
+                        cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
+                        assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
+                        graf = graf[:start]
+
+                # Wrap <p> tags.
+                graf = self._run_span_gamut(graf)
+                grafs.append("<p>" + graf.lstrip(" \t") + "</p>")
+
+                if cuddled_list:
+                    grafs.append(cuddled_list)
+
+        return "\n\n".join(grafs)
+
+    def _add_footnotes(self, text):
+        if self.footnotes:
+            footer = [
+                '<div class="footnotes">',
+                '<hr' + self.empty_element_suffix,
+                '<ol>',
+            ]
+            for i, id in enumerate(self.footnote_ids):
+                if i != 0:
+                    footer.append('')
+                footer.append('<li id="fn-%s">' % id)
+                footer.append(self._run_block_gamut(self.footnotes[id]))
+                backlink = ('<a href="#fnref-%s" '
+                    'class="footnoteBackLink" '
+                    'title="Jump back to footnote %d in the text.">'
+                    '&#8617;</a>' % (id, i+1))
+                if footer[-1].endswith("</p>"):
+                    footer[-1] = footer[-1][:-len("</p>")] \
+                        + '&#160;' + backlink + "</p>"
+                else:
+                    footer.append("\n<p>%s</p>" % backlink)
+                footer.append('</li>')
+            footer.append('</ol>')
+            footer.append('</div>')
+            return text + '\n\n' + '\n'.join(footer)
+        else:
+            return text
+
+    # Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
+    #   http://bumppo.net/projects/amputator/
+    _ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
+    _naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
+    _naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I)
+
+    def _encode_amps_and_angles(self, text):
+        # Smart processing for ampersands and angle brackets that need
+        # to be encoded.
+        text = self._ampersand_re.sub('&amp;', text)
+
+        # Encode naked <'s
+        text = self._naked_lt_re.sub('&lt;', text)
+
+        # Encode naked >'s
+        # Note: Other markdown implementations (e.g. Markdown.pl, PHP
+        # Markdown) don't do this.
+        text = self._naked_gt_re.sub('&gt;', text)
+        return text
+
+    def _encode_backslash_escapes(self, text):
+        for ch, escape in list(self._escape_table.items()):
+            text = text.replace("\\"+ch, escape)
+        return text
+
+    _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
+    def _auto_link_sub(self, match):
+        g1 = match.group(1)
+        return '<a href="%s">%s</a>' % (g1, g1)
+
+    _auto_email_link_re = re.compile(r"""
+          <
+           (?:mailto:)?
+          (
+              [-.\w]+
+              \@
+              [-\w]+(\.[-\w]+)*\.[a-z]+
+          )
+          >
+        """, re.I | re.X | re.U)
+    def _auto_email_link_sub(self, match):
+        return self._encode_email_address(
+            self._unescape_special_chars(match.group(1)))
+
+    def _do_auto_links(self, text):
+        text = self._auto_link_re.sub(self._auto_link_sub, text)
+        text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
+        return text
+
+    def _encode_email_address(self, addr):
+        #  Input: an email address, e.g. "foo at example.com"
+        #
+        #  Output: the email address as a mailto link, with each character
+        #      of the address encoded as either a decimal or hex entity, in
+        #      the hopes of foiling most address harvesting spam bots. E.g.:
+        #
+        #    <a href="&#x6D;&#97;&#105;&#108;&#x74;&#111;:&#102;&#111;&#111;&#64;&#101;
+        #       x&#x61;&#109;&#x70;&#108;&#x65;&#x2E;&#99;&#111;&#109;">&#102;&#111;&#111;
+        #       &#64;&#101;x&#x61;&#109;&#x70;&#108;&#x65;&#x2E;&#99;&#111;&#109;</a>
+        #
+        #  Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
+        #  mailing list: <http://tinyurl.com/yu7ue>
+        chars = [_xml_encode_email_char_at_random(ch)
+                 for ch in "mailto:" + addr]
+        # Strip the mailto: from the visible part.
+        addr = '<a href="%s">%s</a>' \
+               % (''.join(chars), ''.join(chars[7:]))
+        return addr
+
+    def _do_link_patterns(self, text):
+        """Caveat emptor: there isn't much guarding against link
+        patterns being formed inside other standard Markdown links, e.g.
+        inside a [link def][like this].
+
+        Dev Notes: *Could* consider prefixing regexes with a negative
+        lookbehind assertion to attempt to guard against this.
+        """
+        link_from_hash = {}
+        for regex, repl in self.link_patterns:
+            replacements = []
+            for match in regex.finditer(text):
+                if hasattr(repl, "__call__"):
+                    href = repl(match)
+                else:
+                    href = match.expand(repl)
+                replacements.append((match.span(), href))
+            for (start, end), href in reversed(replacements):
+                escaped_href = (
+                    href.replace('"', '&quot;')  # b/c of attr quote
+                        # To avoid markdown <em> and <strong>:
+                        .replace('*', self._escape_table['*'])
+                        .replace('_', self._escape_table['_']))
+                link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
+                hash = _hash_text(link)
+                link_from_hash[hash] = link
+                text = text[:start] + hash + text[end:]
+        for hash, link in list(link_from_hash.items()):
+            text = text.replace(hash, link)
+        return text
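+
+    # Usage sketch (illustrative; the pattern and URL are hypothetical):
+    # link_patterns is a list of (compiled_regex, repl) pairs, e.g.
+    #   (re.compile(r'issue #(\d+)'), r'https://tracker.example.com/\1')
+    # Each match becomes an <a> whose href is the expanded replacement.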
+
+    def _unescape_special_chars(self, text):
+        # Swap back in all the special characters we've hidden.
+        for ch, hash in list(self._escape_table.items()):
+            text = text.replace(hash, ch)
+        return text
+
+    def _outdent(self, text):
+        # Remove one level of line-leading tabs or spaces
+        return self._outdent_re.sub('', text)
+
+
+class MarkdownWithExtras(Markdown):
+    """A markdowner class that enables most extras:
+
+    - footnotes
+    - code-color (only has effect if 'pygments' Python module on path)
+
+    These are not included:
+    - pyshell (specific to Python-related documenting)
+    - code-friendly (because it *disables* part of the syntax)
+    - link-patterns (because you need to specify some actual
+      link-patterns anyway)
+    """
+    extras = ["footnotes", "code-color"]
+
+
+# ---- internal support functions
+
+class UnicodeWithAttrs(unicode):
+    """A subclass of unicode used for the return value of conversion to
+    possibly attach some attributes. E.g. the "toc_html" attribute when
+    the "toc" extra is used.
+    """
+    metadata = None
+    _toc = None
+    def toc_html(self):
+        """Return the HTML for the current TOC.
+
+        This expects the `_toc` attribute to have been set on this instance.
+        """
+        if self._toc is None:
+            return None
+
+        def indent():
+            return '  ' * (len(h_stack) - 1)
+        lines = []
+        h_stack = [0]   # stack of header-level numbers
+        for level, id, name in self._toc:
+            if level > h_stack[-1]:
+                lines.append("%s<ul>" % indent())
+                h_stack.append(level)
+            elif level == h_stack[-1]:
+                lines[-1] += "</li>"
+            else:
+                while level < h_stack[-1]:
+                    h_stack.pop()
+                    if not lines[-1].endswith("</li>"):
+                        lines[-1] += "</li>"
+                    lines.append("%s</ul></li>" % indent())
+            lines.append('%s<li><a href="#%s">%s</a>' % (
+                indent(), id, name))
+        while len(h_stack) > 1:
+            h_stack.pop()
+            if not lines[-1].endswith("</li>"):
+                lines[-1] += "</li>"
+            lines.append("%s</ul>" % indent())
+        return '\n'.join(lines) + '\n'
+    toc_html = property(toc_html)
+
+## {{{ http://code.activestate.com/recipes/577257/ (r1)
+_slugify_strip_re = re.compile(r'[^\w\s-]')
+_slugify_hyphenate_re = re.compile(r'[-\s]+')
+def _slugify(value):
+    """
+    Normalizes string, converts to lowercase, removes non-alpha characters,
+    and converts spaces to hyphens.
+
+    From Django's "django/template/defaultfilters.py".
+    """
+    import unicodedata
+    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
+    value = _slugify_strip_re.sub('', value).strip().lower()
+    return _slugify_hyphenate_re.sub('-', value)
+## end of http://code.activestate.com/recipes/577257/ }}}
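+
+# Illustrative example: _slugify(u"Hello, World!") -> "hello-world".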
+
+
+# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
+def _curry(*args, **kwargs):
+    function, args = args[0], args[1:]
+    def result(*rest, **kwrest):
+        combined = kwargs.copy()
+        combined.update(kwrest)
+        return function(*args + rest, **combined)
+    return result
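+
+# Illustrative example: add2 = _curry(lambda a, b: a + b, 2); add2(3) -> 5.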
+
+
+# Recipe: regex_from_encoded_pattern (1.0)
+def _regex_from_encoded_pattern(s):
+    """'foo'    -> re.compile(re.escape('foo'))
+       '/foo/'  -> re.compile('foo')
+       '/foo/i' -> re.compile('foo', re.I)
+    """
+    if s.startswith('/') and s.rfind('/') != 0:
+        # Parse it: /PATTERN/FLAGS
+        idx = s.rfind('/')
+        pattern, flags_str = s[1:idx], s[idx+1:]
+        flag_from_char = {
+            "i": re.IGNORECASE,
+            "l": re.LOCALE,
+            "s": re.DOTALL,
+            "m": re.MULTILINE,
+            "u": re.UNICODE,
+        }
+        flags = 0
+        for char in flags_str:
+            try:
+                flags |= flag_from_char[char]
+            except KeyError:
+                raise ValueError("unsupported regex flag: '%s' in '%s' "
+                                 "(must be one of '%s')"
+                                 % (char, s, ''.join(list(flag_from_char.keys()))))
+        return re.compile(s[1:idx], flags)
+    else:  # not an encoded regex
+        return re.compile(re.escape(s))
+
+
+# Recipe: dedent (0.1.2)
+def _dedentlines(lines, tabsize=8, skip_first_line=False):
+    """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
+
+        "lines" is a list of lines to dedent.
+        "tabsize" is the tab width to use for indent width calculations.
+        "skip_first_line" is a boolean indicating if the first line should
+            be skipped for calculating the indent width and for dedenting.
+            This is sometimes useful for docstrings and similar.
+
+    Same as dedent() except operates on a sequence of lines. Note: the
+    lines list is modified **in-place**.
+    """
+    DEBUG = False
+    if DEBUG:
+        print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
+              % (tabsize, skip_first_line))
+    margin = None
+    for i, line in enumerate(lines):
+        if i == 0 and skip_first_line: continue
+        indent = 0
+        for ch in line:
+            if ch == ' ':
+                indent += 1
+            elif ch == '\t':
+                indent += tabsize - (indent % tabsize)
+            elif ch in '\r\n':
+                continue  # skip all-whitespace lines
+            else:
+                break
+        else:
+            continue  # skip all-whitespace lines
+        if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
+        if margin is None:
+            margin = indent
+        else:
+            margin = min(margin, indent)
+    if DEBUG: print("dedent: margin=%r" % margin)
+
+    if margin is not None and margin > 0:
+        for i, line in enumerate(lines):
+            if i == 0 and skip_first_line: continue
+            removed = 0
+            for j, ch in enumerate(line):
+                if ch == ' ':
+                    removed += 1
+                elif ch == '\t':
+                    removed += tabsize - (removed % tabsize)
+                elif ch in '\r\n':
+                    if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
+                    lines[i] = lines[i][j:]
+                    break
+                else:
+                    raise ValueError("unexpected non-whitespace char %r in "
+                                     "line %r while removing %d-space margin"
+                                     % (ch, line, margin))
+                if DEBUG:
+                    print("dedent: %r: %r -> removed %d/%d"\
+                          % (line, ch, removed, margin))
+                if removed == margin:
+                    lines[i] = lines[i][j+1:]
+                    break
+                elif removed > margin:
+                    lines[i] = ' '*(removed-margin) + lines[i][j+1:]
+                    break
+            else:
+                if removed:
+                    lines[i] = lines[i][removed:]
+    return lines
+
+
+def _dedent(text, tabsize=8, skip_first_line=False):
+    """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
+
+        "text" is the text to dedent.
+        "tabsize" is the tab width to use for indent width calculations.
+        "skip_first_line" is a boolean indicating if the first line should
+            be skipped for calculating the indent width and for dedenting.
+            This is sometimes useful for docstrings and similar.
+
+    textwrap.dedent(s), but don't expand tabs to spaces
+    """
+    lines = text.splitlines(1)
+    _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
+    return ''.join(lines)
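+
+# Illustrative example: _dedent("  a\n    b\n") -> "a\n  b\n"; the common
+# two-space margin is removed and deeper indentation is preserved.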
+
+
+class _memoized(object):
+    """Decorator that caches a function's return value each time it is called.
+    If called later with the same arguments, the cached value is returned, and
+    not re-evaluated.
+
+    http://wiki.python.org/moin/PythonDecoratorLibrary
+    """
+    def __init__(self, func):
+        self.func = func
+        self.cache = {}
+
+    def __call__(self, *args):
+        try:
+            return self.cache[args]
+        except KeyError:
+            self.cache[args] = value = self.func(*args)
+            return value
+        except TypeError:
+            # uncachable -- for instance, passing a list as an argument.
+            # Better to not cache than to blow up entirely.
+            return self.func(*args)
+
+    def __repr__(self):
+        """Return the function's docstring."""
+        return self.func.__doc__
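+
+# Usage sketch (illustrative):
+#     @_memoized
+#     def fib(n):
+#         return n if n < 2 else fib(n-1) + fib(n-2)
+# Repeated calls with the same hashable arguments are served from cache.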
+
+
+def _xml_oneliner_re_from_tab_width(tab_width):
+    """Standalone XML processing instruction regex."""
+    return re.compile(r"""
+        (?:
+            (?<=\n\n)       # Starting after a blank line
+            |               # or
+            \A\n?           # the beginning of the doc
+        )
+        (                           # save in $1
+            [ ]{0,%d}
+            (?:
+                <\?\w+\b\s+.*?\?>   # XML processing instruction
+                |
+                <\w+:\w+\b\s+.*?/>  # namespaced single tag
+            )
+            [ \t]*
+            (?=\n{2,}|\Z)       # followed by a blank line or end of document
+        )
+        """ % (tab_width - 1), re.X)
+_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
+
+
+def _hr_tag_re_from_tab_width(tab_width):
+    return re.compile(r"""
+        (?:
+            (?<=\n\n)       # Starting after a blank line
+            |               # or
+            \A\n?           # the beginning of the doc
+        )
+        (                       # save in \1
+            [ ]{0,%d}
+            <(hr)               # start tag = \2
+            \b                  # word break
+            ([^<>])*?           # attributes, if any
+            /?>                 # the matching end tag
+            [ \t]*
+            (?=\n{2,}|\Z)       # followed by a blank line or end of document
+        )
+        """ % (tab_width - 1), re.X)
+_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
+
+
+def _xml_escape_attr(attr, skip_single_quote=True):
+    """Escape the given string for use in an HTML/XML tag attribute.
+
+    By default this doesn't bother with escaping `'` to `&#39;`, presuming that
+    the tag attribute is surrounded by double quotes.
+    """
+    escaped = (attr
+        .replace('&', '&amp;')
+        .replace('"', '&quot;')
+        .replace('<', '&lt;')
+        .replace('>', '&gt;'))
+    if not skip_single_quote:
+        escaped = escaped.replace("'", "&#39;")
+    return escaped
+
+
+def _xml_encode_email_char_at_random(ch):
+    r = random()
+    # Roughly 10% raw, 45% hex, 45% dec.
+    # '@' *must* be encoded. I [John Gruber] insist.
+    # Issue 26: '_' must be encoded.
+    if r > 0.9 and ch not in "@_":
+        return ch
+    elif r < 0.45:
+        # The [1:] is to drop leading '0': 0x63 -> x63
+        return '&#%s;' % hex(ord(ch))[1:]
+    else:
+        return '&#%s;' % ord(ch)
+
+
+def _urlencode(attr, safe_mode=False):
+    """Replace special characters in string using the %xx escape."""
+    if safe_mode:
+        escaped = quote_plus(attr).replace('+', ' ')
+    else:
+        escaped = attr.replace('"', '%22')
+    return escaped
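+
+# Illustrative example: outside safe mode only double quotes are
+# escaped, e.g. _urlencode('a"b') -> 'a%22b'; in safe mode the whole
+# string goes through quote_plus (with '+' mapped back to spaces).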
+
+
+# ---- mainline
+
+class _NoReflowFormatter(optparse.IndentedHelpFormatter):
+    """An optparse formatter that does NOT reflow the description."""
+    def format_description(self, description):
+        return description or ""
+
+
+def _test():
+    import doctest
+    doctest.testmod()
+
+
+def main(argv=None):
+    if argv is None:
+        argv = sys.argv
+    if not logging.root.handlers:
+        logging.basicConfig()
+
+    usage = "usage: %prog [PATHS...]"
+    version = "%prog "+__version__
+    parser = optparse.OptionParser(prog="markdown2", usage=usage,
+        version=version, description=cmdln_desc,
+        formatter=_NoReflowFormatter())
+    parser.add_option("-v", "--verbose", dest="log_level",
+                      action="store_const", const=logging.DEBUG,
+                      help="more verbose output")
+    parser.add_option("--encoding",
+                      help="specify encoding of text content")
+    parser.add_option("--html4tags", action="store_true", default=False,
+                      help="use HTML 4 style for empty element tags")
+    parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
+                      help="sanitize literal HTML: 'escape' escapes "
+                           "HTML meta chars, 'replace' replaces with an "
+                           "[HTML_REMOVED] note")
+    parser.add_option("-x", "--extras", action="append",
+                      help="Turn on specific extra features (not part of "
+                           "the core Markdown spec). See above.")
+    parser.add_option("--use-file-vars",
+                      help="Look for and use Emacs-style 'markdown-extras' "
+                           "file var to turn on extras. See "
+                           "<https://github.com/trentm/python-markdown2/wiki/Extras>")
+    parser.add_option("--link-patterns-file",
+                      help="path to a link pattern file")
+    parser.add_option("--self-test", action="store_true",
+                      help="run internal self-tests (some doctests)")
+    parser.add_option("--compare", action="store_true",
+                      help="run against Markdown.pl as well (for testing)")
+    parser.set_defaults(log_level=logging.INFO, compare=False,
+                        encoding="utf-8", safe_mode=None, use_file_vars=False)
+    opts, paths = parser.parse_args()
+    log.setLevel(opts.log_level)
+
+    if opts.self_test:
+        return _test()
+
+    if opts.extras:
+        extras = {}
+        for s in opts.extras:
+            splitter = re.compile("[,;: ]+")
+            for e in splitter.split(s):
+                if '=' in e:
+                    ename, earg = e.split('=', 1)
+                    try:
+                        earg = int(earg)
+                    except ValueError:
+                        pass
+                else:
+                    ename, earg = e, None
+                extras[ename] = earg
+    else:
+        extras = None
+
+    if opts.link_patterns_file:
+        link_patterns = []
+        f = open(opts.link_patterns_file)
+        try:
+            for i, line in enumerate(f.readlines()):
+                if not line.strip(): continue
+                if line.lstrip().startswith("#"): continue
+                try:
+                    pat, href = line.rstrip().rsplit(None, 1)
+                except ValueError:
+                    raise MarkdownError("%s:%d: invalid link pattern line: %r"
+                                        % (opts.link_patterns_file, i+1, line))
+                link_patterns.append(
+                    (_regex_from_encoded_pattern(pat), href))
+        finally:
+            f.close()
+    else:
+        link_patterns = None
+
+    from os.path import join, dirname, abspath, exists
+    markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
+                       "Markdown.pl")
+    if not paths:
+        paths = ['-']
+    for path in paths:
+        if path == '-':
+            text = sys.stdin.read()
+        else:
+            fp = codecs.open(path, 'r', opts.encoding)
+            text = fp.read()
+            fp.close()
+        if opts.compare:
+            from subprocess import Popen, PIPE
+            print("==== Markdown.pl ====")
+            p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
+            p.stdin.write(text.encode('utf-8'))
+            p.stdin.close()
+            perl_html = p.stdout.read().decode('utf-8')
+            if py3:
+                sys.stdout.write(perl_html)
+            else:
+                sys.stdout.write(perl_html.encode(
+                    sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
+            print("==== markdown2.py ====")
+        html = markdown(text,
+            html4tags=opts.html4tags,
+            safe_mode=opts.safe_mode,
+            extras=extras, link_patterns=link_patterns,
+            use_file_vars=opts.use_file_vars)
+        if py3:
+            sys.stdout.write(html)
+        else:
+            sys.stdout.write(html.encode(
+                sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
+        if extras and "toc" in extras:
+            log.debug("toc_html: " +
+                str(html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')))
+        if opts.compare:
+            test_dir = join(dirname(dirname(abspath(__file__))), "test")
+            if exists(join(test_dir, "test_markdown2.py")):
+                sys.path.insert(0, test_dir)
+                from test_markdown2 import norm_html_from_html
+                norm_html = norm_html_from_html(html)
+                norm_perl_html = norm_html_from_html(perl_html)
+            else:
+                norm_html = html
+                norm_perl_html = perl_html
+            print("==== match? %r ====" % (norm_perl_html == norm_html))
+
+
+if __name__ == "__main__":
+    sys.exit(main(sys.argv))
Index: checkout/doc/old/README.md
===================================================================
--- checkout/doc/old/README.md	(nonexistent)
+++ checkout/doc/old/README.md	(revision 94669)
@@ -0,0 +1,17 @@
+National Environmental Modeling System (NEMS) Version 4.0
+=========================================================
+
+This is the source directory for the NEMS component of NEMS-based
+modeling systems.  It:
+
+* Employs the ESMF superstructure & utilities
+* Separates dynamics, physics, and coupler grid components &
+  import/export states
+* Adds digital filtering capabilities
+* Supports adiabatic (dynamics-only) simulation
+* Provides enhanced postprocessing capability
+
+Most documentation that was in this file is now in the doc/
+subdirectory, or in the app-level doc/ directory.  This documentation
+is automatically combined into a single HTML file when one runs `make`
+in the doc/ subdirectory.
Index: checkout/doc/old/OLDTEST.md
===================================================================
--- checkout/doc/old/OLDTEST.md	(nonexistent)
+++ checkout/doc/old/OLDTEST.md	(revision 94669)
@@ -0,0 +1,165 @@
+Old Regression Test System
+==========================
+
+This section documents the old rt.sh system, which has been replaced.
+This system is no longer supported, but has been retained for backward
+compatibility.  Furthermore, the current NEMSCompsetRun requires this
+script.  The NEMSCompsetRun will be updated shortly, after some
+modulefile changes are made in NEMS.
+
+Running rt.sh
+-------------
+
+The older regression test system is run as follows in bash, sh, or ksh:
+
+    cd NEMS/oldtests
+    ./rt.sh (command) (options) > rt.log 2>&1 &
+
+In csh or tcsh, do this:
+
+    cd NEMS/oldtests
+    ./rt.sh (command) (options) >& rt.log &
+
+This will launch a background process that runs `rt.sh` and logs
+the status to `rt.log`.
+
+The `(command)` must include at least one of the following, which
+specify what is being run:
+
+* `-f` - run all tests that work on your platform
+* `-s` - run "standard" tests that work on your platform
+* `-c gfs` - create a new baseline for the GSM, WAM, and GOCART tests
+* `-c nmm` - create a new baseline for the NMM tests
+
+In addition, the following `(option)`s work:
+
+* `-m` - compare against user's baseline
+* `-l FILE` - use an alternate file instead of `rt.conf`
+
+The `> rt.log 2>&1` and `>& rt.log` parts are redirection operators in
+your shell.  They will log additional information from `rt.sh` to `rt.log`.
+This is critical when debugging problems such as batch system failures
+or disk quota problems.
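+
+For example, a hypothetical standard-test run in bash that compares
+against your own baseline might look like:
+
+    cd NEMS/oldtests
+    ./rt.sh -s -m > rt.log 2>&1 &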
+
+Output of rt.sh
+---------------
+
+The `rt.sh` script produces several log files and a directory of results.
+
+* `RegressionTests_(platform).log` - regression test results
+* `Compile_(platform).log` - compilation logs
+* `rt.log` - debug output of rt.sh if you used the redirection operators
+* `/path/to/stmp/$USER/rt.$$/` - directory in which the tests were run
+
+In these paths,
+
+* `(platform)` - "theia" or "wcoss", the platform on which you ran
+* `/path/to/stmp` - the scrub area chosen by the scripts
+* `$USER` - your username
+* `$$` - a unique id chosen by `rt.sh` to avoid overwriting an old
+  run.  Generally it is the UNIX process id of `rt.sh`
+
+To find `/path/to/stmp` and `$$` you need to look in the log of `rt.sh`
+for a line like this, near the top of the `rt.log`:
+
+    mkdir -p /scratch4/NCEPDEV/stmp3/Samuel.Trahan/rt_104307
+
+Within that directory, you will find one directory for each test:
+
+    you at theia> ls -1 /scratch4/NCEPDEV/stmp3/Samuel.Trahan/rt_104307
+    gfs_eulerian
+    gfs_gocart_nemsio
+    gfs_slg
+    gfs_slg_48pe
+    gfs_slg_adiabatic
+    gfs_slg_land
+    gfs_slg_nsst
+    gfs_slg_rsthst
+    gfs_slg_stochy
+    gfs_slg_t574
+    nmm_2way_nests
+    nmm_2way_nests_debug
+    nmm_2way_nests_restart
+
+Each directory contains the input and output files for one test.  Some
+files of interest are:
+
+* `err` - stderr stream from the batch job that ran this program
+* `out` - stdout stream from the batch job that ran this program
+* `PET*.ESMF_LogFile` - ESMF log files from each MPI rank
+* `nemsusage.xml` - resource usage information for all MPI ranks
+* `timing.summary` - resource usage information for rank 0
+
+Configuring rt.sh: The rt.conf
+------------------------------
+
+The behavior of `rt.sh` is controlled by the `rt.conf` file.  That
+file can be found in the NEMS/oldtests directory and has the following
+syntax:
+
+| COMMAND  |    METHOD    |  SUBSET  | PLATFORM |   VERSION  |
+| -------- | ------------ | -------- | -------- | ---------- |
+| APPBUILD | app=APP-NAME | standard |          |            |
+| RUN      | test_name    | standard |          | nmm        |
+| COMPILE  | nmm          | standard |          | nmmb_intel |
+
+The available commands are:
+
+* `APPBUILD` - run the [NEMSAppBuilder](#ni-appbuild) and load new modules
+* `RUN` - run a test
+* `COMPILE` - no longer supported; runs the [manual build system](#manual-method)
+
+The meaning of the other arguments depends on the command, and is
+described below.
+
+### rt.conf APPBUILD Command
+
+When the command is `APPBUILD` the other arguments have these meanings:
+
+* `METHOD` - arguments to send to the NEMSAppBuilder
+* `SUBSET` - `standard` or empty (all whitespace).  If `standard` is
+  here, then only the `rt.sh -s` mode will run this build.
+* `PLATFORM` - `wcoss` to run only on WCOSS, `theia` to run only on Theia,
+  or empty (all whitespace) to run on all platforms
+* `VERSION` - unused; leave this blank (all whitespace)
+
+### rt.conf RUN Command
+
+The RUN command runs a test.  The meanings of the columns are as follows:
+
+* `METHOD` - name of the test.  This must correspond to a file in the
+  NEMS/oldtests/tests directory.
+* `SUBSET` - `standard` or empty (all whitespace).  If `standard` is
+  here, then only the `rt.sh -s` mode will run this test.
+* `PLATFORM` - `wcoss` to run only on WCOSS, `theia` to run only on Theia,
+  or empty (all whitespace) to run on all platforms
+* `VERSION` - which model this pertains to: `nmm` or `gfs`
+
+### rt.conf COMPILE command
+
+This command runs the [manual build system](#manual-method).  This is
+unsupported and retained only for debugging the new build system.
+
+* `METHOD` - arguments to the `make` command
+* `SUBSET` - `standard` or empty (all whitespace).  If `standard` is
+  here, then only the `rt.sh -s` mode will run this build.
+* `PLATFORM` - Mandatory.  Must be `wcoss` to run only on WCOSS or
+  `theia` to run only on Theia.  This is used to construct the
+  `configure` command.
+* `VERSION` - Mandatory. The ESMF version, passed to the `configure` command.
+
+In the `COMPILE` mode, the following commands are run based on those
+arguments:
+
+    ./configure (VERSION)_(PLATFORM)
+    source conf/modules.nems
+    gmake clean
+    gmake (METHOD) J=-j2
+
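+For example, a hypothetical rt.conf line of
+`| COMPILE | nmm | standard | theia | nmmb_intel |` would expand to:
+
+    ./configure nmmb_intel_theia
+    source conf/modules.nems
+    gmake clean
+    gmake nmm J=-j2
+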
+### Subsetting Tests in rt.conf
+
+You can explicitly disable parts of the test suite by commenting out
+lines of rt.conf.  Note that disabling the build commands (APPBUILD or
+COMPILE) will skip the build process and cause tests to be run with
+whatever NEMS.x and modules.conf are presently in the NEMS external.
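+
+For example, prefixing a line with `#` (assuming `rt.conf` follows the
+usual `#`-comment convention) would skip that entry:
+
+    #| RUN      | gfs_slg      | standard |          | gfs        |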
Index: checkout/doc/old/BUILD.md
===================================================================
--- checkout/doc/old/BUILD.md	(nonexistent)
+++ checkout/doc/old/BUILD.md	(revision 94669)
@@ -0,0 +1,129 @@
+<a name="building"></a>Building NEMS
+==========================================
+
+This chapter describes the options for building the NEMSLegacy and
+the supported platforms.  There are three ways to build: the
+NEMSAppBuilder in interactive or non-interactive mode, or a manual
+process.  The recommended way to compile the NEMS is to use the
+NEMSAppBuilder in non-interactive mode.  However, all methods are
+described here.  We also provide troubleshooting information at the
+end of this chapter.
+
+Build Targets
+-------------
+
+The list of build targets available for an app is found at the top
+level of the app in `*.appBuilder` files.  The app-level documentation
+should have information about the meanings and purpose of each build
+target.
+
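+To see which build targets exist, one can simply list those files from
+the app-level directory (a sketch; the exact app layout may vary):
+
+    ls *.appBuilder
+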
+<a name="ni-appbuild"></a> Recommended Method: Non-Interactive NEMSAppBuilder
+-----------------------------------------------------------------------------
+
+From the top level (directory above NEMS), run the `NEMSAppBuilder`.
+This is a build script that knows how to build various apps, and works
+for more than just the NEMSLegacy.  The syntax is:
+
+    ./NEMS/NEMSAppBuilder (options) app=(app)
+
+Here, the `(app)` is the selected application as discussed in the
+[Supported Builds and Platforms section](#supbuild).  The `(options)`
+should be one of the following:
+
+* `rebuild` - clean the source directory before recompiling.
+* `norebuild` - do not clean; reuse existing libraries and object
+  files whenever possible.
+
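+For example, a from-scratch build of the NEMSLegacy app might look
+like this (a sketch; substitute your own app name):
+
+    ./NEMS/NEMSAppBuilder rebuild app=NEMSLegacy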
+
+Interactive NEMSAppBuilder
+--------------------------
+
+The NEMSAppBuilder can be run in interactive mode.  To do so, simply
+run the command without any arguments:
+
+    ./NEMS/NEMSAppBuilder
+
+The NEMSAppBuilder will instruct you further.  Note that this method
+will discard some of the log files, which makes build failures harder
+to track.  Also, it has some errors in its process tracking, and will
+kill the wrong processes when a build is canceled.  Such bugs are why
+the non-interactive mode is preferred.
+
+
+
+<a name="manual-method"></a>Manual Method: Configure, Clean, Make
+-----------------------------------------------------------------
+
+It is possible to build all apps via a manual method.  This method
+also makes other, undocumented features available.  Ultimately, the
+NEMSAppBuilder is simply a wrapper around these manual commands.
+Before using such manual commands, it is best to talk to code managers
+to make sure you are building correctly.
+
+The manual method works like this:
+
+    cd NEMS/src/
+    ./configure (method)
+    source conf/modules.nems
+    gmake clean
+    gmake (nems-ver) J=-j2
+
+The `(method)` is one of the available configurations.  Run
+`./configure help` to get a list, or read the `configure` script.
+The `(nems-ver)` is one of the following:
+
+* `gsm` - build the GSM without GOCART
+* `gsm GOCART_MODE=full` - build the GSM with GOCART
+* `nmm` - build the NMM without debug
+* `nmm DEBUG=on` - build NMM in debug mode
+* `nmm_post` - build NMM with inline post-processing
+
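+As a concrete sketch (assuming a hypothetical `nmmb_intel_theia`
+configuration), a debug NMM build would be:
+
+    cd NEMS/src/
+    ./configure nmmb_intel_theia
+    source conf/modules.nems
+    gmake clean
+    gmake nmm DEBUG=on J=-j2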
+
+
+Troubleshooting Failed Builds
+-----------------------------
+
+### Incomplete Checkout
+
+When there are network problems or high server load, your checkout
+from the Subversion and Git repositories may fail.  This will lead to
+any number of confusing errors while building.  You can continue the
+checkout process by going to the top level (above the NEMS directory) and running
+`svn update`.  Repeat that until no more files are updated, and no
+errors are reported.
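+
+In shell terms, that amounts to the following (repeat the update by
+hand until it reports no changes):
+
+    cd ..          # top level, above the NEMS directory
+    svn update     # run again until no more files are updated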
+
+### Unclean Environment
+
+Setting up your environment incorrectly can lead to problems while
+building.  If you see build issues from a clean, new checkout, this
+may be the problem.  You should remove all `module` commands from your
+`~/.*rc` files and get a clean, new login shell.  Then retry the
+build.
+
+### Unclean Checkout
+
+Another common cause of failed builds is having unintended changes in
+your source code or build system.  To test for this, get a clean, new
+checkout from the repository and retry.
+
+### Unsupported Platform
+
+Some apps only support a few platforms.  For example, the NEMSLegacy
+app is only supported on WCOSS Phase 1 (Gyre/Tide) and NOAA Theia.
+Attempts to build on other platforms may or may not work.
+
+### Simultaneous Builds
+
+Attempting to build multiple times in the same NEMS checkout directory
+will cause unexpected failures.  For example, if you are running the
+regression test system twice at once, multiple builds will happen at
+the same time.  On Theia, this frequently shows up as a massive,
+many-terabyte file which cannot be created due to fileset quota limits.
+Other failure modes are possible.
+
+
Index: checkout/doc/old/md2html.py
===================================================================
--- checkout/doc/old/md2html.py	(nonexistent)
+++ checkout/doc/old/md2html.py	(revision 94669)
@@ -0,0 +1,83 @@
+#! /usr/bin/env python
+
+# Generates an HTML file from one or more Markdown files using the
+# markdown2 module in markdown.py.  Pass the input *.md files followed
+# by the output .html file; run from the app-level doc directory.
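+#
+# Example (hypothetical file names):
+#   ./md2html.py README.md OLDTEST.md README.html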
+
+import markdown2
+import logging
+import sys
+
+# Logging object for this module:
+logger=None
+
+TOP='''<html>
+<head>
+  <title>NEMSLegacy Build and Test Instructions</title>
+  <link rel="stylesheet" type="text/css" href="README.css">
+</head>
+<body>
+'''
+
+BOTTOM='''
+</body>
+</html>'''
+
+# List of extra options to turn on in markdown2:
+EXTRAS=['tables','code-friendly']
+
+def write(outfile,infiles):
+    # Open README.html as htmlf, in truncate mode:
+    logger.info('%s: output file'%(outfile,))
+    with open(outfile,'wt') as htmlf:
+        # Write the heading and open the <body> tag:
+        htmlf.write(TOP)
+
+        # Loop over all input files, writing each one:
+        for infile in infiles:
+            logger.info('%s: input file'%(infile,))
+            try:
+                htmlf.write(convert(infile))
+            except EnvironmentError as ee:
+                logger.warning('%s: skipping file: %s'%(infile,str(ee)))
+
+        # Close the body and html tags:
+        htmlf.write(BOTTOM)
+
+
+def convert(infile):
+    # Open the *.md file as mdf:
+    with open(infile,'rt') as mdf:
+        # Read all of the *.md file into md:
+        md=mdf.read()
+
+    # convert the contents of the *.md file to HTML and return it:
+    return markdown2.markdown(md,extras=EXTRAS)
+    
+def initlogging():
+    global logger
+    logger=logging.getLogger('md2html')
+    oformat=logging.Formatter(
+        "%(asctime)s.%(msecs)03d %(name)s (%(filename)s:%(lineno)d) "
+        "%(levelname)s: %(message)s",
+        "%m/%d %H:%M:%S")
+    root=logging.getLogger()
+    root.setLevel(logging.INFO)
+    logstream=logging.StreamHandler(sys.stderr)
+    logstream.setFormatter(oformat)
+    logstream.setLevel(logging.INFO)
+    root.addHandler(logstream)
+
+def usage():
+    # Print brief usage information and abort; main() calls this when
+    # too few arguments are provided:
+    sys.stderr.write('Usage: md2html.py input.md [input2.md ...] output.html\n')
+    sys.exit(1)
+
+def main(args):
+    initlogging()
+    if len(args)<2:
+        usage()
+    outfile=args[-1]   # last argument is the output file
+    infiles=args[0:-1] # remaining arguments are the input files
+    logger.info('Out %s in %s'%(outfile,
+                                ':'.join(infiles)))
+    write(outfile,infiles)
+
+if __name__=='__main__':
+    main(sys.argv[1:])

Property changes on: checkout/doc/old/md2html.py
___________________________________________________________________
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: checkout/doc
===================================================================
--- checkout/doc	(revision 93212)
+++ checkout/doc	(revision 94669)

Property changes on: checkout/doc
___________________________________________________________________
Added: svn:mergeinfo
## -0,0 +0,111 ##
   Merged /nems/restored/moorthi/nems_rev_75207/doc:r76978-76981
   Merged /nems/branches_restored/UGCS-Seasonal/highresT574/doc:r76982-76984
   Merged /nems/branches/patrick/nems_4gsm_nuopc/doc:r67803-67964
   Merged /nems/branches/dusan/configure_file_cleanup/doc:r36347-36704
   Merged /nems/branches/jun/dfi_rsthst/doc:r70552-71555
   Merged /nems/branches/wx20rv/ESMF_PET/doc:r22223-22271
   Merged /nems/branches/seaice/doc:r50569-51523
   Merged /nems/branches/UGCS-Seasonal/highres-from80403/doc:r80427-80531
   Merged /nems/branches/jun/nems_gsmtrapon/doc:r76084-76288
   Merged /nems/branches/UGCS-Seasonal/highres/doc:r77665-78058
   Merged /nems/branches/theurich/nems-moreExports/doc:r39551-39687
   Merged /nems/branches/sarah/ngac_v2/doc:r46433-49633
   Merged /nems/branches/euge/nemstrunk_NEMSLegacyM/doc:r77740-78870
   Merged /nems/branches/black/nestbc/doc:r32272-40171
   Merged /nems/branches/jun/ldfi_grd_sp/doc:r69750-70244
   Merged /nems/branches/jun/nemswam_tvd/doc:r39770-40798
   Merged /nems/branches/fv3-jet/doc:r89946-93395
   Merged /nems/branches/jun/nemsgfs_vldt/doc:r21544-21613
   Merged /nems/branches/moorthi/nems_slg_shoc/doc:r59205-62067
   Merged /nems/branches/euge/nemstrunk_NEMSLegacy/doc:r77170-77739
   Merged /nems/branches/black/two_way/doc:r29469-30859,30963-32170
   Merged /nems/branches/dusan/nest_restart_lnsv/doc:r27317-27975
   Merged /nems/branches/stochy.tmp/doc:r75830-75899
   Merged /nems/branches/rusage/doc:r83939-84809
   Merged /nems/branches/xwu/doc:r33485-33786,42992-43776
   Merged /nems/branches/UGCS-Seasonal/fixArcticInstability/doc:r78279-78426
   Merged /nems/branches/jun/moorthi_79545/doc:r79546-79768
   Merged /nems/branches/NUOPC/development/doc:r39688-54597,54848-54863,55006-74352
   Merged /nems/branches/UGCS-Seasonal/medrest/doc:r75832-76575
   Merged /nems/branches/nems_tic182/doc:r79546-79630
   Merged /nems/restored/UGCS-Seasonal/highresT574/doc:r76980-76981
   Merged /nems/branches/jun/gsm_logfile/doc:r84848-84910
   Merged /nems/branches/patrick/doc:r17744-18674
   Merged /nems/branches/NEMSUpdate/UGCSWeather/doc:r93382-94235,94237-94655
   Merged /nems/branches/xingren/doc:r38461-40634,56840-57360
   Merged /nems/branches_restored/moorthi/nems_rev_75207/doc:r76982-76984
   Merged /nems/branches/jun/NEMSgsm_lib/doc:r81676-81843
   Merged /nems/branches/theurich/BIGMERGE-phase2/doc:r74478-74845
   Merged /nems/branches/two_way/doc:r16343-20484,21200-29433
   Merged /nems/branches/moorthi/nems_r63297/doc:r63582-75816
   Merged /nems/branches/stochy/doc:r75900-76005
   Merged /nems/branches/NUOPC/dev54597branch/doc:r54598-54847
   Merged /nems/branches/jun/gocart_test/doc:r82019-82173
   Merged /nems/branches/mfcnvcldC/doc:r73722-73907
   Merged /nems/branches/mfcnvcldB/doc:r72466-72493
   Merged /nems/branches/appify-fv3/doc:r87981-89248
   Merged /nems/branches/wx20rv/fix_make/doc:r21385-21475
   Merged /nems/branches/danrosen/development/doc:r56364-70568
   Merged /nems/branches/PhysDrvII/doc:r80168-80227
   Merged /nems/branches/crayfix/doc:r90333-90926
   Merged /nems/branches/regtests/doc:r84324-84807,86448-86844,92938-93574
   Merged /nems/branches/dusan/nnp_cleanup/doc:r64420-67599
   Merged /nems/branches/dusan/no_water/doc:r35808-40238
   Merged /nems/branches/nems_ndfi/doc:r77787-77842
   Merged /nems/branches/UGCS-Seasonal/landmask/doc:r76708-76894
   Merged /nems/branches/merge-fixes/doc:r94234-94668
   Merged /nems/branches/dusan/new_regtest/doc:r41734-45881
   Merged /nems/branches/jun/nems_wam/doc:r24962-25704
   Merged /nems/branches/regtests-update/doc:r83875-84033
   Merged /nems/branches/dusan/no_mixratio/doc:r40612-41262
   Merged /nems/branches/jun/gsm_timer/doc:r82802-82895
   Merged /nems/branches/modulefiles/doc:r85136-85744
   Merged /nems/branches/gocart-fix/doc:r93814-94233
   Merged /nems/branches/dusan/q2e2/doc:r35340-35406
   Merged /nems/branches/PhysDrvIII/doc:r82549-82750
   Merged /nems/branches/UGCS-Seasonal/highres-from80237/doc:r80281-80451
   Merged /nems/branches/jun/ngac_v2p1/doc:r49677-50735
   Merged /nems/branches/UGCS-Seasonal/highres-from77725/doc:r78058-80286
   Merged /nems/branches/BIGMERGE/doc:r74144-74994
   Merged /nems/branches/juang/ticket21/doc:r24966-25301
   Merged /nems/branches/sam/updated_medfixes_q3fy2017_lambda/doc:r93214-93574
   Merged /nems/branches/tripptime/doc:r84602-84673
   Merged /nems/branches/dusan/onegc/doc:r14685-16806
   Merged /nems/branches/NUOPC/mergeworx/doc:r39685-50268
   Merged /nems/branches/moorthi/nems_06172013/doc:r29212-31661
   Merged /nems/branches/dusan/old_passive_adv/doc:r35154-35260
   Merged /nems/branches/dusan/pint_rename/doc:r34844-35115
   Merged /nems/branches/moorthi/nems_slg/doc:r42207-60305
   Merged /nems/branches/euge/nemstrunk_NEMSLegacyMD_ph2/doc:r79817-80398
   Merged /nems/branches/nohup-at/doc:r92696-93211
   Merged /nems/branches/NUOPC/gjt/doc:r72003-72004
   Merged /nems/branches/nemsgfs_test/doc:r21253-21543
   Merged /nems/branches/oldtests/doc:r84899-85081
   Merged /nems/branches/NUOPC/COLA/doc:r58213-59766
   Merged /nems/branches/gsm_rst_zhour/doc:r85568-85606
   Merged /nems/branches/euge/nems_physchem_after_commit_changes/doc:r80557-80719
   Merged /nems/branches/jun/out_highfreq/doc:r68497-68951
   Merged /nems/branches/moorthi/nems_rev_75207/doc:r75208-76977,76985-79545
   Merged /nems/branches/theurich/mergeworx/doc:r39684
   Merged /nems/branches/theurich/removeIfdef/doc:r75033-75083
   Merged /nems/branches/project/doc:r85958-86237
   Merged /nems/branches/jun/nems_with_gsm/doc:r22209-22420
   Merged /nems/branches/dusan/sref.v7.0.0/doc:r48082-50624
   Merged /nems/branches/jun/o3fix/doc:r79823-79840
   Merged /nems/branches/Noah3.0/doc:r79081-80105
   Merged /nems/branches/juang/ticket22/doc:r25030-27070
   Merged /nems/branches/merge-cap/doc:r85705-85898
   Merged /nems/branches/wx20rv/reg_test/doc:r21202-21298
   Merged /nems/branches/q2fy17_gfs/doc:r72439-72730
   Merged /nems/branches/UGCS-Seasonal/atmgcfix/doc:r77240-77723
   Merged /nems/branches/ATM-refactor/doc:r83334-85705
   Merged /nems/branches/NUOPC/doc:r34889-39683
   Merged /nems/branches/update-docs/doc:r93396-94104
   Merged /nems/branches/UGCS-Seasonal/highresT574/doc:r76891-76977,76985-77665
   Merged /nems/branches/jun/wamfix_gfidigwd/doc:r28935-28986
   Merged /nems/branches/black/global_nests/doc:r42097-42627
   Merged /nems/branches/euge/trunk_copy/doc:r77677-77742
   Merged /nems/branches/NUOPC/weather/doc:r72211-72791,73753-73780
   Merged /nems/branches/restart_mvg_nests/doc:r20800-26559
   Merged /nems/branches/jun/nems_moorthi/doc:r73043-73567
   Merged /nems/branches/euge/nemstrunk_NEMSLegacyMD/doc:r78871-79816
Index: checkout/src/configure
===================================================================
--- checkout/src/configure	(revision 93212)
+++ checkout/src/configure	(revision 94669)
@@ -95,22 +95,22 @@
         ;;
     coupled_intel_wcoss)
         CONF_FILE="${CONF_FILE:-../../conf/configure.nems.Wcoss.intel}"
-        EXTERNALS_NEMS="${EXTERNALS_NEMS:-conf/externals.nems.Wcoss}"
+        EXTERNALS_NEMS="${EXTERNALS_NEMS:-../../conf/externals.nems.Wcoss}"
         CHOSEN_MODULE="${CHOSEN_MODULE:-wcoss.phase1/ESMF_NUOPC}"
         ;;
     coupled_intel_gaea)
         CONF_FILE="${CONF_FILE:-../../conf/configure.nems.Gaea.intel}"
-        EXTERNALS_NEMS="${EXTERNALS_NEMS:-conf/externals.nems.Gaea}"
+        EXTERNALS_NEMS="${EXTERNALS_NEMS:-../../conf/externals.nems.Gaea}"
         CHOSEN_MODULE="${CHOSEN_MODULE:-gaea/ESMF_NUOPC}"
         ;;
     coupled_intel_theia)
         CONF_FILE="${CONF_FILE:-../../conf/configure.nems.Theia.intel}"
-        EXTERNALS_NEMS="${EXTERNALS_NEMS:-conf/externals.nems.Theia}"
+        EXTERNALS_NEMS="${EXTERNALS_NEMS:-../../conf/externals.nems.Theia}"
         CHOSEN_MODULE="${CHOSEN_MODULE:-gaea/ESMF_NUOPC}"
         ;;
     coupled_intel_yellowstone)
         CONF_FILE="${CONF_FILE:-../../conf/configure.nems.Yellowstone.intel}"
-        EXTERNALS_NEMS="${EXTERNALS_NEMS:-conf/externals.nems.Yellowstone}"
+        EXTERNALS_NEMS="${EXTERNALS_NEMS:-../../conf/externals.nems.Yellowstone}"
         CHOSEN_MODULE="${CHOSEN_MODULE:-gaea/ESMF_NUOPC}"
         ;;
     coupled_linux_gnu)
@@ -141,6 +141,8 @@
     if [[ ! -e conf/externals.nems || -s conf/externals.nems ]] ; then
         cat /dev/null > conf/externals.nems
     fi
+elif [[ -s ../../conf/$EXTERNALS_NEMS ]] ; then
+    copy_diff_files "../../conf/$EXTERNALS_NEMS" conf/externals.nems
 else
     copy_diff_files "$EXTERNALS_NEMS" conf/externals.nems
 fi
Index: checkout/src
===================================================================
--- checkout/src	(revision 93212)
+++ checkout/src	(revision 94669)

Property changes on: checkout/src
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,6 ##
   Merged /nems/branches/NEMSUpdate/UGCSWeather/src:r93382-94235
   Merged /nems/branches/regtests/src:r92938-93574
   Merged /nems/branches/update-docs/src:r93396-94104
   Merged /nems/branches/merge-fixes/src:r94234-94668
   Merged /nems/branches/gocart-fix/src:r93814-94233
   Merged /nems/branches/sam/updated_medfixes_q3fy2017_lambda/src:r93214-93574
Index: checkout/NEMSAppBuilder
===================================================================
--- checkout/NEMSAppBuilder	(revision 93212)
+++ checkout/NEMSAppBuilder	(revision 94669)
@@ -7,6 +7,8 @@
 
 set -x
 
+ulimit -s 200000
+
 # add paths to find personal "dialog" installations on Theia, Yellowstone.
 PATH=$PATH:/scratch4/NCEPDEV/nems/save/Gerhard.Theurich/bin:/glade/p/work/theurich/bin
 
@@ -89,6 +91,12 @@
     MACHINE_ID=''
 fi
 
+MACHINE_ID_DOT=$( echo "$MACHINE_ID" | sed 's,_,\.,g' )
+MACHINE_ID_UNDER=$( echo "$MACHINE_ID" | sed 's,\.,_,g' )
+
+FULL_MACHINE_ID_DOT=$( echo "$FULL_MACHINE_ID" | sed 's,_,\.,g' )
+FULL_MACHINE_ID_UNDER=$( echo "$FULL_MACHINE_ID" | sed 's,\.,_,g' )
+
 if [[ "$MACHINE_ID" == yellowstone ]] ; then
     source /glade/apps/opt/lmod/lmod/init/bash
 elif [[ "$MACHINE_ID" == gaea ]] ; then
@@ -273,7 +281,7 @@
   $0
 Autobuild (non-interactive) mode:
   $0 auto \\
-    [app=APP] [flags] [question:answer] [+COMP1 [+COMP2 [...]]]
+    [project=PROJ] [flags] [question:answer] [+COMP1 [+COMP2 [...]]]
 
 Run with no arguments to start GUI mode.  This will fail if both
 dialog and whiptail are unavailable.
@@ -289,8 +297,9 @@
 
 App and component selection:
 
-  app=APPNAME --- use this app.  Only relevant if more than one app is
-                  available.
+  app=APP or
+  project=PROJ --- use this build target.  Only relevant if more than
+                   one target is available.
   +COMP --- enable component COMP.  Only relevant if the app used is NEMS
 
 Question:Answer values:
@@ -342,6 +351,8 @@
             should_exit=yes
         elif [[ "$opt" =~ ^app=.+ ]] ; then
             app="${opt:4}"
+        elif [[ "$opt" =~ ^project=.+ ]] ; then
+            app="${opt:8}"
         elif [[ "$opt" =~ ^confopt=.+ ]] ; then
             confopt="${opt:8}"
         elif [[ "$opt" =~ ^makeopt=.+ ]] ; then
@@ -977,7 +988,7 @@
 
 build_ww3(){
   # in: COMP, COMP_SRCDIR, COMP_BINDIR
-    local WW3_COMP=Intel
+    local WW3_COMP='Intel'
   if [[ "$1" == "clean" ]] ; then
     echo "Cleaning $COMP..."
     cd "$COMP_SRCDIR/esmf"
@@ -990,7 +1001,7 @@
   else
     echo "Building $COMP..."
     cd "$COMP_SRCDIR/esmf"
-    make WW3_COMP="$WW3_COMP"
+    make WW3_COMP="$WW3_COMP" ww3_nems
     mkdir -p "$COMP_BINDIR"
     cp "$COMP_SRCDIR/nuopc.mk" "$COMP_BINDIR"
     if ([ ! -d "$COMP_BINDIR" ]); then
@@ -1311,7 +1322,10 @@
 
 autonote "Configure using $conf_mode mode."
 autonote "Command: ./configure $CONFOPT $CHOSEN_MODULE"
-./configure "$CONFOPT" $CHOSEN_MODULE 2>&1 | tee $LOGDIR/appBuilder.NEMS.log.$$
+./configure "$CONFOPT" $CHOSEN_MODULE \
+     ${EXTERNALS_NEMS:-''} \
+     ${ESMF_VERSION_DEFINE=''} \
+     2>&1 | tee $LOGDIR/appBuilder.NEMS.log.$$
 
 echo >> $LOGDIR/appBuilder.NEMS.log.$$
 cd $ROOTDIR
Index: checkout
===================================================================
--- checkout	(revision 93212)
+++ checkout	(revision 94669)

Property changes on: checkout
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,6 ##
   Merged /nems/branches/update-docs:r93396-94104
   Merged /nems/branches/gocart-fix:r93814-94233
   Merged /nems/branches/merge-fixes:r94234-94668
   Merged /nems/branches/NEMSUpdate/UGCSWeather:r93382-94235,94237-94655
   Merged /nems/branches/regtests:r92938-93574
   Merged /nems/branches/sam/updated_medfixes_q3fy2017_lambda:r93214-93574

